2017-09-26 Thomas Koenig <tkoenig@gcc.gnu.org>
[official-gcc.git] / gcc / combine.c
blobe502fa147267d071adc0e47764ad8ded56ca0683
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
108 /* Number of attempts to combine instructions in this function. */
110 static int combine_attempts;
112 /* Number of attempts that got as far as substitution in this function. */
114 static int combine_merges;
116 /* Number of instructions combined with added SETs in this function. */
118 static int combine_extras;
120 /* Number of instructions combined in this function. */
122 static int combine_successes;
124 /* Totals over entire compilation. */
126 static int total_attempts, total_merges, total_extras, total_successes;
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
135 static rtx_insn *i2mod;
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139 static rtx i2mod_old_rhs;
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143 static rtx i2mod_new_rhs;
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
165 Therefore, we maintain the following fields:
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
181 (The next two parameters are out of date).
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
195 /* Record last value assigned to (hard or pseudo) register n. */
197 rtx last_set_value;
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
202 int last_set_table_tick;
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
207 int last_set_label;
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
212 sign bits copies it was known to have when it was last set. */
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
222 char last_set_invalid;
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
234 If an entry is zero, it means that we don't know anything special. */
236 unsigned char sign_bit_copies;
238 unsigned HOST_WIDE_INT nonzero_bits;
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
244 int truncation_label;
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that value already contains a truncated
249 value. */
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
255 static vec<reg_stat_type> reg_stat;
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
265 static unsigned int reg_n_sets_max;
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
270 static int mem_last_set;
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
275 static int last_call_luid;
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
283 static rtx_insn *subst_insn;
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
292 static int subst_low_luid;
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
297 static HARD_REG_SET newpat_used_regs;
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
300 insn is the earlier than I2 or I3, combine should rescan starting at
301 that location. */
303 static rtx_insn *added_links_insn;
305 /* Basic block in which we are performing combines. */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
310 /* Length of the currently allocated uid_insn_cost array. */
312 static int max_uid_known;
314 /* The following array records the insn_rtx_cost for every insn
315 in the instruction stream. */
317 static int *uid_insn_cost;
319 /* The following array records the LOG_LINKS for every insn in the
320 instruction stream as struct insn_link pointers. */
322 struct insn_link {
323 rtx_insn *insn;
324 unsigned int regno;
325 struct insn_link *next;
328 static struct insn_link **uid_log_links;
330 static inline int
331 insn_uid_check (const_rtx insn)
333 int uid = INSN_UID (insn);
334 gcc_checking_assert (uid <= max_uid_known);
335 return uid;
338 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
339 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
341 #define FOR_EACH_LOG_LINK(L, INSN) \
342 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
344 /* Links for LOG_LINKS are allocated from this obstack. */
346 static struct obstack insn_link_obstack;
348 /* Allocate a link. */
350 static inline struct insn_link *
351 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
353 struct insn_link *l
354 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
355 sizeof (struct insn_link));
356 l->insn = insn;
357 l->regno = regno;
358 l->next = next;
359 return l;
362 /* Incremented for each basic block. */
364 static int label_tick;
366 /* Reset to label_tick for each extended basic block in scanning order. */
368 static int label_tick_ebb_start;
370 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
371 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
373 static scalar_int_mode nonzero_bits_mode;
375 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
376 be safely used. It is zero while computing them and after combine has
377 completed. This former test prevents propagating values based on
378 previously set values, which can be incorrect if a variable is modified
379 in a loop. */
381 static int nonzero_sign_valid;
384 /* Record one modification to rtl structure
385 to be undone by storing old_contents into *where. */
387 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
389 struct undo
391 struct undo *next;
392 enum undo_kind kind;
393 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
394 union { rtx *r; int *i; struct insn_link **l; } where;
397 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
398 num_undo says how many are currently recorded.
400 other_insn is nonzero if we have modified some other insn in the process
401 of working on subst_insn. It must be verified too. */
403 struct undobuf
405 struct undo *undos;
406 struct undo *frees;
407 rtx_insn *other_insn;
410 static struct undobuf undobuf;
412 /* Number of times the pseudo being substituted for
413 was found and replaced. */
415 static int n_occurrences;
417 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
418 scalar_int_mode,
419 unsigned HOST_WIDE_INT *);
420 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
421 scalar_int_mode,
422 unsigned int *);
423 static void do_SUBST (rtx *, rtx);
424 static void do_SUBST_INT (int *, int);
425 static void init_reg_last (void);
426 static void setup_incoming_promotions (rtx_insn *);
427 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
428 static int cant_combine_insn_p (rtx_insn *);
429 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
430 rtx_insn *, rtx_insn *, rtx *, rtx *);
431 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
432 static int contains_muldiv (rtx);
433 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 int *, rtx_insn *);
435 static void undo_all (void);
436 static void undo_commit (void);
437 static rtx *find_split_point (rtx *, rtx_insn *, bool);
438 static rtx subst (rtx, rtx, rtx, int, int, int);
439 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
440 static rtx simplify_if_then_else (rtx);
441 static rtx simplify_set (rtx);
442 static rtx simplify_logical (rtx);
443 static rtx expand_compound_operation (rtx);
444 static const_rtx expand_field_assignment (const_rtx);
445 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
446 rtx, unsigned HOST_WIDE_INT, int, int, int);
447 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
448 unsigned HOST_WIDE_INT *);
449 static rtx canon_reg_for_combine (rtx, rtx);
450 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
451 scalar_int_mode, unsigned HOST_WIDE_INT, int);
452 static rtx force_to_mode (rtx, machine_mode,
453 unsigned HOST_WIDE_INT, int);
454 static rtx if_then_else_cond (rtx, rtx *, rtx *);
455 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
456 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
457 static rtx make_field_assignment (rtx);
458 static rtx apply_distributive_law (rtx);
459 static rtx distribute_and_simplify_rtx (rtx, int);
460 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
461 unsigned HOST_WIDE_INT);
462 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
463 unsigned HOST_WIDE_INT);
464 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
465 HOST_WIDE_INT, machine_mode, int *);
466 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
467 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
468 int);
469 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
470 static rtx gen_lowpart_for_combine (machine_mode, rtx);
471 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
472 rtx, rtx *);
473 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
474 static void update_table_tick (rtx);
475 static void record_value_for_reg (rtx, rtx_insn *, rtx);
476 static void check_promoted_subreg (rtx_insn *, rtx);
477 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
478 static void record_dead_and_set_regs (rtx_insn *);
479 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
480 static rtx get_last_value (const_rtx);
481 static int use_crosses_set_p (const_rtx, int);
482 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
483 static int reg_dead_at_p (rtx, rtx_insn *);
484 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
485 static int reg_bitfield_target_p (rtx, rtx);
486 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
487 static void distribute_links (struct insn_link *);
488 static void mark_used_regs_combine (rtx);
489 static void record_promoted_value (rtx_insn *, rtx);
490 static bool unmentioned_reg_p (rtx, rtx);
491 static void record_truncated_values (rtx *, void *);
492 static bool reg_truncated_to_mode (machine_mode, const_rtx);
493 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
496 /* It is not safe to use ordinary gen_lowpart in combine.
497 See comments in gen_lowpart_for_combine. */
498 #undef RTL_HOOKS_GEN_LOWPART
499 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
501 /* Our implementation of gen_lowpart never emits a new pseudo. */
502 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
503 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
505 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
506 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
508 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
509 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
511 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
512 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
514 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
517 /* Convenience wrapper for the canonicalize_comparison target hook.
518 Target hooks cannot use enum rtx_code. */
519 static inline void
520 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
521 bool op0_preserve_value)
523 int code_int = (int)*code;
524 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
525 *code = (enum rtx_code)code_int;
528 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
529 PATTERN can not be split. Otherwise, it returns an insn sequence.
530 This is a wrapper around split_insns which ensures that the
531 reg_stat vector is made larger if the splitter creates a new
532 register. */
534 static rtx_insn *
535 combine_split_insns (rtx pattern, rtx_insn *insn)
537 rtx_insn *ret;
538 unsigned int nregs;
540 ret = split_insns (pattern, insn);
541 nregs = max_reg_num ();
542 if (nregs > reg_stat.length ())
543 reg_stat.safe_grow_cleared (nregs);
544 return ret;
547 /* This is used by find_single_use to locate an rtx in LOC that
548 contains exactly one use of DEST, which is typically either a REG
549 or CC0. It returns a pointer to the innermost rtx expression
550 containing DEST. Appearances of DEST that are being used to
551 totally replace it are not counted. */
553 static rtx *
554 find_single_use_1 (rtx dest, rtx *loc)
556 rtx x = *loc;
557 enum rtx_code code = GET_CODE (x);
558 rtx *result = NULL;
559 rtx *this_result;
560 int i;
561 const char *fmt;
563 switch (code)
565 case CONST:
566 case LABEL_REF:
567 case SYMBOL_REF:
568 CASE_CONST_ANY:
569 case CLOBBER:
570 return 0;
572 case SET:
573 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
574 of a REG that occupies all of the REG, the insn uses DEST if
575 it is mentioned in the destination or the source. Otherwise, we
576 need just check the source. */
577 if (GET_CODE (SET_DEST (x)) != CC0
578 && GET_CODE (SET_DEST (x)) != PC
579 && !REG_P (SET_DEST (x))
580 && ! (GET_CODE (SET_DEST (x)) == SUBREG
581 && REG_P (SUBREG_REG (SET_DEST (x)))
582 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
583 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
584 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
585 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
586 break;
588 return find_single_use_1 (dest, &SET_SRC (x));
590 case MEM:
591 case SUBREG:
592 return find_single_use_1 (dest, &XEXP (x, 0));
594 default:
595 break;
598 /* If it wasn't one of the common cases above, check each expression and
599 vector of this code. Look for a unique usage of DEST. */
601 fmt = GET_RTX_FORMAT (code);
602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
604 if (fmt[i] == 'e')
606 if (dest == XEXP (x, i)
607 || (REG_P (dest) && REG_P (XEXP (x, i))
608 && REGNO (dest) == REGNO (XEXP (x, i))))
609 this_result = loc;
610 else
611 this_result = find_single_use_1 (dest, &XEXP (x, i));
613 if (result == NULL)
614 result = this_result;
615 else if (this_result)
616 /* Duplicate usage. */
617 return NULL;
619 else if (fmt[i] == 'E')
621 int j;
623 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
625 if (XVECEXP (x, i, j) == dest
626 || (REG_P (dest)
627 && REG_P (XVECEXP (x, i, j))
628 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
629 this_result = loc;
630 else
631 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
633 if (result == NULL)
634 result = this_result;
635 else if (this_result)
636 return NULL;
641 return result;
645 /* See if DEST, produced in INSN, is used only a single time in the
646 sequel. If so, return a pointer to the innermost rtx expression in which
647 it is used.
649 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
651 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
652 care about REG_DEAD notes or LOG_LINKS.
654 Otherwise, we find the single use by finding an insn that has a
655 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
656 only referenced once in that insn, we know that it must be the first
657 and last insn referencing DEST. */
659 static rtx *
660 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
662 basic_block bb;
663 rtx_insn *next;
664 rtx *result;
665 struct insn_link *link;
667 if (dest == cc0_rtx)
669 next = NEXT_INSN (insn);
670 if (next == 0
671 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
672 return 0;
674 result = find_single_use_1 (dest, &PATTERN (next));
675 if (result && ploc)
676 *ploc = next;
677 return result;
680 if (!REG_P (dest))
681 return 0;
683 bb = BLOCK_FOR_INSN (insn);
684 for (next = NEXT_INSN (insn);
685 next && BLOCK_FOR_INSN (next) == bb;
686 next = NEXT_INSN (next))
687 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
689 FOR_EACH_LOG_LINK (link, next)
690 if (link->insn == insn && link->regno == REGNO (dest))
691 break;
693 if (link)
695 result = find_single_use_1 (dest, &PATTERN (next));
696 if (ploc)
697 *ploc = next;
698 return result;
702 return 0;
705 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
706 insn. The substitution can be undone by undo_all. If INTO is already
707 set to NEWVAL, do not record this change. Because computing NEWVAL might
708 also call SUBST, we have to compute it before we put anything into
709 the undo table. */
711 static void
712 do_SUBST (rtx *into, rtx newval)
714 struct undo *buf;
715 rtx oldval = *into;
717 if (oldval == newval)
718 return;
720 /* We'd like to catch as many invalid transformations here as
721 possible. Unfortunately, there are way too many mode changes
722 that are perfectly valid, so we'd waste too much effort for
723 little gain doing the checks here. Focus on catching invalid
724 transformations involving integer constants. */
725 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
726 && CONST_INT_P (newval))
728 /* Sanity check that we're replacing oldval with a CONST_INT
729 that is a valid sign-extension for the original mode. */
730 gcc_assert (INTVAL (newval)
731 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
733 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
734 CONST_INT is not valid, because after the replacement, the
735 original mode would be gone. Unfortunately, we can't tell
736 when do_SUBST is called to replace the operand thereof, so we
737 perform this test on oldval instead, checking whether an
738 invalid replacement took place before we got here. */
739 gcc_assert (!(GET_CODE (oldval) == SUBREG
740 && CONST_INT_P (SUBREG_REG (oldval))));
741 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
742 && CONST_INT_P (XEXP (oldval, 0))));
745 if (undobuf.frees)
746 buf = undobuf.frees, undobuf.frees = buf->next;
747 else
748 buf = XNEW (struct undo);
750 buf->kind = UNDO_RTX;
751 buf->where.r = into;
752 buf->old_contents.r = oldval;
753 *into = newval;
755 buf->next = undobuf.undos, undobuf.undos = buf;
758 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
760 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
761 for the value of a HOST_WIDE_INT value (including CONST_INT) is
762 not safe. */
764 static void
765 do_SUBST_INT (int *into, int newval)
767 struct undo *buf;
768 int oldval = *into;
770 if (oldval == newval)
771 return;
773 if (undobuf.frees)
774 buf = undobuf.frees, undobuf.frees = buf->next;
775 else
776 buf = XNEW (struct undo);
778 buf->kind = UNDO_INT;
779 buf->where.i = into;
780 buf->old_contents.i = oldval;
781 *into = newval;
783 buf->next = undobuf.undos, undobuf.undos = buf;
786 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
788 /* Similar to SUBST, but just substitute the mode. This is used when
789 changing the mode of a pseudo-register, so that any other
790 references to the entry in the regno_reg_rtx array will change as
791 well. */
793 static void
794 do_SUBST_MODE (rtx *into, machine_mode newval)
796 struct undo *buf;
797 machine_mode oldval = GET_MODE (*into);
799 if (oldval == newval)
800 return;
802 if (undobuf.frees)
803 buf = undobuf.frees, undobuf.frees = buf->next;
804 else
805 buf = XNEW (struct undo);
807 buf->kind = UNDO_MODE;
808 buf->where.r = into;
809 buf->old_contents.m = oldval;
810 adjust_reg_mode (*into, newval);
812 buf->next = undobuf.undos, undobuf.undos = buf;
815 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
817 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
819 static void
820 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
822 struct undo *buf;
823 struct insn_link * oldval = *into;
825 if (oldval == newval)
826 return;
828 if (undobuf.frees)
829 buf = undobuf.frees, undobuf.frees = buf->next;
830 else
831 buf = XNEW (struct undo);
833 buf->kind = UNDO_LINKS;
834 buf->where.l = into;
835 buf->old_contents.l = oldval;
836 *into = newval;
838 buf->next = undobuf.undos, undobuf.undos = buf;
841 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
843 /* Subroutine of try_combine. Determine whether the replacement patterns
844 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
845 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
846 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
847 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
848 of all the instructions can be estimated and the replacements are more
849 expensive than the original sequence. */
851 static bool
852 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
853 rtx newpat, rtx newi2pat, rtx newotherpat)
855 int i0_cost, i1_cost, i2_cost, i3_cost;
856 int new_i2_cost, new_i3_cost;
857 int old_cost, new_cost;
859 /* Lookup the original insn_rtx_costs. */
860 i2_cost = INSN_COST (i2);
861 i3_cost = INSN_COST (i3);
863 if (i1)
865 i1_cost = INSN_COST (i1);
866 if (i0)
868 i0_cost = INSN_COST (i0);
869 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
870 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
872 else
874 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
875 ? i1_cost + i2_cost + i3_cost : 0);
876 i0_cost = 0;
879 else
881 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
882 i1_cost = i0_cost = 0;
885 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
886 correct that. */
887 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
888 old_cost -= i1_cost;
891 /* Calculate the replacement insn_rtx_costs. */
892 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
893 if (newi2pat)
895 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
896 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
897 ? new_i2_cost + new_i3_cost : 0;
899 else
901 new_cost = new_i3_cost;
902 new_i2_cost = 0;
905 if (undobuf.other_insn)
907 int old_other_cost, new_other_cost;
909 old_other_cost = INSN_COST (undobuf.other_insn);
910 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
911 if (old_other_cost > 0 && new_other_cost > 0)
913 old_cost += old_other_cost;
914 new_cost += new_other_cost;
916 else
917 old_cost = 0;
920 /* Disallow this combination if both new_cost and old_cost are greater than
921 zero, and new_cost is greater than old cost. */
922 int reject = old_cost > 0 && new_cost > old_cost;
924 if (dump_file)
926 fprintf (dump_file, "%s combination of insns ",
927 reject ? "rejecting" : "allowing");
928 if (i0)
929 fprintf (dump_file, "%d, ", INSN_UID (i0));
930 if (i1 && INSN_UID (i1) != INSN_UID (i2))
931 fprintf (dump_file, "%d, ", INSN_UID (i1));
932 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
934 fprintf (dump_file, "original costs ");
935 if (i0)
936 fprintf (dump_file, "%d + ", i0_cost);
937 if (i1 && INSN_UID (i1) != INSN_UID (i2))
938 fprintf (dump_file, "%d + ", i1_cost);
939 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
941 if (newi2pat)
942 fprintf (dump_file, "replacement costs %d + %d = %d\n",
943 new_i2_cost, new_i3_cost, new_cost);
944 else
945 fprintf (dump_file, "replacement cost %d\n", new_cost);
948 if (reject)
949 return false;
951 /* Update the uid_insn_cost array with the replacement costs. */
952 INSN_COST (i2) = new_i2_cost;
953 INSN_COST (i3) = new_i3_cost;
954 if (i1)
956 INSN_COST (i1) = 0;
957 if (i0)
958 INSN_COST (i0) = 0;
961 return true;
965 /* Delete any insns that copy a register to itself. */
967 static void
968 delete_noop_moves (void)
970 rtx_insn *insn, *next;
971 basic_block bb;
973 FOR_EACH_BB_FN (bb, cfun)
975 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
977 next = NEXT_INSN (insn);
978 if (INSN_P (insn) && noop_move_p (insn))
980 if (dump_file)
981 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
983 delete_insn_and_edges (insn);
990 /* Return false if we do not want to (or cannot) combine DEF. */
991 static bool
992 can_combine_def_p (df_ref def)
994 /* Do not consider if it is pre/post modification in MEM. */
995 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
996 return false;
998 unsigned int regno = DF_REF_REGNO (def);
1000 /* Do not combine frame pointer adjustments. */
1001 if ((regno == FRAME_POINTER_REGNUM
1002 && (!reload_completed || frame_pointer_needed))
1003 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1004 && regno == HARD_FRAME_POINTER_REGNUM
1005 && (!reload_completed || frame_pointer_needed))
1006 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1007 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1008 return false;
1010 return true;
1013 /* Return false if we do not want to (or cannot) combine USE. */
1014 static bool
1015 can_combine_use_p (df_ref use)
1017 /* Do not consider the usage of the stack pointer by function call. */
1018 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1019 return false;
1021 return true;
1024 /* Fill in log links field for all insns. */
1026 static void
1027 create_log_links (void)
1029 basic_block bb;
1030 rtx_insn **next_use;
1031 rtx_insn *insn;
1032 df_ref def, use;
1034 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1036 /* Pass through each block from the end, recording the uses of each
1037 register and establishing log links when def is encountered.
1038 Note that we do not clear next_use array in order to save time,
1039 so we have to test whether the use is in the same basic block as def.
1041 There are a few cases below when we do not consider the definition or
1042 usage -- these are taken from original flow.c did. Don't ask me why it is
1043 done this way; I don't know and if it works, I don't want to know. */
1045 FOR_EACH_BB_FN (bb, cfun)
1047 FOR_BB_INSNS_REVERSE (bb, insn)
1049 if (!NONDEBUG_INSN_P (insn))
1050 continue;
1052 /* Log links are created only once. */
1053 gcc_assert (!LOG_LINKS (insn));
1055 FOR_EACH_INSN_DEF (def, insn)
1057 unsigned int regno = DF_REF_REGNO (def);
1058 rtx_insn *use_insn;
1060 if (!next_use[regno])
1061 continue;
1063 if (!can_combine_def_p (def))
1064 continue;
1066 use_insn = next_use[regno];
1067 next_use[regno] = NULL;
1069 if (BLOCK_FOR_INSN (use_insn) != bb)
1070 continue;
1072 /* flow.c claimed:
1074 We don't build a LOG_LINK for hard registers contained
1075 in ASM_OPERANDs. If these registers get replaced,
1076 we might wind up changing the semantics of the insn,
1077 even if reload can make what appear to be valid
1078 assignments later. */
1079 if (regno < FIRST_PSEUDO_REGISTER
1080 && asm_noperands (PATTERN (use_insn)) >= 0)
1081 continue;
1083 /* Don't add duplicate links between instructions. */
1084 struct insn_link *links;
1085 FOR_EACH_LOG_LINK (links, use_insn)
1086 if (insn == links->insn && regno == links->regno)
1087 break;
1089 if (!links)
1090 LOG_LINKS (use_insn)
1091 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1094 FOR_EACH_INSN_USE (use, insn)
1095 if (can_combine_use_p (use))
1096 next_use[DF_REF_REGNO (use)] = insn;
1100 free (next_use);
1103 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1104 true if we found a LOG_LINK that proves that A feeds B. This only works
1105 if there are no instructions between A and B which could have a link
1106 depending on A, since in that case we would not record a link for B.
1107 We also check the implicit dependency created by a cc0 setter/user
1108 pair. */
1110 static bool
1111 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1113 struct insn_link *links;
1114 FOR_EACH_LOG_LINK (links, b)
1115 if (links->insn == a)
1116 return true;
1117 if (HAVE_cc0 && sets_cc0_p (a))
1118 return true;
1119 return false;
1122 /* Main entry point for combiner. F is the first insn of the function.
1123 NREGS is the first unused pseudo-reg number.
1125 Return nonzero if the combiner has turned an indirect jump
1126 instruction into a direct jump. */
1127 static int
1128 combine_instructions (rtx_insn *f, unsigned int nregs)
1130 rtx_insn *insn, *next;
1131 rtx_insn *prev;
1132 struct insn_link *links, *nextlinks;
1133 rtx_insn *first;
1134 basic_block last_bb;
1136 int new_direct_jump_p = 0;
1138 for (first = f; first && !NONDEBUG_INSN_P (first); )
1139 first = NEXT_INSN (first);
1140 if (!first)
1141 return 0;
1143 combine_attempts = 0;
1144 combine_merges = 0;
1145 combine_extras = 0;
1146 combine_successes = 0;
1148 rtl_hooks = combine_rtl_hooks;
1150 reg_stat.safe_grow_cleared (nregs);
1152 init_recog_no_volatile ();
1154 /* Allocate array for insn info. */
1155 max_uid_known = get_max_uid ();
1156 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1157 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1158 gcc_obstack_init (&insn_link_obstack);
1160 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1162 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1163 problems when, for example, we have j <<= 1 in a loop. */
1165 nonzero_sign_valid = 0;
1166 label_tick = label_tick_ebb_start = 1;
1168 /* Scan all SETs and see if we can deduce anything about what
1169 bits are known to be zero for some registers and how many copies
1170 of the sign bit are known to exist for those registers.
1172 Also set any known values so that we can use it while searching
1173 for what bits are known to be set. */
1175 setup_incoming_promotions (first);
1176 /* Allow the entry block and the first block to fall into the same EBB.
1177 Conceptually the incoming promotions are assigned to the entry block. */
1178 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1180 create_log_links ();
1181 FOR_EACH_BB_FN (this_basic_block, cfun)
1183 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1184 last_call_luid = 0;
1185 mem_last_set = -1;
1187 label_tick++;
1188 if (!single_pred_p (this_basic_block)
1189 || single_pred (this_basic_block) != last_bb)
1190 label_tick_ebb_start = label_tick;
1191 last_bb = this_basic_block;
1193 FOR_BB_INSNS (this_basic_block, insn)
1194 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1196 rtx links;
1198 subst_low_luid = DF_INSN_LUID (insn);
1199 subst_insn = insn;
1201 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1202 insn);
1203 record_dead_and_set_regs (insn);
1205 if (AUTO_INC_DEC)
1206 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1207 if (REG_NOTE_KIND (links) == REG_INC)
1208 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1209 insn);
1211 /* Record the current insn_rtx_cost of this instruction. */
1212 if (NONJUMP_INSN_P (insn))
1213 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1214 optimize_this_for_speed_p);
1215 if (dump_file)
1217 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1218 dump_insn_slim (dump_file, insn);
1223 nonzero_sign_valid = 1;
1225 /* Now scan all the insns in forward order. */
1226 label_tick = label_tick_ebb_start = 1;
1227 init_reg_last ();
1228 setup_incoming_promotions (first);
1229 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1230 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1232 FOR_EACH_BB_FN (this_basic_block, cfun)
1234 rtx_insn *last_combined_insn = NULL;
1235 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1236 last_call_luid = 0;
1237 mem_last_set = -1;
1239 label_tick++;
1240 if (!single_pred_p (this_basic_block)
1241 || single_pred (this_basic_block) != last_bb)
1242 label_tick_ebb_start = label_tick;
1243 last_bb = this_basic_block;
1245 rtl_profile_for_bb (this_basic_block);
1246 for (insn = BB_HEAD (this_basic_block);
1247 insn != NEXT_INSN (BB_END (this_basic_block));
1248 insn = next ? next : NEXT_INSN (insn))
1250 next = 0;
1251 if (!NONDEBUG_INSN_P (insn))
1252 continue;
1254 while (last_combined_insn
1255 && (!NONDEBUG_INSN_P (last_combined_insn)
1256 || last_combined_insn->deleted ()))
1257 last_combined_insn = PREV_INSN (last_combined_insn);
1258 if (last_combined_insn == NULL_RTX
1259 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1260 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1261 last_combined_insn = insn;
1263 /* See if we know about function return values before this
1264 insn based upon SUBREG flags. */
1265 check_promoted_subreg (insn, PATTERN (insn));
1267 /* See if we can find hardregs and subreg of pseudos in
1268 narrower modes. This could help turning TRUNCATEs
1269 into SUBREGs. */
1270 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1272 /* Try this insn with each insn it links back to. */
1274 FOR_EACH_LOG_LINK (links, insn)
1275 if ((next = try_combine (insn, links->insn, NULL,
1276 NULL, &new_direct_jump_p,
1277 last_combined_insn)) != 0)
1279 statistics_counter_event (cfun, "two-insn combine", 1);
1280 goto retry;
1283 /* Try each sequence of three linked insns ending with this one. */
1285 if (max_combine >= 3)
1286 FOR_EACH_LOG_LINK (links, insn)
1288 rtx_insn *link = links->insn;
1290 /* If the linked insn has been replaced by a note, then there
1291 is no point in pursuing this chain any further. */
1292 if (NOTE_P (link))
1293 continue;
1295 FOR_EACH_LOG_LINK (nextlinks, link)
1296 if ((next = try_combine (insn, link, nextlinks->insn,
1297 NULL, &new_direct_jump_p,
1298 last_combined_insn)) != 0)
1300 statistics_counter_event (cfun, "three-insn combine", 1);
1301 goto retry;
1305 /* Try to combine a jump insn that uses CC0
1306 with a preceding insn that sets CC0, and maybe with its
1307 logical predecessor as well.
1308 This is how we make decrement-and-branch insns.
1309 We need this special code because data flow connections
1310 via CC0 do not get entered in LOG_LINKS. */
1312 if (HAVE_cc0
1313 && JUMP_P (insn)
1314 && (prev = prev_nonnote_insn (insn)) != 0
1315 && NONJUMP_INSN_P (prev)
1316 && sets_cc0_p (PATTERN (prev)))
1318 if ((next = try_combine (insn, prev, NULL, NULL,
1319 &new_direct_jump_p,
1320 last_combined_insn)) != 0)
1321 goto retry;
1323 FOR_EACH_LOG_LINK (nextlinks, prev)
1324 if ((next = try_combine (insn, prev, nextlinks->insn,
1325 NULL, &new_direct_jump_p,
1326 last_combined_insn)) != 0)
1327 goto retry;
1330 /* Do the same for an insn that explicitly references CC0. */
1331 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1332 && (prev = prev_nonnote_insn (insn)) != 0
1333 && NONJUMP_INSN_P (prev)
1334 && sets_cc0_p (PATTERN (prev))
1335 && GET_CODE (PATTERN (insn)) == SET
1336 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1338 if ((next = try_combine (insn, prev, NULL, NULL,
1339 &new_direct_jump_p,
1340 last_combined_insn)) != 0)
1341 goto retry;
1343 FOR_EACH_LOG_LINK (nextlinks, prev)
1344 if ((next = try_combine (insn, prev, nextlinks->insn,
1345 NULL, &new_direct_jump_p,
1346 last_combined_insn)) != 0)
1347 goto retry;
1350 /* Finally, see if any of the insns that this insn links to
1351 explicitly references CC0. If so, try this insn, that insn,
1352 and its predecessor if it sets CC0. */
1353 if (HAVE_cc0)
1355 FOR_EACH_LOG_LINK (links, insn)
1356 if (NONJUMP_INSN_P (links->insn)
1357 && GET_CODE (PATTERN (links->insn)) == SET
1358 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1359 && (prev = prev_nonnote_insn (links->insn)) != 0
1360 && NONJUMP_INSN_P (prev)
1361 && sets_cc0_p (PATTERN (prev))
1362 && (next = try_combine (insn, links->insn,
1363 prev, NULL, &new_direct_jump_p,
1364 last_combined_insn)) != 0)
1365 goto retry;
1368 /* Try combining an insn with two different insns whose results it
1369 uses. */
1370 if (max_combine >= 3)
1371 FOR_EACH_LOG_LINK (links, insn)
1372 for (nextlinks = links->next; nextlinks;
1373 nextlinks = nextlinks->next)
1374 if ((next = try_combine (insn, links->insn,
1375 nextlinks->insn, NULL,
1376 &new_direct_jump_p,
1377 last_combined_insn)) != 0)
1380 statistics_counter_event (cfun, "three-insn combine", 1);
1381 goto retry;
1384 /* Try four-instruction combinations. */
1385 if (max_combine >= 4)
1386 FOR_EACH_LOG_LINK (links, insn)
1388 struct insn_link *next1;
1389 rtx_insn *link = links->insn;
1391 /* If the linked insn has been replaced by a note, then there
1392 is no point in pursuing this chain any further. */
1393 if (NOTE_P (link))
1394 continue;
1396 FOR_EACH_LOG_LINK (next1, link)
1398 rtx_insn *link1 = next1->insn;
1399 if (NOTE_P (link1))
1400 continue;
1401 /* I0 -> I1 -> I2 -> I3. */
1402 FOR_EACH_LOG_LINK (nextlinks, link1)
1403 if ((next = try_combine (insn, link, link1,
1404 nextlinks->insn,
1405 &new_direct_jump_p,
1406 last_combined_insn)) != 0)
1408 statistics_counter_event (cfun, "four-insn combine", 1);
1409 goto retry;
1411 /* I0, I1 -> I2, I2 -> I3. */
1412 for (nextlinks = next1->next; nextlinks;
1413 nextlinks = nextlinks->next)
1414 if ((next = try_combine (insn, link, link1,
1415 nextlinks->insn,
1416 &new_direct_jump_p,
1417 last_combined_insn)) != 0)
1419 statistics_counter_event (cfun, "four-insn combine", 1);
1420 goto retry;
1424 for (next1 = links->next; next1; next1 = next1->next)
1426 rtx_insn *link1 = next1->insn;
1427 if (NOTE_P (link1))
1428 continue;
1429 /* I0 -> I2; I1, I2 -> I3. */
1430 FOR_EACH_LOG_LINK (nextlinks, link)
1431 if ((next = try_combine (insn, link, link1,
1432 nextlinks->insn,
1433 &new_direct_jump_p,
1434 last_combined_insn)) != 0)
1436 statistics_counter_event (cfun, "four-insn combine", 1);
1437 goto retry;
1439 /* I0 -> I1; I1, I2 -> I3. */
1440 FOR_EACH_LOG_LINK (nextlinks, link1)
1441 if ((next = try_combine (insn, link, link1,
1442 nextlinks->insn,
1443 &new_direct_jump_p,
1444 last_combined_insn)) != 0)
1446 statistics_counter_event (cfun, "four-insn combine", 1);
1447 goto retry;
1452 /* Try this insn with each REG_EQUAL note it links back to. */
1453 FOR_EACH_LOG_LINK (links, insn)
1455 rtx set, note;
1456 rtx_insn *temp = links->insn;
1457 if ((set = single_set (temp)) != 0
1458 && (note = find_reg_equal_equiv_note (temp)) != 0
1459 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1460 /* Avoid using a register that may already been marked
1461 dead by an earlier instruction. */
1462 && ! unmentioned_reg_p (note, SET_SRC (set))
1463 && (GET_MODE (note) == VOIDmode
1464 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1465 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1466 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1467 || (GET_MODE (XEXP (SET_DEST (set), 0))
1468 == GET_MODE (note))))))
1470 /* Temporarily replace the set's source with the
1471 contents of the REG_EQUAL note. The insn will
1472 be deleted or recognized by try_combine. */
1473 rtx orig_src = SET_SRC (set);
1474 rtx orig_dest = SET_DEST (set);
1475 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1476 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1477 SET_SRC (set) = note;
1478 i2mod = temp;
1479 i2mod_old_rhs = copy_rtx (orig_src);
1480 i2mod_new_rhs = copy_rtx (note);
1481 next = try_combine (insn, i2mod, NULL, NULL,
1482 &new_direct_jump_p,
1483 last_combined_insn);
1484 i2mod = NULL;
1485 if (next)
1487 statistics_counter_event (cfun, "insn-with-note combine", 1);
1488 goto retry;
1490 SET_SRC (set) = orig_src;
1491 SET_DEST (set) = orig_dest;
1495 if (!NOTE_P (insn))
1496 record_dead_and_set_regs (insn);
1498 retry:
1503 default_rtl_profile ();
1504 clear_bb_flags ();
1505 new_direct_jump_p |= purge_all_dead_edges ();
1506 delete_noop_moves ();
1508 /* Clean up. */
1509 obstack_free (&insn_link_obstack, NULL);
1510 free (uid_log_links);
1511 free (uid_insn_cost);
1512 reg_stat.release ();
1515 struct undo *undo, *next;
1516 for (undo = undobuf.frees; undo; undo = next)
1518 next = undo->next;
1519 free (undo);
1521 undobuf.frees = 0;
1524 total_attempts += combine_attempts;
1525 total_merges += combine_merges;
1526 total_extras += combine_extras;
1527 total_successes += combine_successes;
1529 nonzero_sign_valid = 0;
1530 rtl_hooks = general_rtl_hooks;
1532 /* Make recognizer allow volatile MEMs again. */
1533 init_recog ();
1535 return new_direct_jump_p;
1538 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1540 static void
1541 init_reg_last (void)
1543 unsigned int i;
1544 reg_stat_type *p;
1546 FOR_EACH_VEC_ELT (reg_stat, i, p)
1547 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1550 /* Set up any promoted values for incoming argument registers. */
1552 static void
1553 setup_incoming_promotions (rtx_insn *first)
1555 tree arg;
1556 bool strictly_local = false;
1558 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1559 arg = DECL_CHAIN (arg))
1561 rtx x, reg = DECL_INCOMING_RTL (arg);
1562 int uns1, uns3;
1563 machine_mode mode1, mode2, mode3, mode4;
1565 /* Only continue if the incoming argument is in a register. */
1566 if (!REG_P (reg))
1567 continue;
1569 /* Determine, if possible, whether all call sites of the current
1570 function lie within the current compilation unit. (This does
1571 take into account the exporting of a function via taking its
1572 address, and so forth.) */
1573 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1575 /* The mode and signedness of the argument before any promotions happen
1576 (equal to the mode of the pseudo holding it at that stage). */
1577 mode1 = TYPE_MODE (TREE_TYPE (arg));
1578 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1580 /* The mode and signedness of the argument after any source language and
1581 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1582 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1583 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1585 /* The mode and signedness of the argument as it is actually passed,
1586 see assign_parm_setup_reg in function.c. */
1587 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1588 TREE_TYPE (cfun->decl), 0);
1590 /* The mode of the register in which the argument is being passed. */
1591 mode4 = GET_MODE (reg);
1593 /* Eliminate sign extensions in the callee when:
1594 (a) A mode promotion has occurred; */
1595 if (mode1 == mode3)
1596 continue;
1597 /* (b) The mode of the register is the same as the mode of
1598 the argument as it is passed; */
1599 if (mode3 != mode4)
1600 continue;
1601 /* (c) There's no language level extension; */
1602 if (mode1 == mode2)
1604 /* (c.1) All callers are from the current compilation unit. If that's
1605 the case we don't have to rely on an ABI, we only have to know
1606 what we're generating right now, and we know that we will do the
1607 mode1 to mode2 promotion with the given sign. */
1608 else if (!strictly_local)
1609 continue;
1610 /* (c.2) The combination of the two promotions is useful. This is
1611 true when the signs match, or if the first promotion is unsigned.
1612 In the later case, (sign_extend (zero_extend x)) is the same as
1613 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1614 else if (uns1)
1615 uns3 = true;
1616 else if (uns3)
1617 continue;
1619 /* Record that the value was promoted from mode1 to mode3,
1620 so that any sign extension at the head of the current
1621 function may be eliminated. */
1622 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1623 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1624 record_value_for_reg (reg, first, x);
1628 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1629 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1630 because some machines (maybe most) will actually do the sign-extension and
1631 this is the conservative approach.
1633 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1634 kludge. */
1636 static rtx
1637 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1639 scalar_int_mode int_mode;
1640 if (CONST_INT_P (src)
1641 && is_a <scalar_int_mode> (mode, &int_mode)
1642 && GET_MODE_PRECISION (int_mode) < prec
1643 && INTVAL (src) > 0
1644 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1645 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1647 return src;
1650 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1651 and SET. */
1653 static void
1654 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1655 rtx x)
1657 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1658 unsigned HOST_WIDE_INT bits = 0;
1659 rtx reg_equal = NULL, src = SET_SRC (set);
1660 unsigned int num = 0;
1662 if (reg_equal_note)
1663 reg_equal = XEXP (reg_equal_note, 0);
1665 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1667 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1668 if (reg_equal)
1669 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1672 /* Don't call nonzero_bits if it cannot change anything. */
1673 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1675 bits = nonzero_bits (src, nonzero_bits_mode);
1676 if (reg_equal && bits)
1677 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1678 rsp->nonzero_bits |= bits;
1681 /* Don't call num_sign_bit_copies if it cannot change anything. */
1682 if (rsp->sign_bit_copies != 1)
1684 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1685 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1687 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1688 if (num == 0 || numeq > num)
1689 num = numeq;
1691 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1692 rsp->sign_bit_copies = num;
1696 /* Called via note_stores. If X is a pseudo that is narrower than
1697 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1699 If we are setting only a portion of X and we can't figure out what
1700 portion, assume all bits will be used since we don't know what will
1701 be happening.
1703 Similarly, set how many bits of X are known to be copies of the sign bit
1704 at all locations in the function. This is the smallest number implied
1705 by any set of X. */
1707 static void
1708 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1710 rtx_insn *insn = (rtx_insn *) data;
1711 scalar_int_mode mode;
1713 if (REG_P (x)
1714 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1715 /* If this register is undefined at the start of the file, we can't
1716 say what its contents were. */
1717 && ! REGNO_REG_SET_P
1718 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1719 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1720 && HWI_COMPUTABLE_MODE_P (mode))
1722 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1724 if (set == 0 || GET_CODE (set) == CLOBBER)
1726 rsp->nonzero_bits = GET_MODE_MASK (mode);
1727 rsp->sign_bit_copies = 1;
1728 return;
1731 /* If this register is being initialized using itself, and the
1732 register is uninitialized in this basic block, and there are
1733 no LOG_LINKS which set the register, then part of the
1734 register is uninitialized. In that case we can't assume
1735 anything about the number of nonzero bits.
1737 ??? We could do better if we checked this in
1738 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1739 could avoid making assumptions about the insn which initially
1740 sets the register, while still using the information in other
1741 insns. We would have to be careful to check every insn
1742 involved in the combination. */
1744 if (insn
1745 && reg_referenced_p (x, PATTERN (insn))
1746 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1747 REGNO (x)))
1749 struct insn_link *link;
1751 FOR_EACH_LOG_LINK (link, insn)
1752 if (dead_or_set_p (link->insn, x))
1753 break;
1754 if (!link)
1756 rsp->nonzero_bits = GET_MODE_MASK (mode);
1757 rsp->sign_bit_copies = 1;
1758 return;
1762 /* If this is a complex assignment, see if we can convert it into a
1763 simple assignment. */
1764 set = expand_field_assignment (set);
1766 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1767 set what we know about X. */
1769 if (SET_DEST (set) == x
1770 || (paradoxical_subreg_p (SET_DEST (set))
1771 && SUBREG_REG (SET_DEST (set)) == x))
1772 update_rsp_from_reg_equal (rsp, insn, set, x);
1773 else
1775 rsp->nonzero_bits = GET_MODE_MASK (mode);
1776 rsp->sign_bit_copies = 1;
1781 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1782 optionally insns that were previously combined into I3 or that will be
1783 combined into the merger of INSN and I3. The order is PRED, PRED2,
1784 INSN, SUCC, SUCC2, I3.
1786 Return 0 if the combination is not allowed for any reason.
1788 If the combination is allowed, *PDEST will be set to the single
1789 destination of INSN and *PSRC to the single source, and this function
1790 will return 1. */
1792 static int
1793 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1794 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1795 rtx *pdest, rtx *psrc)
1797 int i;
1798 const_rtx set = 0;
1799 rtx src, dest;
1800 rtx_insn *p;
1801 rtx link;
1802 bool all_adjacent = true;
1803 int (*is_volatile_p) (const_rtx);
1805 if (succ)
1807 if (succ2)
1809 if (next_active_insn (succ2) != i3)
1810 all_adjacent = false;
1811 if (next_active_insn (succ) != succ2)
1812 all_adjacent = false;
1814 else if (next_active_insn (succ) != i3)
1815 all_adjacent = false;
1816 if (next_active_insn (insn) != succ)
1817 all_adjacent = false;
1819 else if (next_active_insn (insn) != i3)
1820 all_adjacent = false;
1822 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1823 or a PARALLEL consisting of such a SET and CLOBBERs.
1825 If INSN has CLOBBER parallel parts, ignore them for our processing.
1826 By definition, these happen during the execution of the insn. When it
1827 is merged with another insn, all bets are off. If they are, in fact,
1828 needed and aren't also supplied in I3, they may be added by
1829 recog_for_combine. Otherwise, it won't match.
1831 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1832 note.
1834 Get the source and destination of INSN. If more than one, can't
1835 combine. */
1837 if (GET_CODE (PATTERN (insn)) == SET)
1838 set = PATTERN (insn);
1839 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1840 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1842 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1844 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1846 switch (GET_CODE (elt))
1848 /* This is important to combine floating point insns
1849 for the SH4 port. */
1850 case USE:
1851 /* Combining an isolated USE doesn't make sense.
1852 We depend here on combinable_i3pat to reject them. */
1853 /* The code below this loop only verifies that the inputs of
1854 the SET in INSN do not change. We call reg_set_between_p
1855 to verify that the REG in the USE does not change between
1856 I3 and INSN.
1857 If the USE in INSN was for a pseudo register, the matching
1858 insn pattern will likely match any register; combining this
1859 with any other USE would only be safe if we knew that the
1860 used registers have identical values, or if there was
1861 something to tell them apart, e.g. different modes. For
1862 now, we forgo such complicated tests and simply disallow
1863 combining of USES of pseudo registers with any other USE. */
1864 if (REG_P (XEXP (elt, 0))
1865 && GET_CODE (PATTERN (i3)) == PARALLEL)
1867 rtx i3pat = PATTERN (i3);
1868 int i = XVECLEN (i3pat, 0) - 1;
1869 unsigned int regno = REGNO (XEXP (elt, 0));
1873 rtx i3elt = XVECEXP (i3pat, 0, i);
1875 if (GET_CODE (i3elt) == USE
1876 && REG_P (XEXP (i3elt, 0))
1877 && (REGNO (XEXP (i3elt, 0)) == regno
1878 ? reg_set_between_p (XEXP (elt, 0),
1879 PREV_INSN (insn), i3)
1880 : regno >= FIRST_PSEUDO_REGISTER))
1881 return 0;
1883 while (--i >= 0);
1885 break;
1887 /* We can ignore CLOBBERs. */
1888 case CLOBBER:
1889 break;
1891 case SET:
1892 /* Ignore SETs whose result isn't used but not those that
1893 have side-effects. */
1894 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1895 && insn_nothrow_p (insn)
1896 && !side_effects_p (elt))
1897 break;
1899 /* If we have already found a SET, this is a second one and
1900 so we cannot combine with this insn. */
1901 if (set)
1902 return 0;
1904 set = elt;
1905 break;
1907 default:
1908 /* Anything else means we can't combine. */
1909 return 0;
1913 if (set == 0
1914 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1915 so don't do anything with it. */
1916 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1917 return 0;
1919 else
1920 return 0;
1922 if (set == 0)
1923 return 0;
1925 /* The simplification in expand_field_assignment may call back to
1926 get_last_value, so set safe guard here. */
1927 subst_low_luid = DF_INSN_LUID (insn);
1929 set = expand_field_assignment (set);
1930 src = SET_SRC (set), dest = SET_DEST (set);
1932 /* Do not eliminate user-specified register if it is in an
1933 asm input because we may break the register asm usage defined
1934 in GCC manual if allow to do so.
1935 Be aware that this may cover more cases than we expect but this
1936 should be harmless. */
1937 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1938 && extract_asm_operands (PATTERN (i3)))
1939 return 0;
1941 /* Don't eliminate a store in the stack pointer. */
1942 if (dest == stack_pointer_rtx
1943 /* Don't combine with an insn that sets a register to itself if it has
1944 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1945 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1946 /* Can't merge an ASM_OPERANDS. */
1947 || GET_CODE (src) == ASM_OPERANDS
1948 /* Can't merge a function call. */
1949 || GET_CODE (src) == CALL
1950 /* Don't eliminate a function call argument. */
1951 || (CALL_P (i3)
1952 && (find_reg_fusage (i3, USE, dest)
1953 || (REG_P (dest)
1954 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1955 && global_regs[REGNO (dest)])))
1956 /* Don't substitute into an incremented register. */
1957 || FIND_REG_INC_NOTE (i3, dest)
1958 || (succ && FIND_REG_INC_NOTE (succ, dest))
1959 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1960 /* Don't substitute into a non-local goto, this confuses CFG. */
1961 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1962 /* Make sure that DEST is not used after INSN but before SUCC, or
1963 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1964 || (!all_adjacent
1965 && ((succ2
1966 && (reg_used_between_p (dest, succ2, i3)
1967 || reg_used_between_p (dest, succ, succ2)))
1968 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1969 || (succ
1970 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1971 that case SUCC is not in the insn stream, so use SUCC2
1972 instead for this test. */
1973 && reg_used_between_p (dest, insn,
1974 succ2
1975 && INSN_UID (succ) == INSN_UID (succ2)
1976 ? succ2 : succ))))
1977 /* Make sure that the value that is to be substituted for the register
1978 does not use any registers whose values alter in between. However,
1979 If the insns are adjacent, a use can't cross a set even though we
1980 think it might (this can happen for a sequence of insns each setting
1981 the same destination; last_set of that register might point to
1982 a NOTE). If INSN has a REG_EQUIV note, the register is always
1983 equivalent to the memory so the substitution is valid even if there
1984 are intervening stores. Also, don't move a volatile asm or
1985 UNSPEC_VOLATILE across any other insns. */
1986 || (! all_adjacent
1987 && (((!MEM_P (src)
1988 || ! find_reg_note (insn, REG_EQUIV, src))
1989 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1990 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1991 || GET_CODE (src) == UNSPEC_VOLATILE))
1992 /* Don't combine across a CALL_INSN, because that would possibly
1993 change whether the life span of some REGs crosses calls or not,
1994 and it is a pain to update that information.
1995 Exception: if source is a constant, moving it later can't hurt.
1996 Accept that as a special case. */
1997 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1998 return 0;
2000 /* DEST must either be a REG or CC0. */
2001 if (REG_P (dest))
2003 /* If register alignment is being enforced for multi-word items in all
2004 cases except for parameters, it is possible to have a register copy
2005 insn referencing a hard register that is not allowed to contain the
2006 mode being copied and which would not be valid as an operand of most
2007 insns. Eliminate this problem by not combining with such an insn.
2009 Also, on some machines we don't want to extend the life of a hard
2010 register. */
2012 if (REG_P (src)
2013 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2014 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2015 /* Don't extend the life of a hard register unless it is
2016 user variable (if we have few registers) or it can't
2017 fit into the desired register (meaning something special
2018 is going on).
2019 Also avoid substituting a return register into I3, because
2020 reload can't handle a conflict with constraints of other
2021 inputs. */
2022 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2023 && !targetm.hard_regno_mode_ok (REGNO (src),
2024 GET_MODE (src)))))
2025 return 0;
2027 else if (GET_CODE (dest) != CC0)
2028 return 0;
2031 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2032 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2033 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2035 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2037 /* If the clobber represents an earlyclobber operand, we must not
2038 substitute an expression containing the clobbered register.
2039 As we do not analyze the constraint strings here, we have to
2040 make the conservative assumption. However, if the register is
2041 a fixed hard reg, the clobber cannot represent any operand;
2042 we leave it up to the machine description to either accept or
2043 reject use-and-clobber patterns. */
2044 if (!REG_P (reg)
2045 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2046 || !fixed_regs[REGNO (reg)])
2047 if (reg_overlap_mentioned_p (reg, src))
2048 return 0;
2051 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2052 or not), reject, unless nothing volatile comes between it and I3 */
2054 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2056 /* Make sure neither succ nor succ2 contains a volatile reference. */
2057 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2058 return 0;
2059 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2060 return 0;
2061 /* We'll check insns between INSN and I3 below. */
2064 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2065 to be an explicit register variable, and was chosen for a reason. */
2067 if (GET_CODE (src) == ASM_OPERANDS
2068 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2069 return 0;
2071 /* If INSN contains volatile references (specifically volatile MEMs),
2072 we cannot combine across any other volatile references.
2073 Even if INSN doesn't contain volatile references, any intervening
2074 volatile insn might affect machine state. */
2076 is_volatile_p = volatile_refs_p (PATTERN (insn))
2077 ? volatile_refs_p
2078 : volatile_insn_p;
2080 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2081 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2082 return 0;
2084 /* If INSN contains an autoincrement or autodecrement, make sure that
2085 register is not used between there and I3, and not already used in
2086 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2087 Also insist that I3 not be a jump; if it were one
2088 and the incremented register were spilled, we would lose. */
2090 if (AUTO_INC_DEC)
2091 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2092 if (REG_NOTE_KIND (link) == REG_INC
2093 && (JUMP_P (i3)
2094 || reg_used_between_p (XEXP (link, 0), insn, i3)
2095 || (pred != NULL_RTX
2096 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2097 || (pred2 != NULL_RTX
2098 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2099 || (succ != NULL_RTX
2100 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2101 || (succ2 != NULL_RTX
2102 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2103 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2104 return 0;
2106 /* Don't combine an insn that follows a CC0-setting insn.
2107 An insn that uses CC0 must not be separated from the one that sets it.
2108 We do, however, allow I2 to follow a CC0-setting insn if that insn
2109 is passed as I1; in that case it will be deleted also.
2110 We also allow combining in this case if all the insns are adjacent
2111 because that would leave the two CC0 insns adjacent as well.
2112 It would be more logical to test whether CC0 occurs inside I1 or I2,
2113 but that would be much slower, and this ought to be equivalent. */
2115 if (HAVE_cc0)
2117 p = prev_nonnote_insn (insn);
2118 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2119 && ! all_adjacent)
2120 return 0;
2123 /* If we get here, we have passed all the tests and the combination is
2124 to be allowed. */
2126 *pdest = dest;
2127 *psrc = src;
2129 return 1;
2132 /* LOC is the location within I3 that contains its pattern or the component
2133 of a PARALLEL of the pattern. We validate that it is valid for combining.
2135 One problem is if I3 modifies its output, as opposed to replacing it
2136 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2137 doing so would produce an insn that is not equivalent to the original insns.
2139 Consider:
2141 (set (reg:DI 101) (reg:DI 100))
2142 (set (subreg:SI (reg:DI 101) 0) <foo>)
2144 This is NOT equivalent to:
2146 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2147 (set (reg:DI 101) (reg:DI 100))])
2149 Not only does this modify 100 (in which case it might still be valid
2150 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2152 We can also run into a problem if I2 sets a register that I1
2153 uses and I1 gets directly substituted into I3 (not via I2). In that
2154 case, we would be getting the wrong value of I2DEST into I3, so we
2155 must reject the combination. This case occurs when I2 and I1 both
2156 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2157 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2158 of a SET must prevent combination from occurring. The same situation
2159 can occur for I0, in which case I0_NOT_IN_SRC is set.
2161 Before doing the above check, we first try to expand a field assignment
2162 into a set of logical operations.
2164 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2165 we place a register that is both set and used within I3. If more than one
2166 such register is detected, we fail.
2168 Return 1 if the combination is valid, zero otherwise. */
2170 static int
2171 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2172 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2174 rtx x = *loc;
2176 if (GET_CODE (x) == SET)
2178 rtx set = x ;
2179 rtx dest = SET_DEST (set);
2180 rtx src = SET_SRC (set);
2181 rtx inner_dest = dest;
2182 rtx subdest;
2184 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2185 || GET_CODE (inner_dest) == SUBREG
2186 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2187 inner_dest = XEXP (inner_dest, 0);
2189 /* Check for the case where I3 modifies its output, as discussed
2190 above. We don't want to prevent pseudos from being combined
2191 into the address of a MEM, so only prevent the combination if
2192 i1 or i2 set the same MEM. */
2193 if ((inner_dest != dest &&
2194 (!MEM_P (inner_dest)
2195 || rtx_equal_p (i2dest, inner_dest)
2196 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2197 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2198 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2199 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2200 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2202 /* This is the same test done in can_combine_p except we can't test
2203 all_adjacent; we don't have to, since this instruction will stay
2204 in place, thus we are not considering increasing the lifetime of
2205 INNER_DEST.
2207 Also, if this insn sets a function argument, combining it with
2208 something that might need a spill could clobber a previous
2209 function argument; the all_adjacent test in can_combine_p also
2210 checks this; here, we do a more specific test for this case. */
2212 || (REG_P (inner_dest)
2213 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2214 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2215 GET_MODE (inner_dest)))
2216 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2217 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2218 return 0;
2220 /* If DEST is used in I3, it is being killed in this insn, so
2221 record that for later. We have to consider paradoxical
2222 subregs here, since they kill the whole register, but we
2223 ignore partial subregs, STRICT_LOW_PART, etc.
2224 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2225 STACK_POINTER_REGNUM, since these are always considered to be
2226 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2227 subdest = dest;
2228 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2229 subdest = SUBREG_REG (subdest);
2230 if (pi3dest_killed
2231 && REG_P (subdest)
2232 && reg_referenced_p (subdest, PATTERN (i3))
2233 && REGNO (subdest) != FRAME_POINTER_REGNUM
2234 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2235 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2236 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2237 || (REGNO (subdest) != ARG_POINTER_REGNUM
2238 || ! fixed_regs [REGNO (subdest)]))
2239 && REGNO (subdest) != STACK_POINTER_REGNUM)
2241 if (*pi3dest_killed)
2242 return 0;
2244 *pi3dest_killed = subdest;
2248 else if (GET_CODE (x) == PARALLEL)
2250 int i;
2252 for (i = 0; i < XVECLEN (x, 0); i++)
2253 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2254 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2255 return 0;
2258 return 1;
2261 /* Return 1 if X is an arithmetic expression that contains a multiplication
2262 and division. We don't count multiplications by powers of two here. */
2264 static int
2265 contains_muldiv (rtx x)
2267 switch (GET_CODE (x))
2269 case MOD: case DIV: case UMOD: case UDIV:
2270 return 1;
2272 case MULT:
2273 return ! (CONST_INT_P (XEXP (x, 1))
2274 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2275 default:
2276 if (BINARY_P (x))
2277 return contains_muldiv (XEXP (x, 0))
2278 || contains_muldiv (XEXP (x, 1));
2280 if (UNARY_P (x))
2281 return contains_muldiv (XEXP (x, 0));
2283 return 0;
2287 /* Determine whether INSN can be used in a combination. Return nonzero if
2288 not. This is used in try_combine to detect early some cases where we
2289 can't perform combinations. */
2291 static int
2292 cant_combine_insn_p (rtx_insn *insn)
2294 rtx set;
2295 rtx src, dest;
2297 /* If this isn't really an insn, we can't do anything.
2298 This can occur when flow deletes an insn that it has merged into an
2299 auto-increment address. */
2300 if (!NONDEBUG_INSN_P (insn))
2301 return 1;
2303 /* Never combine loads and stores involving hard regs that are likely
2304 to be spilled. The register allocator can usually handle such
2305 reg-reg moves by tying. If we allow the combiner to make
2306 substitutions of likely-spilled regs, reload might die.
2307 As an exception, we allow combinations involving fixed regs; these are
2308 not available to the register allocator so there's no risk involved. */
2310 set = single_set (insn);
2311 if (! set)
2312 return 0;
2313 src = SET_SRC (set);
2314 dest = SET_DEST (set);
2315 if (GET_CODE (src) == SUBREG)
2316 src = SUBREG_REG (src);
2317 if (GET_CODE (dest) == SUBREG)
2318 dest = SUBREG_REG (dest);
2319 if (REG_P (src) && REG_P (dest)
2320 && ((HARD_REGISTER_P (src)
2321 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2322 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2323 || (HARD_REGISTER_P (dest)
2324 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2325 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2326 return 1;
2328 return 0;
2331 struct likely_spilled_retval_info
2333 unsigned regno, nregs;
2334 unsigned mask;
2337 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2338 hard registers that are known to be written to / clobbered in full. */
2339 static void
2340 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2342 struct likely_spilled_retval_info *const info =
2343 (struct likely_spilled_retval_info *) data;
2344 unsigned regno, nregs;
2345 unsigned new_mask;
2347 if (!REG_P (XEXP (set, 0)))
2348 return;
2349 regno = REGNO (x);
2350 if (regno >= info->regno + info->nregs)
2351 return;
2352 nregs = REG_NREGS (x);
2353 if (regno + nregs <= info->regno)
2354 return;
2355 new_mask = (2U << (nregs - 1)) - 1;
2356 if (regno < info->regno)
2357 new_mask >>= info->regno - regno;
2358 else
2359 new_mask <<= regno - info->regno;
2360 info->mask &= ~new_mask;
2363 /* Return nonzero iff part of the return value is live during INSN, and
2364 it is likely spilled. This can happen when more than one insn is needed
2365 to copy the return value, e.g. when we consider to combine into the
2366 second copy insn for a complex value. */
2368 static int
2369 likely_spilled_retval_p (rtx_insn *insn)
2371 rtx_insn *use = BB_END (this_basic_block);
2372 rtx reg;
2373 rtx_insn *p;
2374 unsigned regno, nregs;
2375 /* We assume here that no machine mode needs more than
2376 32 hard registers when the value overlaps with a register
2377 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2378 unsigned mask;
2379 struct likely_spilled_retval_info info;
2381 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2382 return 0;
2383 reg = XEXP (PATTERN (use), 0);
2384 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2385 return 0;
2386 regno = REGNO (reg);
2387 nregs = REG_NREGS (reg);
2388 if (nregs == 1)
2389 return 0;
2390 mask = (2U << (nregs - 1)) - 1;
2392 /* Disregard parts of the return value that are set later. */
2393 info.regno = regno;
2394 info.nregs = nregs;
2395 info.mask = mask;
2396 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2397 if (INSN_P (p))
2398 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2399 mask = info.mask;
2401 /* Check if any of the (probably) live return value registers is
2402 likely spilled. */
2403 nregs --;
2406 if ((mask & 1 << nregs)
2407 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2408 return 1;
2409 } while (nregs--);
2410 return 0;
2413 /* Adjust INSN after we made a change to its destination.
2415 Changing the destination can invalidate notes that say something about
2416 the results of the insn and a LOG_LINK pointing to the insn. */
2418 static void
2419 adjust_for_new_dest (rtx_insn *insn)
2421 /* For notes, be conservative and simply remove them. */
2422 remove_reg_equal_equiv_notes (insn);
2424 /* The new insn will have a destination that was previously the destination
2425 of an insn just above it. Call distribute_links to make a LOG_LINK from
2426 the next use of that destination. */
2428 rtx set = single_set (insn);
2429 gcc_assert (set);
2431 rtx reg = SET_DEST (set);
2433 while (GET_CODE (reg) == ZERO_EXTRACT
2434 || GET_CODE (reg) == STRICT_LOW_PART
2435 || GET_CODE (reg) == SUBREG)
2436 reg = XEXP (reg, 0);
2437 gcc_assert (REG_P (reg));
2439 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2441 df_insn_rescan (insn);
2444 /* Return TRUE if combine can reuse reg X in mode MODE.
2445 ADDED_SETS is nonzero if the original set is still required. */
2446 static bool
2447 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2449 unsigned int regno;
2451 if (!REG_P (x))
2452 return false;
2454 regno = REGNO (x);
2455 /* Allow hard registers if the new mode is legal, and occupies no more
2456 registers than the old mode. */
2457 if (regno < FIRST_PSEUDO_REGISTER)
2458 return (targetm.hard_regno_mode_ok (regno, mode)
2459 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2461 /* Or a pseudo that is only used once. */
2462 return (regno < reg_n_sets_max
2463 && REG_N_SETS (regno) == 1
2464 && !added_sets
2465 && !REG_USERVAR_P (x));
2469 /* Check whether X, the destination of a set, refers to part of
2470 the register specified by REG. */
2472 static bool
2473 reg_subword_p (rtx x, rtx reg)
2475 /* Check that reg is an integer mode register. */
2476 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2477 return false;
2479 if (GET_CODE (x) == STRICT_LOW_PART
2480 || GET_CODE (x) == ZERO_EXTRACT)
2481 x = XEXP (x, 0);
2483 return GET_CODE (x) == SUBREG
2484 && SUBREG_REG (x) == reg
2485 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2488 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2489 Note that the INSN should be deleted *after* removing dead edges, so
2490 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2491 but not for a (set (pc) (label_ref FOO)). */
2493 static void
2494 update_cfg_for_uncondjump (rtx_insn *insn)
2496 basic_block bb = BLOCK_FOR_INSN (insn);
2497 gcc_assert (BB_END (bb) == insn);
2499 purge_dead_edges (bb);
2501 delete_insn (insn);
2502 if (EDGE_COUNT (bb->succs) == 1)
2504 rtx_insn *insn;
2506 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2508 /* Remove barriers from the footer if there are any. */
2509 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2510 if (BARRIER_P (insn))
2512 if (PREV_INSN (insn))
2513 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2514 else
2515 BB_FOOTER (bb) = NEXT_INSN (insn);
2516 if (NEXT_INSN (insn))
2517 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2519 else if (LABEL_P (insn))
2520 break;
2524 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2525 by an arbitrary number of CLOBBERs. */
2526 static bool
2527 is_parallel_of_n_reg_sets (rtx pat, int n)
2529 if (GET_CODE (pat) != PARALLEL)
2530 return false;
2532 int len = XVECLEN (pat, 0);
2533 if (len < n)
2534 return false;
2536 int i;
2537 for (i = 0; i < n; i++)
2538 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2539 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2540 return false;
2541 for ( ; i < len; i++)
2542 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2543 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2544 return false;
2546 return true;
2549 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2550 CLOBBERs), can be split into individual SETs in that order, without
2551 changing semantics. */
2552 static bool
2553 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2555 if (!insn_nothrow_p (insn))
2556 return false;
2558 rtx pat = PATTERN (insn);
2560 int i, j;
2561 for (i = 0; i < n; i++)
2563 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2564 return false;
2566 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2568 for (j = i + 1; j < n; j++)
2569 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2570 return false;
2573 return true;
2576 /* Try to combine the insns I0, I1 and I2 into I3.
2577 Here I0, I1 and I2 appear earlier than I3.
2578 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2581 If we are combining more than two insns and the resulting insn is not
2582 recognized, try splitting it into two insns. If that happens, I2 and I3
2583 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2584 Otherwise, I0, I1 and I2 are pseudo-deleted.
2586 Return 0 if the combination does not work. Then nothing is changed.
2587 If we did the combination, return the insn at which combine should
2588 resume scanning.
2590 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2591 new direct jump instruction.
2593 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2594 been I3 passed to an earlier try_combine within the same basic
2595 block. */
2597 static rtx_insn *
2598 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2599 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2601 /* New patterns for I3 and I2, respectively. */
2602 rtx newpat, newi2pat = 0;
2603 rtvec newpat_vec_with_clobbers = 0;
2604 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2605 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2606 dead. */
2607 int added_sets_0, added_sets_1, added_sets_2;
2608 /* Total number of SETs to put into I3. */
2609 int total_sets;
2610 /* Nonzero if I2's or I1's body now appears in I3. */
2611 int i2_is_used = 0, i1_is_used = 0;
2612 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2613 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2614 /* Contains I3 if the destination of I3 is used in its source, which means
2615 that the old life of I3 is being killed. If that usage is placed into
2616 I2 and not in I3, a REG_DEAD note must be made. */
2617 rtx i3dest_killed = 0;
2618 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2619 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2620 /* Copy of SET_SRC of I1 and I0, if needed. */
2621 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2622 /* Set if I2DEST was reused as a scratch register. */
2623 bool i2scratch = false;
2624 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2625 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2626 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2627 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2628 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2629 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2630 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2631 /* Notes that must be added to REG_NOTES in I3 and I2. */
2632 rtx new_i3_notes, new_i2_notes;
2633 /* Notes that we substituted I3 into I2 instead of the normal case. */
2634 int i3_subst_into_i2 = 0;
2635 /* Notes that I1, I2 or I3 is a MULT operation. */
2636 int have_mult = 0;
2637 int swap_i2i3 = 0;
2638 int changed_i3_dest = 0;
2640 int maxreg;
2641 rtx_insn *temp_insn;
2642 rtx temp_expr;
2643 struct insn_link *link;
2644 rtx other_pat = 0;
2645 rtx new_other_notes;
2646 int i;
2647 scalar_int_mode dest_mode, temp_mode;
2649 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2650 never be). */
2651 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2652 return 0;
2654 /* Only try four-insn combinations when there's high likelihood of
2655 success. Look for simple insns, such as loads of constants or
2656 binary operations involving a constant. */
2657 if (i0)
2659 int i;
2660 int ngood = 0;
2661 int nshift = 0;
2662 rtx set0, set3;
2664 if (!flag_expensive_optimizations)
2665 return 0;
2667 for (i = 0; i < 4; i++)
2669 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2670 rtx set = single_set (insn);
2671 rtx src;
2672 if (!set)
2673 continue;
2674 src = SET_SRC (set);
2675 if (CONSTANT_P (src))
2677 ngood += 2;
2678 break;
2680 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2681 ngood++;
2682 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2683 || GET_CODE (src) == LSHIFTRT)
2684 nshift++;
2687 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2688 are likely manipulating its value. Ideally we'll be able to combine
2689 all four insns into a bitfield insertion of some kind.
2691 Note the source in I0 might be inside a sign/zero extension and the
2692 memory modes in I0 and I3 might be different. So extract the address
2693 from the destination of I3 and search for it in the source of I0.
2695 In the event that there's a match but the source/dest do not actually
2696 refer to the same memory, the worst that happens is we try some
2697 combinations that we wouldn't have otherwise. */
2698 if ((set0 = single_set (i0))
2699 /* Ensure the source of SET0 is a MEM, possibly buried inside
2700 an extension. */
2701 && (GET_CODE (SET_SRC (set0)) == MEM
2702 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2703 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2704 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2705 && (set3 = single_set (i3))
2706 /* Ensure the destination of SET3 is a MEM. */
2707 && GET_CODE (SET_DEST (set3)) == MEM
2708 /* Would it be better to extract the base address for the MEM
2709 in SET3 and look for that? I don't have cases where it matters
2710 but I could envision such cases. */
2711 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2712 ngood += 2;
2714 if (ngood < 2 && nshift < 2)
2715 return 0;
2718 /* Exit early if one of the insns involved can't be used for
2719 combinations. */
2720 if (CALL_P (i2)
2721 || (i1 && CALL_P (i1))
2722 || (i0 && CALL_P (i0))
2723 || cant_combine_insn_p (i3)
2724 || cant_combine_insn_p (i2)
2725 || (i1 && cant_combine_insn_p (i1))
2726 || (i0 && cant_combine_insn_p (i0))
2727 || likely_spilled_retval_p (i3))
2728 return 0;
2730 combine_attempts++;
2731 undobuf.other_insn = 0;
2733 /* Reset the hard register usage information. */
2734 CLEAR_HARD_REG_SET (newpat_used_regs);
2736 if (dump_file && (dump_flags & TDF_DETAILS))
2738 if (i0)
2739 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2740 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2741 else if (i1)
2742 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2743 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2744 else
2745 fprintf (dump_file, "\nTrying %d -> %d:\n",
2746 INSN_UID (i2), INSN_UID (i3));
2749 /* If multiple insns feed into one of I2 or I3, they can be in any
2750 order. To simplify the code below, reorder them in sequence. */
2751 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2752 std::swap (i0, i2);
2753 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2754 std::swap (i0, i1);
2755 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2756 std::swap (i1, i2);
2758 added_links_insn = 0;
2760 /* First check for one important special case that the code below will
2761 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2762 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2763 we may be able to replace that destination with the destination of I3.
2764 This occurs in the common code where we compute both a quotient and
2765 remainder into a structure, in which case we want to do the computation
2766 directly into the structure to avoid register-register copies.
2768 Note that this case handles both multiple sets in I2 and also cases
2769 where I2 has a number of CLOBBERs inside the PARALLEL.
2771 We make very conservative checks below and only try to handle the
2772 most common cases of this. For example, we only handle the case
2773 where I2 and I3 are adjacent to avoid making difficult register
2774 usage tests. */
2776 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2777 && REG_P (SET_SRC (PATTERN (i3)))
2778 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2779 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2780 && GET_CODE (PATTERN (i2)) == PARALLEL
2781 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2782 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2783 below would need to check what is inside (and reg_overlap_mentioned_p
2784 doesn't support those codes anyway). Don't allow those destinations;
2785 the resulting insn isn't likely to be recognized anyway. */
2786 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2787 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2788 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2789 SET_DEST (PATTERN (i3)))
2790 && next_active_insn (i2) == i3)
2792 rtx p2 = PATTERN (i2);
2794 /* Make sure that the destination of I3,
2795 which we are going to substitute into one output of I2,
2796 is not used within another output of I2. We must avoid making this:
2797 (parallel [(set (mem (reg 69)) ...)
2798 (set (reg 69) ...)])
2799 which is not well-defined as to order of actions.
2800 (Besides, reload can't handle output reloads for this.)
2802 The problem can also happen if the dest of I3 is a memory ref,
2803 if another dest in I2 is an indirect memory ref.
2805 Neither can this PARALLEL be an asm. We do not allow combining
2806 that usually (see can_combine_p), so do not here either. */
2807 bool ok = true;
2808 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2810 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2811 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2812 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2813 SET_DEST (XVECEXP (p2, 0, i))))
2814 ok = false;
2815 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2816 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2817 ok = false;
2820 if (ok)
2821 for (i = 0; i < XVECLEN (p2, 0); i++)
2822 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2823 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2825 combine_merges++;
2827 subst_insn = i3;
2828 subst_low_luid = DF_INSN_LUID (i2);
2830 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2831 i2src = SET_SRC (XVECEXP (p2, 0, i));
2832 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2833 i2dest_killed = dead_or_set_p (i2, i2dest);
2835 /* Replace the dest in I2 with our dest and make the resulting
2836 insn the new pattern for I3. Then skip to where we validate
2837 the pattern. Everything was set up above. */
2838 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2839 newpat = p2;
2840 i3_subst_into_i2 = 1;
2841 goto validate_replacement;
2845 /* If I2 is setting a pseudo to a constant and I3 is setting some
2846 sub-part of it to another constant, merge them by making a new
2847 constant. */
2848 if (i1 == 0
2849 && (temp_expr = single_set (i2)) != 0
2850 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2851 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2852 && GET_CODE (PATTERN (i3)) == SET
2853 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2854 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2856 rtx dest = SET_DEST (PATTERN (i3));
2857 rtx temp_dest = SET_DEST (temp_expr);
2858 int offset = -1;
2859 int width = 0;
2861 if (GET_CODE (dest) == ZERO_EXTRACT)
2863 if (CONST_INT_P (XEXP (dest, 1))
2864 && CONST_INT_P (XEXP (dest, 2))
2865 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2866 &dest_mode))
2868 width = INTVAL (XEXP (dest, 1));
2869 offset = INTVAL (XEXP (dest, 2));
2870 dest = XEXP (dest, 0);
2871 if (BITS_BIG_ENDIAN)
2872 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2875 else
2877 if (GET_CODE (dest) == STRICT_LOW_PART)
2878 dest = XEXP (dest, 0);
2879 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2881 width = GET_MODE_PRECISION (dest_mode);
2882 offset = 0;
2886 if (offset >= 0)
2888 /* If this is the low part, we're done. */
2889 if (subreg_lowpart_p (dest))
2891 /* Handle the case where inner is twice the size of outer. */
2892 else if (GET_MODE_PRECISION (temp_mode)
2893 == 2 * GET_MODE_PRECISION (dest_mode))
2894 offset += GET_MODE_PRECISION (dest_mode);
2895 /* Otherwise give up for now. */
2896 else
2897 offset = -1;
2900 if (offset >= 0)
2902 rtx inner = SET_SRC (PATTERN (i3));
2903 rtx outer = SET_SRC (temp_expr);
2905 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2906 rtx_mode_t (inner, dest_mode),
2907 offset, width);
2909 combine_merges++;
2910 subst_insn = i3;
2911 subst_low_luid = DF_INSN_LUID (i2);
2912 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2913 i2dest = temp_dest;
2914 i2dest_killed = dead_or_set_p (i2, i2dest);
2916 /* Replace the source in I2 with the new constant and make the
2917 resulting insn the new pattern for I3. Then skip to where we
2918 validate the pattern. Everything was set up above. */
2919 SUBST (SET_SRC (temp_expr),
2920 immed_wide_int_const (o, temp_mode));
2922 newpat = PATTERN (i2);
2924 /* The dest of I3 has been replaced with the dest of I2. */
2925 changed_i3_dest = 1;
2926 goto validate_replacement;
2930 /* If we have no I1 and I2 looks like:
2931 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2932 (set Y OP)])
2933 make up a dummy I1 that is
2934 (set Y OP)
2935 and change I2 to be
2936 (set (reg:CC X) (compare:CC Y (const_int 0)))
2938 (We can ignore any trailing CLOBBERs.)
2940 This undoes a previous combination and allows us to match a branch-and-
2941 decrement insn. */
2943 if (!HAVE_cc0 && i1 == 0
2944 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2945 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2946 == MODE_CC)
2947 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2948 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2949 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2950 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2951 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2952 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2954 /* We make I1 with the same INSN_UID as I2. This gives it
2955 the same DF_INSN_LUID for value tracking. Our fake I1 will
2956 never appear in the insn stream so giving it the same INSN_UID
2957 as I2 will not cause a problem. */
2959 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2960 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2961 -1, NULL_RTX);
2962 INSN_UID (i1) = INSN_UID (i2);
2964 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2965 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2966 SET_DEST (PATTERN (i1)));
2967 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2968 SUBST_LINK (LOG_LINKS (i2),
2969 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2972 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2973 make those two SETs separate I1 and I2 insns, and make an I0 that is
2974 the original I1. */
2975 if (!HAVE_cc0 && i0 == 0
2976 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2977 && can_split_parallel_of_n_reg_sets (i2, 2)
2978 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2979 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2981 /* If there is no I1, there is no I0 either. */
2982 i0 = i1;
2984 /* We make I1 with the same INSN_UID as I2. This gives it
2985 the same DF_INSN_LUID for value tracking. Our fake I1 will
2986 never appear in the insn stream so giving it the same INSN_UID
2987 as I2 will not cause a problem. */
2989 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2990 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2991 -1, NULL_RTX);
2992 INSN_UID (i1) = INSN_UID (i2);
2994 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2997 /* Verify that I2 and I1 are valid for combining. */
2998 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2999 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3000 &i1dest, &i1src))
3001 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3002 &i0dest, &i0src)))
3004 undo_all ();
3005 return 0;
3008 /* Record whether I2DEST is used in I2SRC and similarly for the other
3009 cases. Knowing this will help in register status updating below. */
3010 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3011 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3012 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3013 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3014 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3015 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3016 i2dest_killed = dead_or_set_p (i2, i2dest);
3017 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3018 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3020 /* For the earlier insns, determine which of the subsequent ones they
3021 feed. */
3022 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3023 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3024 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3025 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3026 && reg_overlap_mentioned_p (i0dest, i2src))));
3028 /* Ensure that I3's pattern can be the destination of combines. */
3029 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3030 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3031 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3032 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3033 &i3dest_killed))
3035 undo_all ();
3036 return 0;
3039 /* See if any of the insns is a MULT operation. Unless one is, we will
3040 reject a combination that is, since it must be slower. Be conservative
3041 here. */
3042 if (GET_CODE (i2src) == MULT
3043 || (i1 != 0 && GET_CODE (i1src) == MULT)
3044 || (i0 != 0 && GET_CODE (i0src) == MULT)
3045 || (GET_CODE (PATTERN (i3)) == SET
3046 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3047 have_mult = 1;
3049 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3050 We used to do this EXCEPT in one case: I3 has a post-inc in an
3051 output operand. However, that exception can give rise to insns like
3052 mov r3,(r3)+
3053 which is a famous insn on the PDP-11 where the value of r3 used as the
3054 source was model-dependent. Avoid this sort of thing. */
3056 #if 0
3057 if (!(GET_CODE (PATTERN (i3)) == SET
3058 && REG_P (SET_SRC (PATTERN (i3)))
3059 && MEM_P (SET_DEST (PATTERN (i3)))
3060 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3061 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3062 /* It's not the exception. */
3063 #endif
3064 if (AUTO_INC_DEC)
3066 rtx link;
3067 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3068 if (REG_NOTE_KIND (link) == REG_INC
3069 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3070 || (i1 != 0
3071 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3073 undo_all ();
3074 return 0;
3078 /* See if the SETs in I1 or I2 need to be kept around in the merged
3079 instruction: whenever the value set there is still needed past I3.
3080 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3082 For the SET in I1, we have two cases: if I1 and I2 independently feed
3083 into I3, the set in I1 needs to be kept around unless I1DEST dies
3084 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3085 in I1 needs to be kept around unless I1DEST dies or is set in either
3086 I2 or I3. The same considerations apply to I0. */
3088 added_sets_2 = !dead_or_set_p (i3, i2dest);
3090 if (i1)
3091 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3092 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3093 else
3094 added_sets_1 = 0;
3096 if (i0)
3097 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3098 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3099 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3100 && dead_or_set_p (i2, i0dest)));
3101 else
3102 added_sets_0 = 0;
3104 /* We are about to copy insns for the case where they need to be kept
3105 around. Check that they can be copied in the merged instruction. */
3107 if (targetm.cannot_copy_insn_p
3108 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3109 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3110 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3112 undo_all ();
3113 return 0;
3116 /* If the set in I2 needs to be kept around, we must make a copy of
3117 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3118 PATTERN (I2), we are only substituting for the original I1DEST, not into
3119 an already-substituted copy. This also prevents making self-referential
3120 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3121 I2DEST. */
3123 if (added_sets_2)
3125 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3126 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3127 else
3128 i2pat = copy_rtx (PATTERN (i2));
3131 if (added_sets_1)
3133 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3134 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3135 else
3136 i1pat = copy_rtx (PATTERN (i1));
3139 if (added_sets_0)
3141 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3142 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3143 else
3144 i0pat = copy_rtx (PATTERN (i0));
3147 combine_merges++;
3149 /* Substitute in the latest insn for the regs set by the earlier ones. */
3151 maxreg = max_reg_num ();
3153 subst_insn = i3;
3155 /* Many machines that don't use CC0 have insns that can both perform an
3156 arithmetic operation and set the condition code. These operations will
3157 be represented as a PARALLEL with the first element of the vector
3158 being a COMPARE of an arithmetic operation with the constant zero.
3159 The second element of the vector will set some pseudo to the result
3160 of the same arithmetic operation. If we simplify the COMPARE, we won't
3161 match such a pattern and so will generate an extra insn. Here we test
3162 for this case, where both the comparison and the operation result are
3163 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3164 I2SRC. Later we will make the PARALLEL that contains I2. */
3166 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3167 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3168 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3169 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3171 rtx newpat_dest;
3172 rtx *cc_use_loc = NULL;
3173 rtx_insn *cc_use_insn = NULL;
3174 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3175 machine_mode compare_mode, orig_compare_mode;
3176 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3177 scalar_int_mode mode;
3179 newpat = PATTERN (i3);
3180 newpat_dest = SET_DEST (newpat);
3181 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3183 if (undobuf.other_insn == 0
3184 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3185 &cc_use_insn)))
3187 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3188 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3189 compare_code = simplify_compare_const (compare_code, mode,
3190 op0, &op1);
3191 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3194 /* Do the rest only if op1 is const0_rtx, which may be the
3195 result of simplification. */
3196 if (op1 == const0_rtx)
3198 /* If a single use of the CC is found, prepare to modify it
3199 when SELECT_CC_MODE returns a new CC-class mode, or when
3200 the above simplify_compare_const() returned a new comparison
3201 operator. undobuf.other_insn is assigned the CC use insn
3202 when modifying it. */
3203 if (cc_use_loc)
3205 #ifdef SELECT_CC_MODE
3206 machine_mode new_mode
3207 = SELECT_CC_MODE (compare_code, op0, op1);
3208 if (new_mode != orig_compare_mode
3209 && can_change_dest_mode (SET_DEST (newpat),
3210 added_sets_2, new_mode))
3212 unsigned int regno = REGNO (newpat_dest);
3213 compare_mode = new_mode;
3214 if (regno < FIRST_PSEUDO_REGISTER)
3215 newpat_dest = gen_rtx_REG (compare_mode, regno);
3216 else
3218 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3219 newpat_dest = regno_reg_rtx[regno];
3222 #endif
3223 /* Cases for modifying the CC-using comparison. */
3224 if (compare_code != orig_compare_code
3225 /* ??? Do we need to verify the zero rtx? */
3226 && XEXP (*cc_use_loc, 1) == const0_rtx)
3228 /* Replace cc_use_loc with entire new RTX. */
3229 SUBST (*cc_use_loc,
3230 gen_rtx_fmt_ee (compare_code, compare_mode,
3231 newpat_dest, const0_rtx));
3232 undobuf.other_insn = cc_use_insn;
3234 else if (compare_mode != orig_compare_mode)
3236 /* Just replace the CC reg with a new mode. */
3237 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3238 undobuf.other_insn = cc_use_insn;
3242 /* Now we modify the current newpat:
3243 First, SET_DEST(newpat) is updated if the CC mode has been
3244 altered. For targets without SELECT_CC_MODE, this should be
3245 optimized away. */
3246 if (compare_mode != orig_compare_mode)
3247 SUBST (SET_DEST (newpat), newpat_dest);
3248 /* This is always done to propagate i2src into newpat. */
3249 SUBST (SET_SRC (newpat),
3250 gen_rtx_COMPARE (compare_mode, op0, op1));
3251 /* Create new version of i2pat if needed; the below PARALLEL
3252 creation needs this to work correctly. */
3253 if (! rtx_equal_p (i2src, op0))
3254 i2pat = gen_rtx_SET (i2dest, op0);
3255 i2_is_used = 1;
3259 if (i2_is_used == 0)
3261 /* It is possible that the source of I2 or I1 may be performing
3262 an unneeded operation, such as a ZERO_EXTEND of something
3263 that is known to have the high part zero. Handle that case
3264 by letting subst look at the inner insns.
3266 Another way to do this would be to have a function that tries
3267 to simplify a single insn instead of merging two or more
3268 insns. We don't do this because of the potential of infinite
3269 loops and because of the potential extra memory required.
3270 However, doing it the way we are is a bit of a kludge and
3271 doesn't catch all cases.
3273 But only do this if -fexpensive-optimizations since it slows
3274 things down and doesn't usually win.
3276 This is not done in the COMPARE case above because the
3277 unmodified I2PAT is used in the PARALLEL and so a pattern
3278 with a modified I2SRC would not match. */
3280 if (flag_expensive_optimizations)
3282 /* Pass pc_rtx so no substitutions are done, just
3283 simplifications. */
3284 if (i1)
3286 subst_low_luid = DF_INSN_LUID (i1);
3287 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3290 subst_low_luid = DF_INSN_LUID (i2);
3291 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3294 n_occurrences = 0; /* `subst' counts here */
3295 subst_low_luid = DF_INSN_LUID (i2);
3297 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3298 copy of I2SRC each time we substitute it, in order to avoid creating
3299 self-referential RTL when we will be substituting I1SRC for I1DEST
3300 later. Likewise if I0 feeds into I2, either directly or indirectly
3301 through I1, and I0DEST is in I0SRC. */
3302 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3303 (i1_feeds_i2_n && i1dest_in_i1src)
3304 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3305 && i0dest_in_i0src));
3306 substed_i2 = 1;
3308 /* Record whether I2's body now appears within I3's body. */
3309 i2_is_used = n_occurrences;
3312 /* If we already got a failure, don't try to do more. Otherwise, try to
3313 substitute I1 if we have it. */
3315 if (i1 && GET_CODE (newpat) != CLOBBER)
3317 /* Check that an autoincrement side-effect on I1 has not been lost.
3318 This happens if I1DEST is mentioned in I2 and dies there, and
3319 has disappeared from the new pattern. */
3320 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3321 && i1_feeds_i2_n
3322 && dead_or_set_p (i2, i1dest)
3323 && !reg_overlap_mentioned_p (i1dest, newpat))
3324 /* Before we can do this substitution, we must redo the test done
3325 above (see detailed comments there) that ensures I1DEST isn't
3326 mentioned in any SETs in NEWPAT that are field assignments. */
3327 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3328 0, 0, 0))
3330 undo_all ();
3331 return 0;
3334 n_occurrences = 0;
3335 subst_low_luid = DF_INSN_LUID (i1);
3337 /* If the following substitution will modify I1SRC, make a copy of it
3338 for the case where it is substituted for I1DEST in I2PAT later. */
3339 if (added_sets_2 && i1_feeds_i2_n)
3340 i1src_copy = copy_rtx (i1src);
3342 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3343 copy of I1SRC each time we substitute it, in order to avoid creating
3344 self-referential RTL when we will be substituting I0SRC for I0DEST
3345 later. */
3346 newpat = subst (newpat, i1dest, i1src, 0, 0,
3347 i0_feeds_i1_n && i0dest_in_i0src);
3348 substed_i1 = 1;
3350 /* Record whether I1's body now appears within I3's body. */
3351 i1_is_used = n_occurrences;
3354 /* Likewise for I0 if we have it. */
3356 if (i0 && GET_CODE (newpat) != CLOBBER)
3358 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3359 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3360 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3361 && !reg_overlap_mentioned_p (i0dest, newpat))
3362 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3363 0, 0, 0))
3365 undo_all ();
3366 return 0;
3369 /* If the following substitution will modify I0SRC, make a copy of it
3370 for the case where it is substituted for I0DEST in I1PAT later. */
3371 if (added_sets_1 && i0_feeds_i1_n)
3372 i0src_copy = copy_rtx (i0src);
3373 /* And a copy for I0DEST in I2PAT substitution. */
3374 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3375 || (i0_feeds_i2_n)))
3376 i0src_copy2 = copy_rtx (i0src);
3378 n_occurrences = 0;
3379 subst_low_luid = DF_INSN_LUID (i0);
3380 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3381 substed_i0 = 1;
3384 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3385 to count all the ways that I2SRC and I1SRC can be used. */
3386 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3387 && i2_is_used + added_sets_2 > 1)
3388 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3389 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3390 > 1))
3391 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3392 && (n_occurrences + added_sets_0
3393 + (added_sets_1 && i0_feeds_i1_n)
3394 + (added_sets_2 && i0_feeds_i2_n)
3395 > 1))
3396 /* Fail if we tried to make a new register. */
3397 || max_reg_num () != maxreg
3398 /* Fail if we couldn't do something and have a CLOBBER. */
3399 || GET_CODE (newpat) == CLOBBER
3400 /* Fail if this new pattern is a MULT and we didn't have one before
3401 at the outer level. */
3402 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3403 && ! have_mult))
3405 undo_all ();
3406 return 0;
3409 /* If the actions of the earlier insns must be kept
3410 in addition to substituting them into the latest one,
3411 we must make a new PARALLEL for the latest insn
3412 to hold additional the SETs. */
3414 if (added_sets_0 || added_sets_1 || added_sets_2)
3416 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3417 combine_extras++;
3419 if (GET_CODE (newpat) == PARALLEL)
3421 rtvec old = XVEC (newpat, 0);
3422 total_sets = XVECLEN (newpat, 0) + extra_sets;
3423 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3424 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3425 sizeof (old->elem[0]) * old->num_elem);
3427 else
3429 rtx old = newpat;
3430 total_sets = 1 + extra_sets;
3431 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3432 XVECEXP (newpat, 0, 0) = old;
3435 if (added_sets_0)
3436 XVECEXP (newpat, 0, --total_sets) = i0pat;
3438 if (added_sets_1)
3440 rtx t = i1pat;
3441 if (i0_feeds_i1_n)
3442 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3444 XVECEXP (newpat, 0, --total_sets) = t;
3446 if (added_sets_2)
3448 rtx t = i2pat;
3449 if (i1_feeds_i2_n)
3450 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3451 i0_feeds_i1_n && i0dest_in_i0src);
3452 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3453 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3455 XVECEXP (newpat, 0, --total_sets) = t;
3459 validate_replacement:
3461 /* Note which hard regs this insn has as inputs. */
3462 mark_used_regs_combine (newpat);
3464 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3465 consider splitting this pattern, we might need these clobbers. */
3466 if (i1 && GET_CODE (newpat) == PARALLEL
3467 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3469 int len = XVECLEN (newpat, 0);
3471 newpat_vec_with_clobbers = rtvec_alloc (len);
3472 for (i = 0; i < len; i++)
3473 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3476 /* We have recognized nothing yet. */
3477 insn_code_number = -1;
3479 /* See if this is a PARALLEL of two SETs where one SET's destination is
3480 a register that is unused and this isn't marked as an instruction that
3481 might trap in an EH region. In that case, we just need the other SET.
3482 We prefer this over the PARALLEL.
3484 This can occur when simplifying a divmod insn. We *must* test for this
3485 case here because the code below that splits two independent SETs doesn't
3486 handle this case correctly when it updates the register status.
3488 It's pointless doing this if we originally had two sets, one from
3489 i3, and one from i2. Combining then splitting the parallel results
3490 in the original i2 again plus an invalid insn (which we delete).
3491 The net effect is only to move instructions around, which makes
3492 debug info less accurate.
3494 If the remaining SET came from I2 its destination should not be used
3495 between I2 and I3. See PR82024. */
3497 if (!(added_sets_2 && i1 == 0)
3498 && is_parallel_of_n_reg_sets (newpat, 2)
3499 && asm_noperands (newpat) < 0)
3501 rtx set0 = XVECEXP (newpat, 0, 0);
3502 rtx set1 = XVECEXP (newpat, 0, 1);
3503 rtx oldpat = newpat;
3505 if (((REG_P (SET_DEST (set1))
3506 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3507 || (GET_CODE (SET_DEST (set1)) == SUBREG
3508 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3509 && insn_nothrow_p (i3)
3510 && !side_effects_p (SET_SRC (set1)))
3512 newpat = set0;
3513 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3516 else if (((REG_P (SET_DEST (set0))
3517 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3518 || (GET_CODE (SET_DEST (set0)) == SUBREG
3519 && find_reg_note (i3, REG_UNUSED,
3520 SUBREG_REG (SET_DEST (set0)))))
3521 && insn_nothrow_p (i3)
3522 && !side_effects_p (SET_SRC (set0)))
3524 rtx dest = SET_DEST (set1);
3525 if (GET_CODE (dest) == SUBREG)
3526 dest = SUBREG_REG (dest);
3527 if (!reg_used_between_p (dest, i2, i3))
3529 newpat = set1;
3530 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3532 if (insn_code_number >= 0)
3533 changed_i3_dest = 1;
3537 if (insn_code_number < 0)
3538 newpat = oldpat;
3541 /* Is the result of combination a valid instruction? */
3542 if (insn_code_number < 0)
3543 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3545 /* If we were combining three insns and the result is a simple SET
3546 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3547 insns. There are two ways to do this. It can be split using a
3548 machine-specific method (like when you have an addition of a large
3549 constant) or by combine in the function find_split_point. */
3551 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3552 && asm_noperands (newpat) < 0)
3554 rtx parallel, *split;
3555 rtx_insn *m_split_insn;
3557 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3558 use I2DEST as a scratch register will help. In the latter case,
3559 convert I2DEST to the mode of the source of NEWPAT if we can. */
3561 m_split_insn = combine_split_insns (newpat, i3);
3563 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3564 inputs of NEWPAT. */
3566 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3567 possible to try that as a scratch reg. This would require adding
3568 more code to make it work though. */
3570 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3572 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3574 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3575 (temporarily, until we are committed to this instruction
3576 combination) does not work: for example, any call to nonzero_bits
3577 on the register (from a splitter in the MD file, for example)
3578 will get the old information, which is invalid.
3580 Since nowadays we can create registers during combine just fine,
3581 we should just create a new one here, not reuse i2dest. */
3583 /* First try to split using the original register as a
3584 scratch register. */
3585 parallel = gen_rtx_PARALLEL (VOIDmode,
3586 gen_rtvec (2, newpat,
3587 gen_rtx_CLOBBER (VOIDmode,
3588 i2dest)));
3589 m_split_insn = combine_split_insns (parallel, i3);
3591 /* If that didn't work, try changing the mode of I2DEST if
3592 we can. */
3593 if (m_split_insn == 0
3594 && new_mode != GET_MODE (i2dest)
3595 && new_mode != VOIDmode
3596 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3598 machine_mode old_mode = GET_MODE (i2dest);
3599 rtx ni2dest;
3601 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3602 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3603 else
3605 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3606 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3609 parallel = (gen_rtx_PARALLEL
3610 (VOIDmode,
3611 gen_rtvec (2, newpat,
3612 gen_rtx_CLOBBER (VOIDmode,
3613 ni2dest))));
3614 m_split_insn = combine_split_insns (parallel, i3);
3616 if (m_split_insn == 0
3617 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3619 struct undo *buf;
3621 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3622 buf = undobuf.undos;
3623 undobuf.undos = buf->next;
3624 buf->next = undobuf.frees;
3625 undobuf.frees = buf;
3629 i2scratch = m_split_insn != 0;
3632 /* If recog_for_combine has discarded clobbers, try to use them
3633 again for the split. */
3634 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3636 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3637 m_split_insn = combine_split_insns (parallel, i3);
3640 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3642 rtx m_split_pat = PATTERN (m_split_insn);
3643 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3644 if (insn_code_number >= 0)
3645 newpat = m_split_pat;
3647 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3648 && (next_nonnote_nondebug_insn (i2) == i3
3649 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3651 rtx i2set, i3set;
3652 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3653 newi2pat = PATTERN (m_split_insn);
3655 i3set = single_set (NEXT_INSN (m_split_insn));
3656 i2set = single_set (m_split_insn);
3658 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3660 /* If I2 or I3 has multiple SETs, we won't know how to track
3661 register status, so don't use these insns. If I2's destination
3662 is used between I2 and I3, we also can't use these insns. */
3664 if (i2_code_number >= 0 && i2set && i3set
3665 && (next_nonnote_nondebug_insn (i2) == i3
3666 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3667 insn_code_number = recog_for_combine (&newi3pat, i3,
3668 &new_i3_notes);
3669 if (insn_code_number >= 0)
3670 newpat = newi3pat;
3672 /* It is possible that both insns now set the destination of I3.
3673 If so, we must show an extra use of it. */
3675 if (insn_code_number >= 0)
3677 rtx new_i3_dest = SET_DEST (i3set);
3678 rtx new_i2_dest = SET_DEST (i2set);
3680 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3681 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3682 || GET_CODE (new_i3_dest) == SUBREG)
3683 new_i3_dest = XEXP (new_i3_dest, 0);
3685 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3686 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3687 || GET_CODE (new_i2_dest) == SUBREG)
3688 new_i2_dest = XEXP (new_i2_dest, 0);
3690 if (REG_P (new_i3_dest)
3691 && REG_P (new_i2_dest)
3692 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3693 && REGNO (new_i2_dest) < reg_n_sets_max)
3694 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3698 /* If we can split it and use I2DEST, go ahead and see if that
3699 helps things be recognized. Verify that none of the registers
3700 are set between I2 and I3. */
3701 if (insn_code_number < 0
3702 && (split = find_split_point (&newpat, i3, false)) != 0
3703 && (!HAVE_cc0 || REG_P (i2dest))
3704 /* We need I2DEST in the proper mode. If it is a hard register
3705 or the only use of a pseudo, we can change its mode.
3706 Make sure we don't change a hard register to have a mode that
3707 isn't valid for it, or change the number of registers. */
3708 && (GET_MODE (*split) == GET_MODE (i2dest)
3709 || GET_MODE (*split) == VOIDmode
3710 || can_change_dest_mode (i2dest, added_sets_2,
3711 GET_MODE (*split)))
3712 && (next_nonnote_nondebug_insn (i2) == i3
3713 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3714 /* We can't overwrite I2DEST if its value is still used by
3715 NEWPAT. */
3716 && ! reg_referenced_p (i2dest, newpat))
3718 rtx newdest = i2dest;
3719 enum rtx_code split_code = GET_CODE (*split);
3720 machine_mode split_mode = GET_MODE (*split);
3721 bool subst_done = false;
3722 newi2pat = NULL_RTX;
3724 i2scratch = true;
3726 /* *SPLIT may be part of I2SRC, so make sure we have the
3727 original expression around for later debug processing.
3728 We should not need I2SRC any more in other cases. */
3729 if (MAY_HAVE_DEBUG_INSNS)
3730 i2src = copy_rtx (i2src);
3731 else
3732 i2src = NULL;
3734 /* Get NEWDEST as a register in the proper mode. We have already
3735 validated that we can do this. */
3736 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3738 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3739 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3740 else
3742 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3743 newdest = regno_reg_rtx[REGNO (i2dest)];
3747 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3748 an ASHIFT. This can occur if it was inside a PLUS and hence
3749 appeared to be a memory address. This is a kludge. */
3750 if (split_code == MULT
3751 && CONST_INT_P (XEXP (*split, 1))
3752 && INTVAL (XEXP (*split, 1)) > 0
3753 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3755 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3756 XEXP (*split, 0), GEN_INT (i)));
3757 /* Update split_code because we may not have a multiply
3758 anymore. */
3759 split_code = GET_CODE (*split);
3762 /* Similarly for (plus (mult FOO (const_int pow2))). */
3763 if (split_code == PLUS
3764 && GET_CODE (XEXP (*split, 0)) == MULT
3765 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3766 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3767 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3769 rtx nsplit = XEXP (*split, 0);
3770 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3771 XEXP (nsplit, 0), GEN_INT (i)));
3772 /* Update split_code because we may not have a multiply
3773 anymore. */
3774 split_code = GET_CODE (*split);
3777 #ifdef INSN_SCHEDULING
3778 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3779 be written as a ZERO_EXTEND. */
3780 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3782 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3783 what it really is. */
3784 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3785 == SIGN_EXTEND)
3786 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3787 SUBREG_REG (*split)));
3788 else
3789 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3790 SUBREG_REG (*split)));
3792 #endif
3794 /* Attempt to split binary operators using arithmetic identities. */
3795 if (BINARY_P (SET_SRC (newpat))
3796 && split_mode == GET_MODE (SET_SRC (newpat))
3797 && ! side_effects_p (SET_SRC (newpat)))
3799 rtx setsrc = SET_SRC (newpat);
3800 machine_mode mode = GET_MODE (setsrc);
3801 enum rtx_code code = GET_CODE (setsrc);
3802 rtx src_op0 = XEXP (setsrc, 0);
3803 rtx src_op1 = XEXP (setsrc, 1);
3805 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3806 if (rtx_equal_p (src_op0, src_op1))
3808 newi2pat = gen_rtx_SET (newdest, src_op0);
3809 SUBST (XEXP (setsrc, 0), newdest);
3810 SUBST (XEXP (setsrc, 1), newdest);
3811 subst_done = true;
3813 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3814 else if ((code == PLUS || code == MULT)
3815 && GET_CODE (src_op0) == code
3816 && GET_CODE (XEXP (src_op0, 0)) == code
3817 && (INTEGRAL_MODE_P (mode)
3818 || (FLOAT_MODE_P (mode)
3819 && flag_unsafe_math_optimizations)))
3821 rtx p = XEXP (XEXP (src_op0, 0), 0);
3822 rtx q = XEXP (XEXP (src_op0, 0), 1);
3823 rtx r = XEXP (src_op0, 1);
3824 rtx s = src_op1;
3826 /* Split both "((X op Y) op X) op Y" and
3827 "((X op Y) op Y) op X" as "T op T" where T is
3828 "X op Y". */
3829 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3830 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3832 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3833 SUBST (XEXP (setsrc, 0), newdest);
3834 SUBST (XEXP (setsrc, 1), newdest);
3835 subst_done = true;
3837 /* Split "((X op X) op Y) op Y)" as "T op T" where
3838 T is "X op Y". */
3839 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3841 rtx tmp = simplify_gen_binary (code, mode, p, r);
3842 newi2pat = gen_rtx_SET (newdest, tmp);
3843 SUBST (XEXP (setsrc, 0), newdest);
3844 SUBST (XEXP (setsrc, 1), newdest);
3845 subst_done = true;
3850 if (!subst_done)
3852 newi2pat = gen_rtx_SET (newdest, *split);
3853 SUBST (*split, newdest);
3856 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3858 /* recog_for_combine might have added CLOBBERs to newi2pat.
3859 Make sure NEWPAT does not depend on the clobbered regs. */
3860 if (GET_CODE (newi2pat) == PARALLEL)
3861 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3862 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3864 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3865 if (reg_overlap_mentioned_p (reg, newpat))
3867 undo_all ();
3868 return 0;
3872 /* If the split point was a MULT and we didn't have one before,
3873 don't use one now. */
3874 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3875 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3879 /* Check for a case where we loaded from memory in a narrow mode and
3880 then sign extended it, but we need both registers. In that case,
3881 we have a PARALLEL with both loads from the same memory location.
3882 We can split this into a load from memory followed by a register-register
3883 copy. This saves at least one insn, more if register allocation can
3884 eliminate the copy.
3886 We cannot do this if the destination of the first assignment is a
3887 condition code register or cc0. We eliminate this case by making sure
3888 the SET_DEST and SET_SRC have the same mode.
3890 We cannot do this if the destination of the second assignment is
3891 a register that we have already assumed is zero-extended. Similarly
3892 for a SUBREG of such a register. */
3894 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3895 && GET_CODE (newpat) == PARALLEL
3896 && XVECLEN (newpat, 0) == 2
3897 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3898 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3899 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3900 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3901 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3902 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3903 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3904 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3905 DF_INSN_LUID (i2))
3906 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3907 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3908 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3909 (REG_P (temp_expr)
3910 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3911 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3912 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3913 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3914 != GET_MODE_MASK (word_mode))))
3915 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3916 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3917 (REG_P (temp_expr)
3918 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3919 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3920 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3921 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3922 != GET_MODE_MASK (word_mode)))))
3923 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3924 SET_SRC (XVECEXP (newpat, 0, 1)))
3925 && ! find_reg_note (i3, REG_UNUSED,
3926 SET_DEST (XVECEXP (newpat, 0, 0))))
3928 rtx ni2dest;
3930 newi2pat = XVECEXP (newpat, 0, 0);
3931 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3932 newpat = XVECEXP (newpat, 0, 1);
3933 SUBST (SET_SRC (newpat),
3934 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3935 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3937 if (i2_code_number >= 0)
3938 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3940 if (insn_code_number >= 0)
3941 swap_i2i3 = 1;
3944 /* Similarly, check for a case where we have a PARALLEL of two independent
3945 SETs but we started with three insns. In this case, we can do the sets
3946 as two separate insns. This case occurs when some SET allows two
3947 other insns to combine, but the destination of that SET is still live.
3949 Also do this if we started with two insns and (at least) one of the
3950 resulting sets is a noop; this noop will be deleted later. */
3952 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3953 && GET_CODE (newpat) == PARALLEL
3954 && XVECLEN (newpat, 0) == 2
3955 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3956 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3957 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3958 || set_noop_p (XVECEXP (newpat, 0, 1)))
3959 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3960 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3961 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3962 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3963 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3964 XVECEXP (newpat, 0, 0))
3965 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3966 XVECEXP (newpat, 0, 1))
3967 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3968 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3970 rtx set0 = XVECEXP (newpat, 0, 0);
3971 rtx set1 = XVECEXP (newpat, 0, 1);
3973 /* Normally, it doesn't matter which of the two is done first,
3974 but the one that references cc0 can't be the second, and
3975 one which uses any regs/memory set in between i2 and i3 can't
3976 be first. The PARALLEL might also have been pre-existing in i3,
3977 so we need to make sure that we won't wrongly hoist a SET to i2
3978 that would conflict with a death note present in there. */
3979 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3980 && !(REG_P (SET_DEST (set1))
3981 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3982 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3983 && find_reg_note (i2, REG_DEAD,
3984 SUBREG_REG (SET_DEST (set1))))
3985 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3986 /* If I3 is a jump, ensure that set0 is a jump so that
3987 we do not create invalid RTL. */
3988 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3991 newi2pat = set1;
3992 newpat = set0;
3994 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3995 && !(REG_P (SET_DEST (set0))
3996 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3997 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3998 && find_reg_note (i2, REG_DEAD,
3999 SUBREG_REG (SET_DEST (set0))))
4000 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4001 /* If I3 is a jump, ensure that set1 is a jump so that
4002 we do not create invalid RTL. */
4003 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4006 newi2pat = set0;
4007 newpat = set1;
4009 else
4011 undo_all ();
4012 return 0;
4015 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4017 if (i2_code_number >= 0)
4019 /* recog_for_combine might have added CLOBBERs to newi2pat.
4020 Make sure NEWPAT does not depend on the clobbered regs. */
4021 if (GET_CODE (newi2pat) == PARALLEL)
4023 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4024 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4026 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4027 if (reg_overlap_mentioned_p (reg, newpat))
4029 undo_all ();
4030 return 0;
4035 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4039 /* If it still isn't recognized, fail and change things back the way they
4040 were. */
4041 if ((insn_code_number < 0
4042 /* Is the result a reasonable ASM_OPERANDS? */
4043 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4045 undo_all ();
4046 return 0;
4049 /* If we had to change another insn, make sure it is valid also. */
4050 if (undobuf.other_insn)
4052 CLEAR_HARD_REG_SET (newpat_used_regs);
4054 other_pat = PATTERN (undobuf.other_insn);
4055 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4056 &new_other_notes);
4058 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4060 undo_all ();
4061 return 0;
4065 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4066 they are adjacent to each other or not. */
4067 if (HAVE_cc0)
4069 rtx_insn *p = prev_nonnote_insn (i3);
4070 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4071 && sets_cc0_p (newi2pat))
4073 undo_all ();
4074 return 0;
4078 /* Only allow this combination if insn_rtx_costs reports that the
4079 replacement instructions are cheaper than the originals. */
4080 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4082 undo_all ();
4083 return 0;
4086 if (MAY_HAVE_DEBUG_INSNS)
4088 struct undo *undo;
4090 for (undo = undobuf.undos; undo; undo = undo->next)
4091 if (undo->kind == UNDO_MODE)
4093 rtx reg = *undo->where.r;
4094 machine_mode new_mode = GET_MODE (reg);
4095 machine_mode old_mode = undo->old_contents.m;
4097 /* Temporarily revert mode back. */
4098 adjust_reg_mode (reg, old_mode);
4100 if (reg == i2dest && i2scratch)
4102 /* If we used i2dest as a scratch register with a
4103 different mode, substitute it for the original
4104 i2src while its original mode is temporarily
4105 restored, and then clear i2scratch so that we don't
4106 do it again later. */
4107 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4108 this_basic_block);
4109 i2scratch = false;
4110 /* Put back the new mode. */
4111 adjust_reg_mode (reg, new_mode);
4113 else
4115 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4116 rtx_insn *first, *last;
4118 if (reg == i2dest)
4120 first = i2;
4121 last = last_combined_insn;
4123 else
4125 first = i3;
4126 last = undobuf.other_insn;
4127 gcc_assert (last);
4128 if (DF_INSN_LUID (last)
4129 < DF_INSN_LUID (last_combined_insn))
4130 last = last_combined_insn;
4133 /* We're dealing with a reg that changed mode but not
4134 meaning, so we want to turn it into a subreg for
4135 the new mode. However, because of REG sharing and
4136 because its mode had already changed, we have to do
4137 it in two steps. First, replace any debug uses of
4138 reg, with its original mode temporarily restored,
4139 with this copy we have created; then, replace the
4140 copy with the SUBREG of the original shared reg,
4141 once again changed to the new mode. */
4142 propagate_for_debug (first, last, reg, tempreg,
4143 this_basic_block);
4144 adjust_reg_mode (reg, new_mode);
4145 propagate_for_debug (first, last, tempreg,
4146 lowpart_subreg (old_mode, reg, new_mode),
4147 this_basic_block);
4152 /* If we will be able to accept this, we have made a
4153 change to the destination of I3. This requires us to
4154 do a few adjustments. */
4156 if (changed_i3_dest)
4158 PATTERN (i3) = newpat;
4159 adjust_for_new_dest (i3);
4162 /* We now know that we can do this combination. Merge the insns and
4163 update the status of registers and LOG_LINKS. */
4165 if (undobuf.other_insn)
4167 rtx note, next;
4169 PATTERN (undobuf.other_insn) = other_pat;
4171 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4172 ensure that they are still valid. Then add any non-duplicate
4173 notes added by recog_for_combine. */
4174 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4176 next = XEXP (note, 1);
4178 if ((REG_NOTE_KIND (note) == REG_DEAD
4179 && !reg_referenced_p (XEXP (note, 0),
4180 PATTERN (undobuf.other_insn)))
4181 ||(REG_NOTE_KIND (note) == REG_UNUSED
4182 && !reg_set_p (XEXP (note, 0),
4183 PATTERN (undobuf.other_insn)))
4184 /* Simply drop equal note since it may be no longer valid
4185 for other_insn. It may be possible to record that CC
4186 register is changed and only discard those notes, but
4187 in practice it's unnecessary complication and doesn't
4188 give any meaningful improvement.
4190 See PR78559. */
4191 || REG_NOTE_KIND (note) == REG_EQUAL
4192 || REG_NOTE_KIND (note) == REG_EQUIV)
4193 remove_note (undobuf.other_insn, note);
4196 distribute_notes (new_other_notes, undobuf.other_insn,
4197 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4198 NULL_RTX);
4201 if (swap_i2i3)
4203 rtx_insn *insn;
4204 struct insn_link *link;
4205 rtx ni2dest;
4207 /* I3 now uses what used to be its destination and which is now
4208 I2's destination. This requires us to do a few adjustments. */
4209 PATTERN (i3) = newpat;
4210 adjust_for_new_dest (i3);
4212 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4213 so we still will.
4215 However, some later insn might be using I2's dest and have
4216 a LOG_LINK pointing at I3. We must remove this link.
4217 The simplest way to remove the link is to point it at I1,
4218 which we know will be a NOTE. */
4220 /* newi2pat is usually a SET here; however, recog_for_combine might
4221 have added some clobbers. */
4222 if (GET_CODE (newi2pat) == PARALLEL)
4223 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4224 else
4225 ni2dest = SET_DEST (newi2pat);
4227 for (insn = NEXT_INSN (i3);
4228 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4229 || insn != BB_HEAD (this_basic_block->next_bb));
4230 insn = NEXT_INSN (insn))
4232 if (NONDEBUG_INSN_P (insn)
4233 && reg_referenced_p (ni2dest, PATTERN (insn)))
4235 FOR_EACH_LOG_LINK (link, insn)
4236 if (link->insn == i3)
4237 link->insn = i1;
4239 break;
4245 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4246 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4247 rtx midnotes = 0;
4248 int from_luid;
4249 /* Compute which registers we expect to eliminate. newi2pat may be setting
4250 either i3dest or i2dest, so we must check it. */
4251 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4252 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4253 || !i2dest_killed
4254 ? 0 : i2dest);
4255 /* For i1, we need to compute both local elimination and global
4256 elimination information with respect to newi2pat because i1dest
4257 may be the same as i3dest, in which case newi2pat may be setting
4258 i1dest. Global information is used when distributing REG_DEAD
4259 note for i2 and i3, in which case it does matter if newi2pat sets
4260 i1dest or not.
4262 Local information is used when distributing REG_DEAD note for i1,
4263 in which case it doesn't matter if newi2pat sets i1dest or not.
4264 See PR62151, if we have four insns combination:
4265 i0: r0 <- i0src
4266 i1: r1 <- i1src (using r0)
4267 REG_DEAD (r0)
4268 i2: r0 <- i2src (using r1)
4269 i3: r3 <- i3src (using r0)
4270 ix: using r0
4271 From i1's point of view, r0 is eliminated, no matter if it is set
4272 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4273 should be discarded.
4275 Note local information only affects cases in forms like "I1->I2->I3",
4276 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4277 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4278 i0dest anyway. */
4279 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4280 || !i1dest_killed
4281 ? 0 : i1dest);
4282 rtx elim_i1 = (local_elim_i1 == 0
4283 || (newi2pat && reg_set_p (i1dest, newi2pat))
4284 ? 0 : i1dest);
4285 /* Same case as i1. */
4286 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4287 ? 0 : i0dest);
4288 rtx elim_i0 = (local_elim_i0 == 0
4289 || (newi2pat && reg_set_p (i0dest, newi2pat))
4290 ? 0 : i0dest);
4292 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4293 clear them. */
4294 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4295 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4296 if (i1)
4297 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4298 if (i0)
4299 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4301 /* Ensure that we do not have something that should not be shared but
4302 occurs multiple times in the new insns. Check this by first
4303 resetting all the `used' flags and then copying anything is shared. */
4305 reset_used_flags (i3notes);
4306 reset_used_flags (i2notes);
4307 reset_used_flags (i1notes);
4308 reset_used_flags (i0notes);
4309 reset_used_flags (newpat);
4310 reset_used_flags (newi2pat);
4311 if (undobuf.other_insn)
4312 reset_used_flags (PATTERN (undobuf.other_insn));
4314 i3notes = copy_rtx_if_shared (i3notes);
4315 i2notes = copy_rtx_if_shared (i2notes);
4316 i1notes = copy_rtx_if_shared (i1notes);
4317 i0notes = copy_rtx_if_shared (i0notes);
4318 newpat = copy_rtx_if_shared (newpat);
4319 newi2pat = copy_rtx_if_shared (newi2pat);
4320 if (undobuf.other_insn)
4321 reset_used_flags (PATTERN (undobuf.other_insn));
4323 INSN_CODE (i3) = insn_code_number;
4324 PATTERN (i3) = newpat;
4326 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4328 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4329 link = XEXP (link, 1))
4331 if (substed_i2)
4333 /* I2SRC must still be meaningful at this point. Some
4334 splitting operations can invalidate I2SRC, but those
4335 operations do not apply to calls. */
4336 gcc_assert (i2src);
4337 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4338 i2dest, i2src);
4340 if (substed_i1)
4341 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4342 i1dest, i1src);
4343 if (substed_i0)
4344 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4345 i0dest, i0src);
4349 if (undobuf.other_insn)
4350 INSN_CODE (undobuf.other_insn) = other_code_number;
4352 /* We had one special case above where I2 had more than one set and
4353 we replaced a destination of one of those sets with the destination
4354 of I3. In that case, we have to update LOG_LINKS of insns later
4355 in this basic block. Note that this (expensive) case is rare.
4357 Also, in this case, we must pretend that all REG_NOTEs for I2
4358 actually came from I3, so that REG_UNUSED notes from I2 will be
4359 properly handled. */
4361 if (i3_subst_into_i2)
4363 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4364 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4365 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4366 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4367 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4368 && ! find_reg_note (i2, REG_UNUSED,
4369 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4370 for (temp_insn = NEXT_INSN (i2);
4371 temp_insn
4372 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4373 || BB_HEAD (this_basic_block) != temp_insn);
4374 temp_insn = NEXT_INSN (temp_insn))
4375 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4376 FOR_EACH_LOG_LINK (link, temp_insn)
4377 if (link->insn == i2)
4378 link->insn = i3;
4380 if (i3notes)
4382 rtx link = i3notes;
4383 while (XEXP (link, 1))
4384 link = XEXP (link, 1);
4385 XEXP (link, 1) = i2notes;
4387 else
4388 i3notes = i2notes;
4389 i2notes = 0;
4392 LOG_LINKS (i3) = NULL;
4393 REG_NOTES (i3) = 0;
4394 LOG_LINKS (i2) = NULL;
4395 REG_NOTES (i2) = 0;
4397 if (newi2pat)
4399 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4400 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4401 this_basic_block);
4402 INSN_CODE (i2) = i2_code_number;
4403 PATTERN (i2) = newi2pat;
4405 else
4407 if (MAY_HAVE_DEBUG_INSNS && i2src)
4408 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4409 this_basic_block);
4410 SET_INSN_DELETED (i2);
4413 if (i1)
4415 LOG_LINKS (i1) = NULL;
4416 REG_NOTES (i1) = 0;
4417 if (MAY_HAVE_DEBUG_INSNS)
4418 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4419 this_basic_block);
4420 SET_INSN_DELETED (i1);
4423 if (i0)
4425 LOG_LINKS (i0) = NULL;
4426 REG_NOTES (i0) = 0;
4427 if (MAY_HAVE_DEBUG_INSNS)
4428 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4429 this_basic_block);
4430 SET_INSN_DELETED (i0);
4433 /* Get death notes for everything that is now used in either I3 or
4434 I2 and used to die in a previous insn. If we built two new
4435 patterns, move from I1 to I2 then I2 to I3 so that we get the
4436 proper movement on registers that I2 modifies. */
4438 if (i0)
4439 from_luid = DF_INSN_LUID (i0);
4440 else if (i1)
4441 from_luid = DF_INSN_LUID (i1);
4442 else
4443 from_luid = DF_INSN_LUID (i2);
4444 if (newi2pat)
4445 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4446 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4448 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4449 if (i3notes)
4450 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4451 elim_i2, elim_i1, elim_i0);
4452 if (i2notes)
4453 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4454 elim_i2, elim_i1, elim_i0);
4455 if (i1notes)
4456 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4457 elim_i2, local_elim_i1, local_elim_i0);
4458 if (i0notes)
4459 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4460 elim_i2, elim_i1, local_elim_i0);
4461 if (midnotes)
4462 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4463 elim_i2, elim_i1, elim_i0);
4465 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4466 know these are REG_UNUSED and want them to go to the desired insn,
4467 so we always pass it as i3. */
4469 if (newi2pat && new_i2_notes)
4470 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4471 NULL_RTX);
4473 if (new_i3_notes)
4474 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4475 NULL_RTX);
4477 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4478 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4479 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4480 in that case, it might delete I2. Similarly for I2 and I1.
4481 Show an additional death due to the REG_DEAD note we make here. If
4482 we discard it in distribute_notes, we will decrement it again. */
4484 if (i3dest_killed)
4486 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4487 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4488 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4489 elim_i1, elim_i0);
4490 else
4491 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4492 elim_i2, elim_i1, elim_i0);
4495 if (i2dest_in_i2src)
4497 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4498 if (newi2pat && reg_set_p (i2dest, newi2pat))
4499 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4500 NULL_RTX, NULL_RTX);
4501 else
4502 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4503 NULL_RTX, NULL_RTX, NULL_RTX);
4506 if (i1dest_in_i1src)
4508 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4509 if (newi2pat && reg_set_p (i1dest, newi2pat))
4510 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4511 NULL_RTX, NULL_RTX);
4512 else
4513 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4514 NULL_RTX, NULL_RTX, NULL_RTX);
4517 if (i0dest_in_i0src)
4519 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4520 if (newi2pat && reg_set_p (i0dest, newi2pat))
4521 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4522 NULL_RTX, NULL_RTX);
4523 else
4524 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4525 NULL_RTX, NULL_RTX, NULL_RTX);
4528 distribute_links (i3links);
4529 distribute_links (i2links);
4530 distribute_links (i1links);
4531 distribute_links (i0links);
4533 if (REG_P (i2dest))
4535 struct insn_link *link;
4536 rtx_insn *i2_insn = 0;
4537 rtx i2_val = 0, set;
4539 /* The insn that used to set this register doesn't exist, and
4540 this life of the register may not exist either. See if one of
4541 I3's links points to an insn that sets I2DEST. If it does,
4542 that is now the last known value for I2DEST. If we don't update
4543 this and I2 set the register to a value that depended on its old
4544 contents, we will get confused. If this insn is used, thing
4545 will be set correctly in combine_instructions. */
4546 FOR_EACH_LOG_LINK (link, i3)
4547 if ((set = single_set (link->insn)) != 0
4548 && rtx_equal_p (i2dest, SET_DEST (set)))
4549 i2_insn = link->insn, i2_val = SET_SRC (set);
4551 record_value_for_reg (i2dest, i2_insn, i2_val);
4553 /* If the reg formerly set in I2 died only once and that was in I3,
4554 zero its use count so it won't make `reload' do any work. */
4555 if (! added_sets_2
4556 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4557 && ! i2dest_in_i2src
4558 && REGNO (i2dest) < reg_n_sets_max)
4559 INC_REG_N_SETS (REGNO (i2dest), -1);
4562 if (i1 && REG_P (i1dest))
4564 struct insn_link *link;
4565 rtx_insn *i1_insn = 0;
4566 rtx i1_val = 0, set;
4568 FOR_EACH_LOG_LINK (link, i3)
4569 if ((set = single_set (link->insn)) != 0
4570 && rtx_equal_p (i1dest, SET_DEST (set)))
4571 i1_insn = link->insn, i1_val = SET_SRC (set);
4573 record_value_for_reg (i1dest, i1_insn, i1_val);
4575 if (! added_sets_1
4576 && ! i1dest_in_i1src
4577 && REGNO (i1dest) < reg_n_sets_max)
4578 INC_REG_N_SETS (REGNO (i1dest), -1);
4581 if (i0 && REG_P (i0dest))
4583 struct insn_link *link;
4584 rtx_insn *i0_insn = 0;
4585 rtx i0_val = 0, set;
4587 FOR_EACH_LOG_LINK (link, i3)
4588 if ((set = single_set (link->insn)) != 0
4589 && rtx_equal_p (i0dest, SET_DEST (set)))
4590 i0_insn = link->insn, i0_val = SET_SRC (set);
4592 record_value_for_reg (i0dest, i0_insn, i0_val);
4594 if (! added_sets_0
4595 && ! i0dest_in_i0src
4596 && REGNO (i0dest) < reg_n_sets_max)
4597 INC_REG_N_SETS (REGNO (i0dest), -1);
4600 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4601 been made to this insn. The order is important, because newi2pat
4602 can affect nonzero_bits of newpat. */
4603 if (newi2pat)
4604 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4605 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4608 if (undobuf.other_insn != NULL_RTX)
4610 if (dump_file)
4612 fprintf (dump_file, "modifying other_insn ");
4613 dump_insn_slim (dump_file, undobuf.other_insn);
4615 df_insn_rescan (undobuf.other_insn);
4618 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4620 if (dump_file)
4622 fprintf (dump_file, "modifying insn i0 ");
4623 dump_insn_slim (dump_file, i0);
4625 df_insn_rescan (i0);
4628 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4630 if (dump_file)
4632 fprintf (dump_file, "modifying insn i1 ");
4633 dump_insn_slim (dump_file, i1);
4635 df_insn_rescan (i1);
4638 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4640 if (dump_file)
4642 fprintf (dump_file, "modifying insn i2 ");
4643 dump_insn_slim (dump_file, i2);
4645 df_insn_rescan (i2);
4648 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4650 if (dump_file)
4652 fprintf (dump_file, "modifying insn i3 ");
4653 dump_insn_slim (dump_file, i3);
4655 df_insn_rescan (i3);
4658 /* Set new_direct_jump_p if a new return or simple jump instruction
4659 has been created. Adjust the CFG accordingly. */
4660 if (returnjump_p (i3) || any_uncondjump_p (i3))
4662 *new_direct_jump_p = 1;
4663 mark_jump_label (PATTERN (i3), i3, 0);
4664 update_cfg_for_uncondjump (i3);
4667 if (undobuf.other_insn != NULL_RTX
4668 && (returnjump_p (undobuf.other_insn)
4669 || any_uncondjump_p (undobuf.other_insn)))
4671 *new_direct_jump_p = 1;
4672 update_cfg_for_uncondjump (undobuf.other_insn);
4675 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4676 && XEXP (PATTERN (i3), 0) == const1_rtx)
4678 basic_block bb = BLOCK_FOR_INSN (i3);
4679 gcc_assert (bb);
4680 remove_edge (split_block (bb, i3));
4681 emit_barrier_after_bb (bb);
4682 *new_direct_jump_p = 1;
4685 if (undobuf.other_insn
4686 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4687 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4689 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4690 gcc_assert (bb);
4691 remove_edge (split_block (bb, undobuf.other_insn));
4692 emit_barrier_after_bb (bb);
4693 *new_direct_jump_p = 1;
4696 /* A noop might also need cleaning up of CFG, if it comes from the
4697 simplification of a jump. */
4698 if (JUMP_P (i3)
4699 && GET_CODE (newpat) == SET
4700 && SET_SRC (newpat) == pc_rtx
4701 && SET_DEST (newpat) == pc_rtx)
4703 *new_direct_jump_p = 1;
4704 update_cfg_for_uncondjump (i3);
4707 if (undobuf.other_insn != NULL_RTX
4708 && JUMP_P (undobuf.other_insn)
4709 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4710 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4711 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4713 *new_direct_jump_p = 1;
4714 update_cfg_for_uncondjump (undobuf.other_insn);
4717 combine_successes++;
4718 undo_commit ();
4720 if (added_links_insn
4721 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4722 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4723 return added_links_insn;
4724 else
4725 return newi2pat ? i2 : i3;
4728 /* Get a marker for undoing to the current state. */
4730 static void *
4731 get_undo_marker (void)
4733 return undobuf.undos;
4736 /* Undo the modifications up to the marker. */
4738 static void
4739 undo_to_marker (void *marker)
4741 struct undo *undo, *next;
4743 for (undo = undobuf.undos; undo != marker; undo = next)
4745 gcc_assert (undo);
4747 next = undo->next;
4748 switch (undo->kind)
4750 case UNDO_RTX:
4751 *undo->where.r = undo->old_contents.r;
4752 break;
4753 case UNDO_INT:
4754 *undo->where.i = undo->old_contents.i;
4755 break;
4756 case UNDO_MODE:
4757 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4758 break;
4759 case UNDO_LINKS:
4760 *undo->where.l = undo->old_contents.l;
4761 break;
4762 default:
4763 gcc_unreachable ();
4766 undo->next = undobuf.frees;
4767 undobuf.frees = undo;
4770 undobuf.undos = (struct undo *) marker;
4773 /* Undo all the modifications recorded in undobuf. */
4775 static void
4776 undo_all (void)
4778 undo_to_marker (0);
4781 /* We've committed to accepting the changes we made. Move all
4782 of the undos to the free list. */
4784 static void
4785 undo_commit (void)
4787 struct undo *undo, *next;
4789 for (undo = undobuf.undos; undo; undo = next)
4791 next = undo->next;
4792 undo->next = undobuf.frees;
4793 undobuf.frees = undo;
4795 undobuf.undos = 0;
4798 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4799 where we have an arithmetic expression and return that point. LOC will
4800 be inside INSN.
4802 try_combine will call this function to see if an insn can be split into
4803 two insns. */
4805 static rtx *
4806 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4808 rtx x = *loc;
4809 enum rtx_code code = GET_CODE (x);
4810 rtx *split;
4811 unsigned HOST_WIDE_INT len = 0;
4812 HOST_WIDE_INT pos = 0;
4813 int unsignedp = 0;
4814 rtx inner = NULL_RTX;
4815 scalar_int_mode mode, inner_mode;
4817 /* First special-case some codes. */
4818 switch (code)
4820 case SUBREG:
4821 #ifdef INSN_SCHEDULING
4822 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4823 point. */
4824 if (MEM_P (SUBREG_REG (x)))
4825 return loc;
4826 #endif
4827 return find_split_point (&SUBREG_REG (x), insn, false);
4829 case MEM:
4830 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4831 using LO_SUM and HIGH. */
4832 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4833 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4835 machine_mode address_mode = get_address_mode (x);
4837 SUBST (XEXP (x, 0),
4838 gen_rtx_LO_SUM (address_mode,
4839 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4840 XEXP (x, 0)));
4841 return &XEXP (XEXP (x, 0), 0);
4844 /* If we have a PLUS whose second operand is a constant and the
4845 address is not valid, perhaps will can split it up using
4846 the machine-specific way to split large constants. We use
4847 the first pseudo-reg (one of the virtual regs) as a placeholder;
4848 it will not remain in the result. */
4849 if (GET_CODE (XEXP (x, 0)) == PLUS
4850 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4851 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4852 MEM_ADDR_SPACE (x)))
4854 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4855 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4856 subst_insn);
4858 /* This should have produced two insns, each of which sets our
4859 placeholder. If the source of the second is a valid address,
4860 we can make put both sources together and make a split point
4861 in the middle. */
4863 if (seq
4864 && NEXT_INSN (seq) != NULL_RTX
4865 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4866 && NONJUMP_INSN_P (seq)
4867 && GET_CODE (PATTERN (seq)) == SET
4868 && SET_DEST (PATTERN (seq)) == reg
4869 && ! reg_mentioned_p (reg,
4870 SET_SRC (PATTERN (seq)))
4871 && NONJUMP_INSN_P (NEXT_INSN (seq))
4872 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4873 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4874 && memory_address_addr_space_p
4875 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4876 MEM_ADDR_SPACE (x)))
4878 rtx src1 = SET_SRC (PATTERN (seq));
4879 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4881 /* Replace the placeholder in SRC2 with SRC1. If we can
4882 find where in SRC2 it was placed, that can become our
4883 split point and we can replace this address with SRC2.
4884 Just try two obvious places. */
4886 src2 = replace_rtx (src2, reg, src1);
4887 split = 0;
4888 if (XEXP (src2, 0) == src1)
4889 split = &XEXP (src2, 0);
4890 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4891 && XEXP (XEXP (src2, 0), 0) == src1)
4892 split = &XEXP (XEXP (src2, 0), 0);
4894 if (split)
4896 SUBST (XEXP (x, 0), src2);
4897 return split;
4901 /* If that didn't work, perhaps the first operand is complex and
4902 needs to be computed separately, so make a split point there.
4903 This will occur on machines that just support REG + CONST
4904 and have a constant moved through some previous computation. */
4906 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4907 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4908 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4909 return &XEXP (XEXP (x, 0), 0);
4912 /* If we have a PLUS whose first operand is complex, try computing it
4913 separately by making a split there. */
4914 if (GET_CODE (XEXP (x, 0)) == PLUS
4915 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4916 MEM_ADDR_SPACE (x))
4917 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4918 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4919 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4920 return &XEXP (XEXP (x, 0), 0);
4921 break;
4923 case SET:
4924 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4925 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4926 we need to put the operand into a register. So split at that
4927 point. */
4929 if (SET_DEST (x) == cc0_rtx
4930 && GET_CODE (SET_SRC (x)) != COMPARE
4931 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4932 && !OBJECT_P (SET_SRC (x))
4933 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4934 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4935 return &SET_SRC (x);
4937 /* See if we can split SET_SRC as it stands. */
4938 split = find_split_point (&SET_SRC (x), insn, true);
4939 if (split && split != &SET_SRC (x))
4940 return split;
4942 /* See if we can split SET_DEST as it stands. */
4943 split = find_split_point (&SET_DEST (x), insn, false);
4944 if (split && split != &SET_DEST (x))
4945 return split;
4947 /* See if this is a bitfield assignment with everything constant. If
4948 so, this is an IOR of an AND, so split it into that. */
4949 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4950 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4951 &inner_mode)
4952 && HWI_COMPUTABLE_MODE_P (inner_mode)
4953 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4954 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4955 && CONST_INT_P (SET_SRC (x))
4956 && ((INTVAL (XEXP (SET_DEST (x), 1))
4957 + INTVAL (XEXP (SET_DEST (x), 2)))
4958 <= GET_MODE_PRECISION (inner_mode))
4959 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4961 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4962 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4963 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4964 rtx dest = XEXP (SET_DEST (x), 0);
4965 unsigned HOST_WIDE_INT mask
4966 = (HOST_WIDE_INT_1U << len) - 1;
4967 rtx or_mask;
4969 if (BITS_BIG_ENDIAN)
4970 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
4972 or_mask = gen_int_mode (src << pos, inner_mode);
4973 if (src == mask)
4974 SUBST (SET_SRC (x),
4975 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
4976 else
4978 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
4979 SUBST (SET_SRC (x),
4980 simplify_gen_binary (IOR, inner_mode,
4981 simplify_gen_binary (AND, inner_mode,
4982 dest, negmask),
4983 or_mask));
4986 SUBST (SET_DEST (x), dest);
4988 split = find_split_point (&SET_SRC (x), insn, true);
4989 if (split && split != &SET_SRC (x))
4990 return split;
4993 /* Otherwise, see if this is an operation that we can split into two.
4994 If so, try to split that. */
4995 code = GET_CODE (SET_SRC (x));
4997 switch (code)
4999 case AND:
5000 /* If we are AND'ing with a large constant that is only a single
5001 bit and the result is only being used in a context where we
5002 need to know if it is zero or nonzero, replace it with a bit
5003 extraction. This will avoid the large constant, which might
5004 have taken more than one insn to make. If the constant were
5005 not a valid argument to the AND but took only one insn to make,
5006 this is no worse, but if it took more than one insn, it will
5007 be better. */
5009 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5010 && REG_P (XEXP (SET_SRC (x), 0))
5011 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5012 && REG_P (SET_DEST (x))
5013 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5014 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5015 && XEXP (*split, 0) == SET_DEST (x)
5016 && XEXP (*split, 1) == const0_rtx)
5018 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5019 XEXP (SET_SRC (x), 0),
5020 pos, NULL_RTX, 1, 1, 0, 0);
5021 if (extraction != 0)
5023 SUBST (SET_SRC (x), extraction);
5024 return find_split_point (loc, insn, false);
5027 break;
5029 case NE:
5030 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5031 is known to be on, this can be converted into a NEG of a shift. */
5032 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5033 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5034 && 1 <= (pos = exact_log2
5035 (nonzero_bits (XEXP (SET_SRC (x), 0),
5036 GET_MODE (XEXP (SET_SRC (x), 0))))))
5038 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5040 SUBST (SET_SRC (x),
5041 gen_rtx_NEG (mode,
5042 gen_rtx_LSHIFTRT (mode,
5043 XEXP (SET_SRC (x), 0),
5044 GEN_INT (pos))));
5046 split = find_split_point (&SET_SRC (x), insn, true);
5047 if (split && split != &SET_SRC (x))
5048 return split;
5050 break;
5052 case SIGN_EXTEND:
5053 inner = XEXP (SET_SRC (x), 0);
5055 /* We can't optimize if either mode is a partial integer
5056 mode as we don't know how many bits are significant
5057 in those modes. */
5058 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5059 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5060 break;
5062 pos = 0;
5063 len = GET_MODE_PRECISION (inner_mode);
5064 unsignedp = 0;
5065 break;
5067 case SIGN_EXTRACT:
5068 case ZERO_EXTRACT:
5069 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5070 &inner_mode)
5071 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5072 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5074 inner = XEXP (SET_SRC (x), 0);
5075 len = INTVAL (XEXP (SET_SRC (x), 1));
5076 pos = INTVAL (XEXP (SET_SRC (x), 2));
5078 if (BITS_BIG_ENDIAN)
5079 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5080 unsignedp = (code == ZERO_EXTRACT);
5082 break;
5084 default:
5085 break;
5088 if (len && pos >= 0
5089 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5090 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5092 /* For unsigned, we have a choice of a shift followed by an
5093 AND or two shifts. Use two shifts for field sizes where the
5094 constant might be too large. We assume here that we can
5095 always at least get 8-bit constants in an AND insn, which is
5096 true for every current RISC. */
5098 if (unsignedp && len <= 8)
5100 unsigned HOST_WIDE_INT mask
5101 = (HOST_WIDE_INT_1U << len) - 1;
5102 SUBST (SET_SRC (x),
5103 gen_rtx_AND (mode,
5104 gen_rtx_LSHIFTRT
5105 (mode, gen_lowpart (mode, inner),
5106 GEN_INT (pos)),
5107 gen_int_mode (mask, mode)));
5109 split = find_split_point (&SET_SRC (x), insn, true);
5110 if (split && split != &SET_SRC (x))
5111 return split;
5113 else
5115 SUBST (SET_SRC (x),
5116 gen_rtx_fmt_ee
5117 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5118 gen_rtx_ASHIFT (mode,
5119 gen_lowpart (mode, inner),
5120 GEN_INT (GET_MODE_PRECISION (mode)
5121 - len - pos)),
5122 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5124 split = find_split_point (&SET_SRC (x), insn, true);
5125 if (split && split != &SET_SRC (x))
5126 return split;
5130 /* See if this is a simple operation with a constant as the second
5131 operand. It might be that this constant is out of range and hence
5132 could be used as a split point. */
5133 if (BINARY_P (SET_SRC (x))
5134 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5135 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5136 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5137 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5138 return &XEXP (SET_SRC (x), 1);
5140 /* Finally, see if this is a simple operation with its first operand
5141 not in a register. The operation might require this operand in a
5142 register, so return it as a split point. We can always do this
5143 because if the first operand were another operation, we would have
5144 already found it as a split point. */
5145 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5146 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5147 return &XEXP (SET_SRC (x), 0);
5149 return 0;
5151 case AND:
5152 case IOR:
5153 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5154 it is better to write this as (not (ior A B)) so we can split it.
5155 Similarly for IOR. */
5156 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5158 SUBST (*loc,
5159 gen_rtx_NOT (GET_MODE (x),
5160 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5161 GET_MODE (x),
5162 XEXP (XEXP (x, 0), 0),
5163 XEXP (XEXP (x, 1), 0))));
5164 return find_split_point (loc, insn, set_src);
5167 /* Many RISC machines have a large set of logical insns. If the
5168 second operand is a NOT, put it first so we will try to split the
5169 other operand first. */
5170 if (GET_CODE (XEXP (x, 1)) == NOT)
5172 rtx tem = XEXP (x, 0);
5173 SUBST (XEXP (x, 0), XEXP (x, 1));
5174 SUBST (XEXP (x, 1), tem);
5176 break;
5178 case PLUS:
5179 case MINUS:
5180 /* Canonicalization can produce (minus A (mult B C)), where C is a
5181 constant. It may be better to try splitting (plus (mult B -C) A)
5182 instead if this isn't a multiply by a power of two. */
5183 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5184 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5185 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5187 machine_mode mode = GET_MODE (x);
5188 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5189 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5190 SUBST (*loc, gen_rtx_PLUS (mode,
5191 gen_rtx_MULT (mode,
5192 XEXP (XEXP (x, 1), 0),
5193 gen_int_mode (other_int,
5194 mode)),
5195 XEXP (x, 0)));
5196 return find_split_point (loc, insn, set_src);
5199 /* Split at a multiply-accumulate instruction. However if this is
5200 the SET_SRC, we likely do not have such an instruction and it's
5201 worthless to try this split. */
5202 if (!set_src
5203 && (GET_CODE (XEXP (x, 0)) == MULT
5204 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5205 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5206 return loc;
5208 default:
5209 break;
5212 /* Otherwise, select our actions depending on our rtx class. */
5213 switch (GET_RTX_CLASS (code))
5215 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5216 case RTX_TERNARY:
5217 split = find_split_point (&XEXP (x, 2), insn, false);
5218 if (split)
5219 return split;
5220 /* fall through */
5221 case RTX_BIN_ARITH:
5222 case RTX_COMM_ARITH:
5223 case RTX_COMPARE:
5224 case RTX_COMM_COMPARE:
5225 split = find_split_point (&XEXP (x, 1), insn, false);
5226 if (split)
5227 return split;
5228 /* fall through */
5229 case RTX_UNARY:
5230 /* Some machines have (and (shift ...) ...) insns. If X is not
5231 an AND, but XEXP (X, 0) is, use it as our split point. */
5232 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5233 return &XEXP (x, 0);
5235 split = find_split_point (&XEXP (x, 0), insn, false);
5236 if (split)
5237 return split;
5238 return loc;
5240 default:
5241 /* Otherwise, we don't have a split point. */
5242 return 0;
5246 /* Throughout X, replace FROM with TO, and return the result.
5247 The result is TO if X is FROM;
5248 otherwise the result is X, but its contents may have been modified.
5249 If they were modified, a record was made in undobuf so that
5250 undo_all will (among other things) return X to its original state.
5252 If the number of changes necessary is too much to record to undo,
5253 the excess changes are not made, so the result is invalid.
5254 The changes already made can still be undone.
5255 undobuf.num_undo is incremented for such changes, so by testing that
5256 the caller can tell whether the result is valid.
5258 `n_occurrences' is incremented each time FROM is replaced.
5260 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5262 IN_COND is nonzero if we are at the top level of a condition.
5264 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5265 by copying if `n_occurrences' is nonzero. */
5267 static rtx
5268 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5270 enum rtx_code code = GET_CODE (x);
5271 machine_mode op0_mode = VOIDmode;
5272 const char *fmt;
5273 int len, i;
5274 rtx new_rtx;
5276 /* Two expressions are equal if they are identical copies of a shared
5277 RTX or if they are both registers with the same register number
5278 and mode. */
5280 #define COMBINE_RTX_EQUAL_P(X,Y) \
5281 ((X) == (Y) \
5282 || (REG_P (X) && REG_P (Y) \
5283 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5285 /* Do not substitute into clobbers of regs -- this will never result in
5286 valid RTL. */
5287 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5288 return x;
5290 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5292 n_occurrences++;
5293 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5296 /* If X and FROM are the same register but different modes, they
5297 will not have been seen as equal above. However, the log links code
5298 will make a LOG_LINKS entry for that case. If we do nothing, we
5299 will try to rerecognize our original insn and, when it succeeds,
5300 we will delete the feeding insn, which is incorrect.
5302 So force this insn not to match in this (rare) case. */
5303 if (! in_dest && code == REG && REG_P (from)
5304 && reg_overlap_mentioned_p (x, from))
5305 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5307 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5308 of which may contain things that can be combined. */
5309 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5310 return x;
5312 /* It is possible to have a subexpression appear twice in the insn.
5313 Suppose that FROM is a register that appears within TO.
5314 Then, after that subexpression has been scanned once by `subst',
5315 the second time it is scanned, TO may be found. If we were
5316 to scan TO here, we would find FROM within it and create a
5317 self-referent rtl structure which is completely wrong. */
5318 if (COMBINE_RTX_EQUAL_P (x, to))
5319 return to;
5321 /* Parallel asm_operands need special attention because all of the
5322 inputs are shared across the arms. Furthermore, unsharing the
5323 rtl results in recognition failures. Failure to handle this case
5324 specially can result in circular rtl.
5326 Solve this by doing a normal pass across the first entry of the
5327 parallel, and only processing the SET_DESTs of the subsequent
5328 entries. Ug. */
5330 if (code == PARALLEL
5331 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5332 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5334 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5336 /* If this substitution failed, this whole thing fails. */
5337 if (GET_CODE (new_rtx) == CLOBBER
5338 && XEXP (new_rtx, 0) == const0_rtx)
5339 return new_rtx;
5341 SUBST (XVECEXP (x, 0, 0), new_rtx);
5343 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5345 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5347 if (!REG_P (dest)
5348 && GET_CODE (dest) != CC0
5349 && GET_CODE (dest) != PC)
5351 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5353 /* If this substitution failed, this whole thing fails. */
5354 if (GET_CODE (new_rtx) == CLOBBER
5355 && XEXP (new_rtx, 0) == const0_rtx)
5356 return new_rtx;
5358 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5362 else
5364 len = GET_RTX_LENGTH (code);
5365 fmt = GET_RTX_FORMAT (code);
5367 /* We don't need to process a SET_DEST that is a register, CC0,
5368 or PC, so set up to skip this common case. All other cases
5369 where we want to suppress replacing something inside a
5370 SET_SRC are handled via the IN_DEST operand. */
5371 if (code == SET
5372 && (REG_P (SET_DEST (x))
5373 || GET_CODE (SET_DEST (x)) == CC0
5374 || GET_CODE (SET_DEST (x)) == PC))
5375 fmt = "ie";
5377 /* Trying to simplify the operands of a widening MULT is not likely
5378 to create RTL matching a machine insn. */
5379 if (code == MULT
5380 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5381 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5382 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5383 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5384 && REG_P (XEXP (XEXP (x, 0), 0))
5385 && REG_P (XEXP (XEXP (x, 1), 0))
5386 && from == to)
5387 return x;
5390 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5391 constant. */
5392 if (fmt[0] == 'e')
5393 op0_mode = GET_MODE (XEXP (x, 0));
5395 for (i = 0; i < len; i++)
5397 if (fmt[i] == 'E')
5399 int j;
5400 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5402 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5404 new_rtx = (unique_copy && n_occurrences
5405 ? copy_rtx (to) : to);
5406 n_occurrences++;
5408 else
5410 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5411 unique_copy);
5413 /* If this substitution failed, this whole thing
5414 fails. */
5415 if (GET_CODE (new_rtx) == CLOBBER
5416 && XEXP (new_rtx, 0) == const0_rtx)
5417 return new_rtx;
5420 SUBST (XVECEXP (x, i, j), new_rtx);
5423 else if (fmt[i] == 'e')
5425 /* If this is a register being set, ignore it. */
5426 new_rtx = XEXP (x, i);
5427 if (in_dest
5428 && i == 0
5429 && (((code == SUBREG || code == ZERO_EXTRACT)
5430 && REG_P (new_rtx))
5431 || code == STRICT_LOW_PART))
5434 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5436 /* In general, don't install a subreg involving two
5437 modes not tieable. It can worsen register
5438 allocation, and can even make invalid reload
5439 insns, since the reg inside may need to be copied
5440 from in the outside mode, and that may be invalid
5441 if it is an fp reg copied in integer mode.
5443 We allow two exceptions to this: It is valid if
5444 it is inside another SUBREG and the mode of that
5445 SUBREG and the mode of the inside of TO is
5446 tieable and it is valid if X is a SET that copies
5447 FROM to CC0. */
5449 if (GET_CODE (to) == SUBREG
5450 && !targetm.modes_tieable_p (GET_MODE (to),
5451 GET_MODE (SUBREG_REG (to)))
5452 && ! (code == SUBREG
5453 && (targetm.modes_tieable_p
5454 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5455 && (!HAVE_cc0
5456 || (! (code == SET
5457 && i == 1
5458 && XEXP (x, 0) == cc0_rtx))))
5459 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5461 if (code == SUBREG
5462 && REG_P (to)
5463 && REGNO (to) < FIRST_PSEUDO_REGISTER
5464 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5465 SUBREG_BYTE (x),
5466 GET_MODE (x)) < 0)
5467 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5469 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5470 n_occurrences++;
5472 else
5473 /* If we are in a SET_DEST, suppress most cases unless we
5474 have gone inside a MEM, in which case we want to
5475 simplify the address. We assume here that things that
5476 are actually part of the destination have their inner
5477 parts in the first expression. This is true for SUBREG,
5478 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5479 things aside from REG and MEM that should appear in a
5480 SET_DEST. */
5481 new_rtx = subst (XEXP (x, i), from, to,
5482 (((in_dest
5483 && (code == SUBREG || code == STRICT_LOW_PART
5484 || code == ZERO_EXTRACT))
5485 || code == SET)
5486 && i == 0),
5487 code == IF_THEN_ELSE && i == 0,
5488 unique_copy);
5490 /* If we found that we will have to reject this combination,
5491 indicate that by returning the CLOBBER ourselves, rather than
5492 an expression containing it. This will speed things up as
5493 well as prevent accidents where two CLOBBERs are considered
5494 to be equal, thus producing an incorrect simplification. */
5496 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5497 return new_rtx;
5499 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5501 machine_mode mode = GET_MODE (x);
5503 x = simplify_subreg (GET_MODE (x), new_rtx,
5504 GET_MODE (SUBREG_REG (x)),
5505 SUBREG_BYTE (x));
5506 if (! x)
5507 x = gen_rtx_CLOBBER (mode, const0_rtx);
5509 else if (CONST_SCALAR_INT_P (new_rtx)
5510 && GET_CODE (x) == ZERO_EXTEND)
5512 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5513 new_rtx, GET_MODE (XEXP (x, 0)));
5514 gcc_assert (x);
5516 else
5517 SUBST (XEXP (x, i), new_rtx);
5522 /* Check if we are loading something from the constant pool via float
5523 extension; in this case we would undo compress_float_constant
5524 optimization and degenerate constant load to an immediate value. */
5525 if (GET_CODE (x) == FLOAT_EXTEND
5526 && MEM_P (XEXP (x, 0))
5527 && MEM_READONLY_P (XEXP (x, 0)))
5529 rtx tmp = avoid_constant_pool_reference (x);
5530 if (x != tmp)
5531 return x;
5534 /* Try to simplify X. If the simplification changed the code, it is likely
5535 that further simplification will help, so loop, but limit the number
5536 of repetitions that will be performed. */
5538 for (i = 0; i < 4; i++)
5540 /* If X is sufficiently simple, don't bother trying to do anything
5541 with it. */
5542 if (code != CONST_INT && code != REG && code != CLOBBER)
5543 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5545 if (GET_CODE (x) == code)
5546 break;
5548 code = GET_CODE (x);
5550 /* We no longer know the original mode of operand 0 since we
5551 have changed the form of X) */
5552 op0_mode = VOIDmode;
5555 return x;
5558 /* If X is a commutative operation whose operands are not in the canonical
5559 order, use substitutions to swap them. */
5561 static void
5562 maybe_swap_commutative_operands (rtx x)
5564 if (COMMUTATIVE_ARITH_P (x)
5565 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5567 rtx temp = XEXP (x, 0);
5568 SUBST (XEXP (x, 0), XEXP (x, 1));
5569 SUBST (XEXP (x, 1), temp);
5573 /* Simplify X, a piece of RTL. We just operate on the expression at the
5574 outer level; call `subst' to simplify recursively. Return the new
5575 expression.
5577 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5578 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5579 of a condition. */
5581 static rtx
5582 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5583 int in_cond)
5585 enum rtx_code code = GET_CODE (x);
5586 machine_mode mode = GET_MODE (x);
5587 scalar_int_mode int_mode;
5588 rtx temp;
5589 int i;
5591 /* If this is a commutative operation, put a constant last and a complex
5592 expression first. We don't need to do this for comparisons here. */
5593 maybe_swap_commutative_operands (x);
5595 /* Try to fold this expression in case we have constants that weren't
5596 present before. */
5597 temp = 0;
5598 switch (GET_RTX_CLASS (code))
5600 case RTX_UNARY:
5601 if (op0_mode == VOIDmode)
5602 op0_mode = GET_MODE (XEXP (x, 0));
5603 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5604 break;
5605 case RTX_COMPARE:
5606 case RTX_COMM_COMPARE:
5608 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5609 if (cmp_mode == VOIDmode)
5611 cmp_mode = GET_MODE (XEXP (x, 1));
5612 if (cmp_mode == VOIDmode)
5613 cmp_mode = op0_mode;
5615 temp = simplify_relational_operation (code, mode, cmp_mode,
5616 XEXP (x, 0), XEXP (x, 1));
5618 break;
5619 case RTX_COMM_ARITH:
5620 case RTX_BIN_ARITH:
5621 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5622 break;
5623 case RTX_BITFIELD_OPS:
5624 case RTX_TERNARY:
5625 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5626 XEXP (x, 1), XEXP (x, 2));
5627 break;
5628 default:
5629 break;
5632 if (temp)
5634 x = temp;
5635 code = GET_CODE (temp);
5636 op0_mode = VOIDmode;
5637 mode = GET_MODE (temp);
5640 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5641 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5642 things. Check for cases where both arms are testing the same
5643 condition.
5645 Don't do anything if all operands are very simple. */
5647 if ((BINARY_P (x)
5648 && ((!OBJECT_P (XEXP (x, 0))
5649 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5650 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5651 || (!OBJECT_P (XEXP (x, 1))
5652 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5653 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5654 || (UNARY_P (x)
5655 && (!OBJECT_P (XEXP (x, 0))
5656 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5657 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5659 rtx cond, true_rtx, false_rtx;
5661 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5662 if (cond != 0
5663 /* If everything is a comparison, what we have is highly unlikely
5664 to be simpler, so don't use it. */
5665 && ! (COMPARISON_P (x)
5666 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5668 rtx cop1 = const0_rtx;
5669 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5671 if (cond_code == NE && COMPARISON_P (cond))
5672 return x;
5674 /* Simplify the alternative arms; this may collapse the true and
5675 false arms to store-flag values. Be careful to use copy_rtx
5676 here since true_rtx or false_rtx might share RTL with x as a
5677 result of the if_then_else_cond call above. */
5678 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5679 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5681 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5682 is unlikely to be simpler. */
5683 if (general_operand (true_rtx, VOIDmode)
5684 && general_operand (false_rtx, VOIDmode))
5686 enum rtx_code reversed;
5688 /* Restarting if we generate a store-flag expression will cause
5689 us to loop. Just drop through in this case. */
5691 /* If the result values are STORE_FLAG_VALUE and zero, we can
5692 just make the comparison operation. */
5693 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5694 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5695 cond, cop1);
5696 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5697 && ((reversed = reversed_comparison_code_parts
5698 (cond_code, cond, cop1, NULL))
5699 != UNKNOWN))
5700 x = simplify_gen_relational (reversed, mode, VOIDmode,
5701 cond, cop1);
5703 /* Likewise, we can make the negate of a comparison operation
5704 if the result values are - STORE_FLAG_VALUE and zero. */
5705 else if (CONST_INT_P (true_rtx)
5706 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5707 && false_rtx == const0_rtx)
5708 x = simplify_gen_unary (NEG, mode,
5709 simplify_gen_relational (cond_code,
5710 mode, VOIDmode,
5711 cond, cop1),
5712 mode);
5713 else if (CONST_INT_P (false_rtx)
5714 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5715 && true_rtx == const0_rtx
5716 && ((reversed = reversed_comparison_code_parts
5717 (cond_code, cond, cop1, NULL))
5718 != UNKNOWN))
5719 x = simplify_gen_unary (NEG, mode,
5720 simplify_gen_relational (reversed,
5721 mode, VOIDmode,
5722 cond, cop1),
5723 mode);
5724 else
5725 return gen_rtx_IF_THEN_ELSE (mode,
5726 simplify_gen_relational (cond_code,
5727 mode,
5728 VOIDmode,
5729 cond,
5730 cop1),
5731 true_rtx, false_rtx);
5733 code = GET_CODE (x);
5734 op0_mode = VOIDmode;
5739 /* First see if we can apply the inverse distributive law. */
5740 if (code == PLUS || code == MINUS
5741 || code == AND || code == IOR || code == XOR)
5743 x = apply_distributive_law (x);
5744 code = GET_CODE (x);
5745 op0_mode = VOIDmode;
5748 /* If CODE is an associative operation not otherwise handled, see if we
5749 can associate some operands. This can win if they are constants or
5750 if they are logically related (i.e. (a & b) & a). */
5751 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5752 || code == AND || code == IOR || code == XOR
5753 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5754 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5755 || (flag_associative_math && FLOAT_MODE_P (mode))))
5757 if (GET_CODE (XEXP (x, 0)) == code)
5759 rtx other = XEXP (XEXP (x, 0), 0);
5760 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5761 rtx inner_op1 = XEXP (x, 1);
5762 rtx inner;
5764 /* Make sure we pass the constant operand if any as the second
5765 one if this is a commutative operation. */
5766 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5767 std::swap (inner_op0, inner_op1);
5768 inner = simplify_binary_operation (code == MINUS ? PLUS
5769 : code == DIV ? MULT
5770 : code,
5771 mode, inner_op0, inner_op1);
5773 /* For commutative operations, try the other pair if that one
5774 didn't simplify. */
5775 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5777 other = XEXP (XEXP (x, 0), 1);
5778 inner = simplify_binary_operation (code, mode,
5779 XEXP (XEXP (x, 0), 0),
5780 XEXP (x, 1));
5783 if (inner)
5784 return simplify_gen_binary (code, mode, other, inner);
5788 /* A little bit of algebraic simplification here. */
5789 switch (code)
5791 case MEM:
5792 /* Ensure that our address has any ASHIFTs converted to MULT in case
5793 address-recognizing predicates are called later. */
5794 temp = make_compound_operation (XEXP (x, 0), MEM);
5795 SUBST (XEXP (x, 0), temp);
5796 break;
5798 case SUBREG:
5799 if (op0_mode == VOIDmode)
5800 op0_mode = GET_MODE (SUBREG_REG (x));
5802 /* See if this can be moved to simplify_subreg. */
5803 if (CONSTANT_P (SUBREG_REG (x))
5804 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5805 /* Don't call gen_lowpart if the inner mode
5806 is VOIDmode and we cannot simplify it, as SUBREG without
5807 inner mode is invalid. */
5808 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5809 || gen_lowpart_common (mode, SUBREG_REG (x))))
5810 return gen_lowpart (mode, SUBREG_REG (x));
5812 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5813 break;
5815 rtx temp;
5816 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5817 SUBREG_BYTE (x));
5818 if (temp)
5819 return temp;
5821 /* If op is known to have all lower bits zero, the result is zero. */
5822 scalar_int_mode int_mode, int_op0_mode;
5823 if (!in_dest
5824 && is_a <scalar_int_mode> (mode, &int_mode)
5825 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5826 && (GET_MODE_PRECISION (int_mode)
5827 < GET_MODE_PRECISION (int_op0_mode))
5828 && (subreg_lowpart_offset (int_mode, int_op0_mode)
5829 == SUBREG_BYTE (x))
5830 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5831 && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
5832 & GET_MODE_MASK (int_mode)) == 0)
5833 return CONST0_RTX (int_mode);
5836 /* Don't change the mode of the MEM if that would change the meaning
5837 of the address. */
5838 if (MEM_P (SUBREG_REG (x))
5839 && (MEM_VOLATILE_P (SUBREG_REG (x))
5840 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5841 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5842 return gen_rtx_CLOBBER (mode, const0_rtx);
5844 /* Note that we cannot do any narrowing for non-constants since
5845 we might have been counting on using the fact that some bits were
5846 zero. We now do this in the SET. */
5848 break;
5850 case NEG:
5851 temp = expand_compound_operation (XEXP (x, 0));
5853 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5854 replaced by (lshiftrt X C). This will convert
5855 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5857 if (GET_CODE (temp) == ASHIFTRT
5858 && CONST_INT_P (XEXP (temp, 1))
5859 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5860 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5861 INTVAL (XEXP (temp, 1)));
5863 /* If X has only a single bit that might be nonzero, say, bit I, convert
5864 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5865 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5866 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5867 or a SUBREG of one since we'd be making the expression more
5868 complex if it was just a register. */
5870 if (!REG_P (temp)
5871 && ! (GET_CODE (temp) == SUBREG
5872 && REG_P (SUBREG_REG (temp)))
5873 && is_a <scalar_int_mode> (mode, &int_mode)
5874 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5876 rtx temp1 = simplify_shift_const
5877 (NULL_RTX, ASHIFTRT, int_mode,
5878 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5879 GET_MODE_PRECISION (int_mode) - 1 - i),
5880 GET_MODE_PRECISION (int_mode) - 1 - i);
5882 /* If all we did was surround TEMP with the two shifts, we
5883 haven't improved anything, so don't use it. Otherwise,
5884 we are better off with TEMP1. */
5885 if (GET_CODE (temp1) != ASHIFTRT
5886 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5887 || XEXP (XEXP (temp1, 0), 0) != temp)
5888 return temp1;
5890 break;
5892 case TRUNCATE:
5893 /* We can't handle truncation to a partial integer mode here
5894 because we don't know the real bitsize of the partial
5895 integer mode. */
5896 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5897 break;
5899 if (HWI_COMPUTABLE_MODE_P (mode))
5900 SUBST (XEXP (x, 0),
5901 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5902 GET_MODE_MASK (mode), 0));
5904 /* We can truncate a constant value and return it. */
5905 if (CONST_INT_P (XEXP (x, 0)))
5906 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5908 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5909 whose value is a comparison can be replaced with a subreg if
5910 STORE_FLAG_VALUE permits. */
5911 if (HWI_COMPUTABLE_MODE_P (mode)
5912 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5913 && (temp = get_last_value (XEXP (x, 0)))
5914 && COMPARISON_P (temp))
5915 return gen_lowpart (mode, XEXP (x, 0));
5916 break;
5918 case CONST:
5919 /* (const (const X)) can become (const X). Do it this way rather than
5920 returning the inner CONST since CONST can be shared with a
5921 REG_EQUAL note. */
5922 if (GET_CODE (XEXP (x, 0)) == CONST)
5923 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5924 break;
5926 case LO_SUM:
5927 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5928 can add in an offset. find_split_point will split this address up
5929 again if it doesn't match. */
5930 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5931 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5932 return XEXP (x, 1);
5933 break;
5935 case PLUS:
5936 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5937 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5938 bit-field and can be replaced by either a sign_extend or a
5939 sign_extract. The `and' may be a zero_extend and the two
5940 <c>, -<c> constants may be reversed. */
5941 if (GET_CODE (XEXP (x, 0)) == XOR
5942 && is_a <scalar_int_mode> (mode, &int_mode)
5943 && CONST_INT_P (XEXP (x, 1))
5944 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5945 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5946 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5947 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5948 && HWI_COMPUTABLE_MODE_P (int_mode)
5949 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5950 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5951 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5952 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5953 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5954 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5955 == (unsigned int) i + 1))))
5956 return simplify_shift_const
5957 (NULL_RTX, ASHIFTRT, int_mode,
5958 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5959 XEXP (XEXP (XEXP (x, 0), 0), 0),
5960 GET_MODE_PRECISION (int_mode) - (i + 1)),
5961 GET_MODE_PRECISION (int_mode) - (i + 1));
5963 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5964 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5965 the bitsize of the mode - 1. This allows simplification of
5966 "a = (b & 8) == 0;" */
5967 if (XEXP (x, 1) == constm1_rtx
5968 && !REG_P (XEXP (x, 0))
5969 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5970 && REG_P (SUBREG_REG (XEXP (x, 0))))
5971 && is_a <scalar_int_mode> (mode, &int_mode)
5972 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
5973 return simplify_shift_const
5974 (NULL_RTX, ASHIFTRT, int_mode,
5975 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5976 gen_rtx_XOR (int_mode, XEXP (x, 0),
5977 const1_rtx),
5978 GET_MODE_PRECISION (int_mode) - 1),
5979 GET_MODE_PRECISION (int_mode) - 1);
5981 /* If we are adding two things that have no bits in common, convert
5982 the addition into an IOR. This will often be further simplified,
5983 for example in cases like ((a & 1) + (a & 2)), which can
5984 become a & 3. */
5986 if (HWI_COMPUTABLE_MODE_P (mode)
5987 && (nonzero_bits (XEXP (x, 0), mode)
5988 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5990 /* Try to simplify the expression further. */
5991 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5992 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5994 /* If we could, great. If not, do not go ahead with the IOR
5995 replacement, since PLUS appears in many special purpose
5996 address arithmetic instructions. */
5997 if (GET_CODE (temp) != CLOBBER
5998 && (GET_CODE (temp) != IOR
5999 || ((XEXP (temp, 0) != XEXP (x, 0)
6000 || XEXP (temp, 1) != XEXP (x, 1))
6001 && (XEXP (temp, 0) != XEXP (x, 1)
6002 || XEXP (temp, 1) != XEXP (x, 0)))))
6003 return temp;
6006 /* Canonicalize x + x into x << 1. */
6007 if (GET_MODE_CLASS (mode) == MODE_INT
6008 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6009 && !side_effects_p (XEXP (x, 0)))
6010 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6012 break;
6014 case MINUS:
6015 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6016 (and <foo> (const_int pow2-1)) */
6017 if (is_a <scalar_int_mode> (mode, &int_mode)
6018 && GET_CODE (XEXP (x, 1)) == AND
6019 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6020 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6021 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6022 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6023 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6024 break;
6026 case MULT:
6027 /* If we have (mult (plus A B) C), apply the distributive law and then
6028 the inverse distributive law to see if things simplify. This
6029 occurs mostly in addresses, often when unrolling loops. */
6031 if (GET_CODE (XEXP (x, 0)) == PLUS)
6033 rtx result = distribute_and_simplify_rtx (x, 0);
6034 if (result)
6035 return result;
6038 /* Try simplify a*(b/c) as (a*b)/c. */
6039 if (FLOAT_MODE_P (mode) && flag_associative_math
6040 && GET_CODE (XEXP (x, 0)) == DIV)
6042 rtx tem = simplify_binary_operation (MULT, mode,
6043 XEXP (XEXP (x, 0), 0),
6044 XEXP (x, 1));
6045 if (tem)
6046 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6048 break;
6050 case UDIV:
6051 /* If this is a divide by a power of two, treat it as a shift if
6052 its first operand is a shift. */
6053 if (is_a <scalar_int_mode> (mode, &int_mode)
6054 && CONST_INT_P (XEXP (x, 1))
6055 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6056 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6057 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6058 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6059 || GET_CODE (XEXP (x, 0)) == ROTATE
6060 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6061 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6062 XEXP (x, 0), i);
6063 break;
6065 case EQ: case NE:
6066 case GT: case GTU: case GE: case GEU:
6067 case LT: case LTU: case LE: case LEU:
6068 case UNEQ: case LTGT:
6069 case UNGT: case UNGE:
6070 case UNLT: case UNLE:
6071 case UNORDERED: case ORDERED:
6072 /* If the first operand is a condition code, we can't do anything
6073 with it. */
6074 if (GET_CODE (XEXP (x, 0)) == COMPARE
6075 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6076 && ! CC0_P (XEXP (x, 0))))
6078 rtx op0 = XEXP (x, 0);
6079 rtx op1 = XEXP (x, 1);
6080 enum rtx_code new_code;
6082 if (GET_CODE (op0) == COMPARE)
6083 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6085 /* Simplify our comparison, if possible. */
6086 new_code = simplify_comparison (code, &op0, &op1);
6088 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6089 if only the low-order bit is possibly nonzero in X (such as when
6090 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6091 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6092 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6093 (plus X 1).
6095 Remove any ZERO_EXTRACT we made when thinking this was a
6096 comparison. It may now be simpler to use, e.g., an AND. If a
6097 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6098 the call to make_compound_operation in the SET case.
6100 Don't apply these optimizations if the caller would
6101 prefer a comparison rather than a value.
6102 E.g., for the condition in an IF_THEN_ELSE most targets need
6103 an explicit comparison. */
6105 if (in_cond)
6108 else if (STORE_FLAG_VALUE == 1
6109 && new_code == NE
6110 && is_int_mode (mode, &int_mode)
6111 && op1 == const0_rtx
6112 && int_mode == GET_MODE (op0)
6113 && nonzero_bits (op0, int_mode) == 1)
6114 return gen_lowpart (int_mode,
6115 expand_compound_operation (op0));
6117 else if (STORE_FLAG_VALUE == 1
6118 && new_code == NE
6119 && is_int_mode (mode, &int_mode)
6120 && op1 == const0_rtx
6121 && int_mode == GET_MODE (op0)
6122 && (num_sign_bit_copies (op0, int_mode)
6123 == GET_MODE_PRECISION (int_mode)))
6125 op0 = expand_compound_operation (op0);
6126 return simplify_gen_unary (NEG, int_mode,
6127 gen_lowpart (int_mode, op0),
6128 int_mode);
6131 else if (STORE_FLAG_VALUE == 1
6132 && new_code == EQ
6133 && is_int_mode (mode, &int_mode)
6134 && op1 == const0_rtx
6135 && int_mode == GET_MODE (op0)
6136 && nonzero_bits (op0, int_mode) == 1)
6138 op0 = expand_compound_operation (op0);
6139 return simplify_gen_binary (XOR, int_mode,
6140 gen_lowpart (int_mode, op0),
6141 const1_rtx);
6144 else if (STORE_FLAG_VALUE == 1
6145 && new_code == EQ
6146 && is_int_mode (mode, &int_mode)
6147 && op1 == const0_rtx
6148 && int_mode == GET_MODE (op0)
6149 && (num_sign_bit_copies (op0, int_mode)
6150 == GET_MODE_PRECISION (int_mode)))
6152 op0 = expand_compound_operation (op0);
6153 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6156 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6157 those above. */
6158 if (in_cond)
6161 else if (STORE_FLAG_VALUE == -1
6162 && new_code == NE
6163 && is_int_mode (mode, &int_mode)
6164 && op1 == const0_rtx
6165 && int_mode == GET_MODE (op0)
6166 && (num_sign_bit_copies (op0, int_mode)
6167 == GET_MODE_PRECISION (int_mode)))
6168 return gen_lowpart (int_mode, expand_compound_operation (op0));
6170 else if (STORE_FLAG_VALUE == -1
6171 && new_code == NE
6172 && is_int_mode (mode, &int_mode)
6173 && op1 == const0_rtx
6174 && int_mode == GET_MODE (op0)
6175 && nonzero_bits (op0, int_mode) == 1)
6177 op0 = expand_compound_operation (op0);
6178 return simplify_gen_unary (NEG, int_mode,
6179 gen_lowpart (int_mode, op0),
6180 int_mode);
6183 else if (STORE_FLAG_VALUE == -1
6184 && new_code == EQ
6185 && is_int_mode (mode, &int_mode)
6186 && op1 == const0_rtx
6187 && int_mode == GET_MODE (op0)
6188 && (num_sign_bit_copies (op0, int_mode)
6189 == GET_MODE_PRECISION (int_mode)))
6191 op0 = expand_compound_operation (op0);
6192 return simplify_gen_unary (NOT, int_mode,
6193 gen_lowpart (int_mode, op0),
6194 int_mode);
6197 /* If X is 0/1, (eq X 0) is X-1. */
6198 else if (STORE_FLAG_VALUE == -1
6199 && new_code == EQ
6200 && is_int_mode (mode, &int_mode)
6201 && op1 == const0_rtx
6202 && int_mode == GET_MODE (op0)
6203 && nonzero_bits (op0, int_mode) == 1)
6205 op0 = expand_compound_operation (op0);
6206 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6209 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6210 one bit that might be nonzero, we can convert (ne x 0) to
6211 (ashift x c) where C puts the bit in the sign bit. Remove any
6212 AND with STORE_FLAG_VALUE when we are done, since we are only
6213 going to test the sign bit. */
6214 if (new_code == NE
6215 && is_int_mode (mode, &int_mode)
6216 && HWI_COMPUTABLE_MODE_P (int_mode)
6217 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6218 && op1 == const0_rtx
6219 && int_mode == GET_MODE (op0)
6220 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6222 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6223 expand_compound_operation (op0),
6224 GET_MODE_PRECISION (int_mode) - 1 - i);
6225 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6226 return XEXP (x, 0);
6227 else
6228 return x;
6231 /* If the code changed, return a whole new comparison.
6232 We also need to avoid using SUBST in cases where
6233 simplify_comparison has widened a comparison with a CONST_INT,
6234 since in that case the wider CONST_INT may fail the sanity
6235 checks in do_SUBST. */
6236 if (new_code != code
6237 || (CONST_INT_P (op1)
6238 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6239 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6240 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6242 /* Otherwise, keep this operation, but maybe change its operands.
6243 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6244 SUBST (XEXP (x, 0), op0);
6245 SUBST (XEXP (x, 1), op1);
6247 break;
6249 case IF_THEN_ELSE:
6250 return simplify_if_then_else (x);
6252 case ZERO_EXTRACT:
6253 case SIGN_EXTRACT:
6254 case ZERO_EXTEND:
6255 case SIGN_EXTEND:
6256 /* If we are processing SET_DEST, we are done. */
6257 if (in_dest)
6258 return x;
6260 return expand_compound_operation (x);
6262 case SET:
6263 return simplify_set (x);
6265 case AND:
6266 case IOR:
6267 return simplify_logical (x);
6269 case ASHIFT:
6270 case LSHIFTRT:
6271 case ASHIFTRT:
6272 case ROTATE:
6273 case ROTATERT:
6274 /* If this is a shift by a constant amount, simplify it. */
6275 if (CONST_INT_P (XEXP (x, 1)))
6276 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6277 INTVAL (XEXP (x, 1)));
6279 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6280 SUBST (XEXP (x, 1),
6281 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6282 (HOST_WIDE_INT_1U
6283 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6284 - 1,
6285 0));
6286 break;
6288 default:
6289 break;
6292 return x;
6295 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6297 static rtx
6298 simplify_if_then_else (rtx x)
6300 machine_mode mode = GET_MODE (x);
6301 rtx cond = XEXP (x, 0);
6302 rtx true_rtx = XEXP (x, 1);
6303 rtx false_rtx = XEXP (x, 2);
6304 enum rtx_code true_code = GET_CODE (cond);
6305 int comparison_p = COMPARISON_P (cond);
6306 rtx temp;
6307 int i;
6308 enum rtx_code false_code;
6309 rtx reversed;
6310 scalar_int_mode int_mode, inner_mode;
6312 /* Simplify storing of the truth value. */
6313 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6314 return simplify_gen_relational (true_code, mode, VOIDmode,
6315 XEXP (cond, 0), XEXP (cond, 1));
6317 /* Also when the truth value has to be reversed. */
6318 if (comparison_p
6319 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6320 && (reversed = reversed_comparison (cond, mode)))
6321 return reversed;
6323 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6324 in it is being compared against certain values. Get the true and false
6325 comparisons and see if that says anything about the value of each arm. */
6327 if (comparison_p
6328 && ((false_code = reversed_comparison_code (cond, NULL))
6329 != UNKNOWN)
6330 && REG_P (XEXP (cond, 0)))
6332 HOST_WIDE_INT nzb;
6333 rtx from = XEXP (cond, 0);
6334 rtx true_val = XEXP (cond, 1);
6335 rtx false_val = true_val;
6336 int swapped = 0;
6338 /* If FALSE_CODE is EQ, swap the codes and arms. */
6340 if (false_code == EQ)
6342 swapped = 1, true_code = EQ, false_code = NE;
6343 std::swap (true_rtx, false_rtx);
6346 scalar_int_mode from_mode;
6347 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6349 /* If we are comparing against zero and the expression being
6350 tested has only a single bit that might be nonzero, that is
6351 its value when it is not equal to zero. Similarly if it is
6352 known to be -1 or 0. */
6353 if (true_code == EQ
6354 && true_val == const0_rtx
6355 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6357 false_code = EQ;
6358 false_val = gen_int_mode (nzb, from_mode);
6360 else if (true_code == EQ
6361 && true_val == const0_rtx
6362 && (num_sign_bit_copies (from, from_mode)
6363 == GET_MODE_PRECISION (from_mode)))
6365 false_code = EQ;
6366 false_val = constm1_rtx;
6370 /* Now simplify an arm if we know the value of the register in the
6371 branch and it is used in the arm. Be careful due to the potential
6372 of locally-shared RTL. */
6374 if (reg_mentioned_p (from, true_rtx))
6375 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6376 from, true_val),
6377 pc_rtx, pc_rtx, 0, 0, 0);
6378 if (reg_mentioned_p (from, false_rtx))
6379 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6380 from, false_val),
6381 pc_rtx, pc_rtx, 0, 0, 0);
6383 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6384 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6386 true_rtx = XEXP (x, 1);
6387 false_rtx = XEXP (x, 2);
6388 true_code = GET_CODE (cond);
6391 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6392 reversed, do so to avoid needing two sets of patterns for
6393 subtract-and-branch insns. Similarly if we have a constant in the true
6394 arm, the false arm is the same as the first operand of the comparison, or
6395 the false arm is more complicated than the true arm. */
6397 if (comparison_p
6398 && reversed_comparison_code (cond, NULL) != UNKNOWN
6399 && (true_rtx == pc_rtx
6400 || (CONSTANT_P (true_rtx)
6401 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6402 || true_rtx == const0_rtx
6403 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6404 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6405 && !OBJECT_P (false_rtx))
6406 || reg_mentioned_p (true_rtx, false_rtx)
6407 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6409 true_code = reversed_comparison_code (cond, NULL);
6410 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6411 SUBST (XEXP (x, 1), false_rtx);
6412 SUBST (XEXP (x, 2), true_rtx);
6414 std::swap (true_rtx, false_rtx);
6415 cond = XEXP (x, 0);
6417 /* It is possible that the conditional has been simplified out. */
6418 true_code = GET_CODE (cond);
6419 comparison_p = COMPARISON_P (cond);
6422 /* If the two arms are identical, we don't need the comparison. */
6424 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6425 return true_rtx;
6427 /* Convert a == b ? b : a to "a". */
6428 if (true_code == EQ && ! side_effects_p (cond)
6429 && !HONOR_NANS (mode)
6430 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6431 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6432 return false_rtx;
6433 else if (true_code == NE && ! side_effects_p (cond)
6434 && !HONOR_NANS (mode)
6435 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6436 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6437 return true_rtx;
6439 /* Look for cases where we have (abs x) or (neg (abs X)). */
6441 if (GET_MODE_CLASS (mode) == MODE_INT
6442 && comparison_p
6443 && XEXP (cond, 1) == const0_rtx
6444 && GET_CODE (false_rtx) == NEG
6445 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6446 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6447 && ! side_effects_p (true_rtx))
6448 switch (true_code)
6450 case GT:
6451 case GE:
6452 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6453 case LT:
6454 case LE:
6455 return
6456 simplify_gen_unary (NEG, mode,
6457 simplify_gen_unary (ABS, mode, true_rtx, mode),
6458 mode);
6459 default:
6460 break;
6463 /* Look for MIN or MAX. */
6465 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6466 && comparison_p
6467 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6468 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6469 && ! side_effects_p (cond))
6470 switch (true_code)
6472 case GE:
6473 case GT:
6474 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6475 case LE:
6476 case LT:
6477 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6478 case GEU:
6479 case GTU:
6480 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6481 case LEU:
6482 case LTU:
6483 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6484 default:
6485 break;
6488 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6489 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6490 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6491 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6492 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6493 neither 1 or -1, but it isn't worth checking for. */
6495 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6496 && comparison_p
6497 && is_int_mode (mode, &int_mode)
6498 && ! side_effects_p (x))
6500 rtx t = make_compound_operation (true_rtx, SET);
6501 rtx f = make_compound_operation (false_rtx, SET);
6502 rtx cond_op0 = XEXP (cond, 0);
6503 rtx cond_op1 = XEXP (cond, 1);
6504 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6505 scalar_int_mode m = int_mode;
6506 rtx z = 0, c1 = NULL_RTX;
6508 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6509 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6510 || GET_CODE (t) == ASHIFT
6511 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6512 && rtx_equal_p (XEXP (t, 0), f))
6513 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6515 /* If an identity-zero op is commutative, check whether there
6516 would be a match if we swapped the operands. */
6517 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6518 || GET_CODE (t) == XOR)
6519 && rtx_equal_p (XEXP (t, 1), f))
6520 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6521 else if (GET_CODE (t) == SIGN_EXTEND
6522 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6523 && (GET_CODE (XEXP (t, 0)) == PLUS
6524 || GET_CODE (XEXP (t, 0)) == MINUS
6525 || GET_CODE (XEXP (t, 0)) == IOR
6526 || GET_CODE (XEXP (t, 0)) == XOR
6527 || GET_CODE (XEXP (t, 0)) == ASHIFT
6528 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6529 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6530 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6531 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6532 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6533 && (num_sign_bit_copies (f, GET_MODE (f))
6534 > (unsigned int)
6535 (GET_MODE_PRECISION (int_mode)
6536 - GET_MODE_PRECISION (inner_mode))))
6538 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6539 extend_op = SIGN_EXTEND;
6540 m = inner_mode;
6542 else if (GET_CODE (t) == SIGN_EXTEND
6543 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6544 && (GET_CODE (XEXP (t, 0)) == PLUS
6545 || GET_CODE (XEXP (t, 0)) == IOR
6546 || GET_CODE (XEXP (t, 0)) == XOR)
6547 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6548 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6549 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6550 && (num_sign_bit_copies (f, GET_MODE (f))
6551 > (unsigned int)
6552 (GET_MODE_PRECISION (int_mode)
6553 - GET_MODE_PRECISION (inner_mode))))
6555 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6556 extend_op = SIGN_EXTEND;
6557 m = inner_mode;
6559 else if (GET_CODE (t) == ZERO_EXTEND
6560 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6561 && (GET_CODE (XEXP (t, 0)) == PLUS
6562 || GET_CODE (XEXP (t, 0)) == MINUS
6563 || GET_CODE (XEXP (t, 0)) == IOR
6564 || GET_CODE (XEXP (t, 0)) == XOR
6565 || GET_CODE (XEXP (t, 0)) == ASHIFT
6566 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6567 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6568 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6569 && HWI_COMPUTABLE_MODE_P (int_mode)
6570 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6571 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6572 && ((nonzero_bits (f, GET_MODE (f))
6573 & ~GET_MODE_MASK (inner_mode))
6574 == 0))
6576 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6577 extend_op = ZERO_EXTEND;
6578 m = inner_mode;
6580 else if (GET_CODE (t) == ZERO_EXTEND
6581 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6582 && (GET_CODE (XEXP (t, 0)) == PLUS
6583 || GET_CODE (XEXP (t, 0)) == IOR
6584 || GET_CODE (XEXP (t, 0)) == XOR)
6585 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6586 && HWI_COMPUTABLE_MODE_P (int_mode)
6587 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6588 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6589 && ((nonzero_bits (f, GET_MODE (f))
6590 & ~GET_MODE_MASK (inner_mode))
6591 == 0))
6593 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6594 extend_op = ZERO_EXTEND;
6595 m = inner_mode;
6598 if (z)
6600 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6601 cond_op0, cond_op1),
6602 pc_rtx, pc_rtx, 0, 0, 0);
6603 temp = simplify_gen_binary (MULT, m, temp,
6604 simplify_gen_binary (MULT, m, c1,
6605 const_true_rtx));
6606 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6607 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6609 if (extend_op != UNKNOWN)
6610 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6612 return temp;
6616 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6617 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6618 negation of a single bit, we can convert this operation to a shift. We
6619 can actually do this more generally, but it doesn't seem worth it. */
6621 if (true_code == NE
6622 && is_a <scalar_int_mode> (mode, &int_mode)
6623 && XEXP (cond, 1) == const0_rtx
6624 && false_rtx == const0_rtx
6625 && CONST_INT_P (true_rtx)
6626 && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
6627 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6628 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6629 == GET_MODE_PRECISION (int_mode))
6630 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6631 return
6632 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6633 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6635 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6636 non-zero bit in A is C1. */
6637 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6638 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6639 && is_a <scalar_int_mode> (mode, &int_mode)
6640 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6641 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6642 == nonzero_bits (XEXP (cond, 0), inner_mode)
6643 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6645 rtx val = XEXP (cond, 0);
6646 if (inner_mode == int_mode)
6647 return val;
6648 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6649 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6652 return x;
6655 /* Simplify X, a SET expression. Return the new expression. */
6657 static rtx
6658 simplify_set (rtx x)
6660 rtx src = SET_SRC (x);
6661 rtx dest = SET_DEST (x);
6662 machine_mode mode
6663 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6664 rtx_insn *other_insn;
6665 rtx *cc_use;
6666 scalar_int_mode int_mode;
6668 /* (set (pc) (return)) gets written as (return). */
6669 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6670 return src;
6672 /* Now that we know for sure which bits of SRC we are using, see if we can
6673 simplify the expression for the object knowing that we only need the
6674 low-order bits. */
6676 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6678 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6679 SUBST (SET_SRC (x), src);
6682 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6683 the comparison result and try to simplify it unless we already have used
6684 undobuf.other_insn. */
6685 if ((GET_MODE_CLASS (mode) == MODE_CC
6686 || GET_CODE (src) == COMPARE
6687 || CC0_P (dest))
6688 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6689 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6690 && COMPARISON_P (*cc_use)
6691 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6693 enum rtx_code old_code = GET_CODE (*cc_use);
6694 enum rtx_code new_code;
6695 rtx op0, op1, tmp;
6696 int other_changed = 0;
6697 rtx inner_compare = NULL_RTX;
6698 machine_mode compare_mode = GET_MODE (dest);
6700 if (GET_CODE (src) == COMPARE)
6702 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6703 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6705 inner_compare = op0;
6706 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6709 else
6710 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6712 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6713 op0, op1);
6714 if (!tmp)
6715 new_code = old_code;
6716 else if (!CONSTANT_P (tmp))
6718 new_code = GET_CODE (tmp);
6719 op0 = XEXP (tmp, 0);
6720 op1 = XEXP (tmp, 1);
6722 else
6724 rtx pat = PATTERN (other_insn);
6725 undobuf.other_insn = other_insn;
6726 SUBST (*cc_use, tmp);
6728 /* Attempt to simplify CC user. */
6729 if (GET_CODE (pat) == SET)
6731 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6732 if (new_rtx != NULL_RTX)
6733 SUBST (SET_SRC (pat), new_rtx);
6736 /* Convert X into a no-op move. */
6737 SUBST (SET_DEST (x), pc_rtx);
6738 SUBST (SET_SRC (x), pc_rtx);
6739 return x;
6742 /* Simplify our comparison, if possible. */
6743 new_code = simplify_comparison (new_code, &op0, &op1);
6745 #ifdef SELECT_CC_MODE
6746 /* If this machine has CC modes other than CCmode, check to see if we
6747 need to use a different CC mode here. */
6748 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6749 compare_mode = GET_MODE (op0);
6750 else if (inner_compare
6751 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6752 && new_code == old_code
6753 && op0 == XEXP (inner_compare, 0)
6754 && op1 == XEXP (inner_compare, 1))
6755 compare_mode = GET_MODE (inner_compare);
6756 else
6757 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6759 /* If the mode changed, we have to change SET_DEST, the mode in the
6760 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6761 a hard register, just build new versions with the proper mode. If it
6762 is a pseudo, we lose unless it is only time we set the pseudo, in
6763 which case we can safely change its mode. */
6764 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6766 if (can_change_dest_mode (dest, 0, compare_mode))
6768 unsigned int regno = REGNO (dest);
6769 rtx new_dest;
6771 if (regno < FIRST_PSEUDO_REGISTER)
6772 new_dest = gen_rtx_REG (compare_mode, regno);
6773 else
6775 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6776 new_dest = regno_reg_rtx[regno];
6779 SUBST (SET_DEST (x), new_dest);
6780 SUBST (XEXP (*cc_use, 0), new_dest);
6781 other_changed = 1;
6783 dest = new_dest;
6786 #endif /* SELECT_CC_MODE */
6788 /* If the code changed, we have to build a new comparison in
6789 undobuf.other_insn. */
6790 if (new_code != old_code)
6792 int other_changed_previously = other_changed;
6793 unsigned HOST_WIDE_INT mask;
6794 rtx old_cc_use = *cc_use;
6796 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6797 dest, const0_rtx));
6798 other_changed = 1;
6800 /* If the only change we made was to change an EQ into an NE or
6801 vice versa, OP0 has only one bit that might be nonzero, and OP1
6802 is zero, check if changing the user of the condition code will
6803 produce a valid insn. If it won't, we can keep the original code
6804 in that insn by surrounding our operation with an XOR. */
6806 if (((old_code == NE && new_code == EQ)
6807 || (old_code == EQ && new_code == NE))
6808 && ! other_changed_previously && op1 == const0_rtx
6809 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6810 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6812 rtx pat = PATTERN (other_insn), note = 0;
6814 if ((recog_for_combine (&pat, other_insn, &note) < 0
6815 && ! check_asm_operands (pat)))
6817 *cc_use = old_cc_use;
6818 other_changed = 0;
6820 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6821 gen_int_mode (mask,
6822 GET_MODE (op0)));
6827 if (other_changed)
6828 undobuf.other_insn = other_insn;
6830 /* Don't generate a compare of a CC with 0, just use that CC. */
6831 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6833 SUBST (SET_SRC (x), op0);
6834 src = SET_SRC (x);
6836 /* Otherwise, if we didn't previously have the same COMPARE we
6837 want, create it from scratch. */
6838 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6839 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6841 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6842 src = SET_SRC (x);
6845 else
6847 /* Get SET_SRC in a form where we have placed back any
6848 compound expressions. Then do the checks below. */
6849 src = make_compound_operation (src, SET);
6850 SUBST (SET_SRC (x), src);
6853 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6854 and X being a REG or (subreg (reg)), we may be able to convert this to
6855 (set (subreg:m2 x) (op)).
6857 We can always do this if M1 is narrower than M2 because that means that
6858 we only care about the low bits of the result.
6860 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6861 perform a narrower operation than requested since the high-order bits will
6862 be undefined. On machine where it is defined, this transformation is safe
6863 as long as M1 and M2 have the same number of words. */
6865 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6866 && !OBJECT_P (SUBREG_REG (src))
6867 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6868 / UNITS_PER_WORD)
6869 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6870 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6871 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6872 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6873 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6874 GET_MODE (SUBREG_REG (src)),
6875 GET_MODE (src)))
6876 && (REG_P (dest)
6877 || (GET_CODE (dest) == SUBREG
6878 && REG_P (SUBREG_REG (dest)))))
6880 SUBST (SET_DEST (x),
6881 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6882 dest));
6883 SUBST (SET_SRC (x), SUBREG_REG (src));
6885 src = SET_SRC (x), dest = SET_DEST (x);
6888 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6889 in SRC. */
6890 if (dest == cc0_rtx
6891 && partial_subreg_p (src)
6892 && subreg_lowpart_p (src))
6894 rtx inner = SUBREG_REG (src);
6895 machine_mode inner_mode = GET_MODE (inner);
6897 /* Here we make sure that we don't have a sign bit on. */
6898 if (val_signbit_known_clear_p (GET_MODE (src),
6899 nonzero_bits (inner, inner_mode)))
6901 SUBST (SET_SRC (x), inner);
6902 src = SET_SRC (x);
6906 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6907 would require a paradoxical subreg. Replace the subreg with a
6908 zero_extend to avoid the reload that would otherwise be required. */
6910 enum rtx_code extend_op;
6911 if (paradoxical_subreg_p (src)
6912 && MEM_P (SUBREG_REG (src))
6913 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6915 SUBST (SET_SRC (x),
6916 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6918 src = SET_SRC (x);
6921 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6922 are comparing an item known to be 0 or -1 against 0, use a logical
6923 operation instead. Check for one of the arms being an IOR of the other
6924 arm with some value. We compute three terms to be IOR'ed together. In
6925 practice, at most two will be nonzero. Then we do the IOR's. */
6927 if (GET_CODE (dest) != PC
6928 && GET_CODE (src) == IF_THEN_ELSE
6929 && is_int_mode (GET_MODE (src), &int_mode)
6930 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6931 && XEXP (XEXP (src, 0), 1) == const0_rtx
6932 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6933 && (!HAVE_conditional_move
6934 || ! can_conditionally_move_p (int_mode))
6935 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6936 == GET_MODE_PRECISION (int_mode))
6937 && ! side_effects_p (src))
6939 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6940 ? XEXP (src, 1) : XEXP (src, 2));
6941 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6942 ? XEXP (src, 2) : XEXP (src, 1));
6943 rtx term1 = const0_rtx, term2, term3;
6945 if (GET_CODE (true_rtx) == IOR
6946 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6947 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6948 else if (GET_CODE (true_rtx) == IOR
6949 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6950 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6951 else if (GET_CODE (false_rtx) == IOR
6952 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6953 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6954 else if (GET_CODE (false_rtx) == IOR
6955 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6956 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6958 term2 = simplify_gen_binary (AND, int_mode,
6959 XEXP (XEXP (src, 0), 0), true_rtx);
6960 term3 = simplify_gen_binary (AND, int_mode,
6961 simplify_gen_unary (NOT, int_mode,
6962 XEXP (XEXP (src, 0), 0),
6963 int_mode),
6964 false_rtx);
6966 SUBST (SET_SRC (x),
6967 simplify_gen_binary (IOR, int_mode,
6968 simplify_gen_binary (IOR, int_mode,
6969 term1, term2),
6970 term3));
6972 src = SET_SRC (x);
6975 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6976 whole thing fail. */
6977 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6978 return src;
6979 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6980 return dest;
6981 else
6982 /* Convert this into a field assignment operation, if possible. */
6983 return make_field_assignment (x);
6986 /* Simplify, X, and AND, IOR, or XOR operation, and return the simplified
6987 result. */
6989 static rtx
6990 simplify_logical (rtx x)
6992 rtx op0 = XEXP (x, 0);
6993 rtx op1 = XEXP (x, 1);
6994 scalar_int_mode mode;
6996 switch (GET_CODE (x))
6998 case AND:
6999 /* We can call simplify_and_const_int only if we don't lose
7000 any (sign) bits when converting INTVAL (op1) to
7001 "unsigned HOST_WIDE_INT". */
7002 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7003 && CONST_INT_P (op1)
7004 && (HWI_COMPUTABLE_MODE_P (mode)
7005 || INTVAL (op1) > 0))
7007 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7008 if (GET_CODE (x) != AND)
7009 return x;
7011 op0 = XEXP (x, 0);
7012 op1 = XEXP (x, 1);
7015 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7016 apply the distributive law and then the inverse distributive
7017 law to see if things simplify. */
7018 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7020 rtx result = distribute_and_simplify_rtx (x, 0);
7021 if (result)
7022 return result;
7024 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7026 rtx result = distribute_and_simplify_rtx (x, 1);
7027 if (result)
7028 return result;
7030 break;
7032 case IOR:
7033 /* If we have (ior (and A B) C), apply the distributive law and then
7034 the inverse distributive law to see if things simplify. */
7036 if (GET_CODE (op0) == AND)
7038 rtx result = distribute_and_simplify_rtx (x, 0);
7039 if (result)
7040 return result;
7043 if (GET_CODE (op1) == AND)
7045 rtx result = distribute_and_simplify_rtx (x, 1);
7046 if (result)
7047 return result;
7049 break;
7051 default:
7052 gcc_unreachable ();
7055 return x;
7058 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7059 operations" because they can be replaced with two more basic operations.
7060 ZERO_EXTEND is also considered "compound" because it can be replaced with
7061 an AND operation, which is simpler, though only one operation.
7063 The function expand_compound_operation is called with an rtx expression
7064 and will convert it to the appropriate shifts and AND operations,
7065 simplifying at each stage.
7067 The function make_compound_operation is called to convert an expression
7068 consisting of shifts and ANDs into the equivalent compound expression.
7069 It is the inverse of this function, loosely speaking. */
7071 static rtx
7072 expand_compound_operation (rtx x)
7074 unsigned HOST_WIDE_INT pos = 0, len;
7075 int unsignedp = 0;
7076 unsigned int modewidth;
7077 rtx tem;
7078 scalar_int_mode inner_mode;
7080 switch (GET_CODE (x))
7082 case ZERO_EXTEND:
7083 unsignedp = 1;
7084 /* FALLTHRU */
7085 case SIGN_EXTEND:
7086 /* We can't necessarily use a const_int for a multiword mode;
7087 it depends on implicitly extending the value.
7088 Since we don't know the right way to extend it,
7089 we can't tell whether the implicit way is right.
7091 Even for a mode that is no wider than a const_int,
7092 we can't win, because we need to sign extend one of its bits through
7093 the rest of it, and we don't know which bit. */
7094 if (CONST_INT_P (XEXP (x, 0)))
7095 return x;
7097 /* Reject modes that aren't scalar integers because turning vector
7098 or complex modes into shifts causes problems. */
7099 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7100 return x;
7102 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7103 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7104 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7105 reloaded. If not for that, MEM's would very rarely be safe.
7107 Reject modes bigger than a word, because we might not be able
7108 to reference a two-register group starting with an arbitrary register
7109 (and currently gen_lowpart might crash for a SUBREG). */
7111 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7112 return x;
7114 len = GET_MODE_PRECISION (inner_mode);
7115 /* If the inner object has VOIDmode (the only way this can happen
7116 is if it is an ASM_OPERANDS), we can't do anything since we don't
7117 know how much masking to do. */
7118 if (len == 0)
7119 return x;
7121 break;
7123 case ZERO_EXTRACT:
7124 unsignedp = 1;
7126 /* fall through */
7128 case SIGN_EXTRACT:
7129 /* If the operand is a CLOBBER, just return it. */
7130 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7131 return XEXP (x, 0);
7133 if (!CONST_INT_P (XEXP (x, 1))
7134 || !CONST_INT_P (XEXP (x, 2)))
7135 return x;
7137 /* Reject modes that aren't scalar integers because turning vector
7138 or complex modes into shifts causes problems. */
7139 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7140 return x;
7142 len = INTVAL (XEXP (x, 1));
7143 pos = INTVAL (XEXP (x, 2));
7145 /* This should stay within the object being extracted, fail otherwise. */
7146 if (len + pos > GET_MODE_PRECISION (inner_mode))
7147 return x;
7149 if (BITS_BIG_ENDIAN)
7150 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7152 break;
7154 default:
7155 return x;
7158 /* We've rejected non-scalar operations by now. */
7159 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7161 /* Convert sign extension to zero extension, if we know that the high
7162 bit is not set, as this is easier to optimize. It will be converted
7163 back to cheaper alternative in make_extraction. */
7164 if (GET_CODE (x) == SIGN_EXTEND
7165 && HWI_COMPUTABLE_MODE_P (mode)
7166 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7167 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7168 == 0))
7170 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7171 rtx temp2 = expand_compound_operation (temp);
7173 /* Make sure this is a profitable operation. */
7174 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7175 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7176 return temp2;
7177 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7178 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7179 return temp;
7180 else
7181 return x;
7184 /* We can optimize some special cases of ZERO_EXTEND. */
7185 if (GET_CODE (x) == ZERO_EXTEND)
7187 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7188 know that the last value didn't have any inappropriate bits
7189 set. */
7190 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7191 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7192 && HWI_COMPUTABLE_MODE_P (mode)
7193 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7194 & ~GET_MODE_MASK (inner_mode)) == 0)
7195 return XEXP (XEXP (x, 0), 0);
7197 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7198 if (GET_CODE (XEXP (x, 0)) == SUBREG
7199 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7200 && subreg_lowpart_p (XEXP (x, 0))
7201 && HWI_COMPUTABLE_MODE_P (mode)
7202 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7203 & ~GET_MODE_MASK (inner_mode)) == 0)
7204 return SUBREG_REG (XEXP (x, 0));
7206 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7207 is a comparison and STORE_FLAG_VALUE permits. This is like
7208 the first case, but it works even when MODE is larger
7209 than HOST_WIDE_INT. */
7210 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7211 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7212 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7213 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7214 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7215 return XEXP (XEXP (x, 0), 0);
7217 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7218 if (GET_CODE (XEXP (x, 0)) == SUBREG
7219 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7220 && subreg_lowpart_p (XEXP (x, 0))
7221 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7222 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7223 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7224 return SUBREG_REG (XEXP (x, 0));
7228 /* If we reach here, we want to return a pair of shifts. The inner
7229 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7230 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7231 logical depending on the value of UNSIGNEDP.
7233 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7234 converted into an AND of a shift.
7236 We must check for the case where the left shift would have a negative
7237 count. This can happen in a case like (x >> 31) & 255 on machines
7238 that can't shift by a constant. On those machines, we would first
7239 combine the shift with the AND to produce a variable-position
7240 extraction. Then the constant of 31 would be substituted in
7241 to produce such a position. */
7243 modewidth = GET_MODE_PRECISION (mode);
7244 if (modewidth >= pos + len)
7246 tem = gen_lowpart (mode, XEXP (x, 0));
7247 if (!tem || GET_CODE (tem) == CLOBBER)
7248 return x;
7249 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7250 tem, modewidth - pos - len);
7251 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7252 mode, tem, modewidth - len);
7254 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7255 tem = simplify_and_const_int (NULL_RTX, mode,
7256 simplify_shift_const (NULL_RTX, LSHIFTRT,
7257 mode, XEXP (x, 0),
7258 pos),
7259 (HOST_WIDE_INT_1U << len) - 1);
7260 else
7261 /* Any other cases we can't handle. */
7262 return x;
7264 /* If we couldn't do this for some reason, return the original
7265 expression. */
7266 if (GET_CODE (tem) == CLOBBER)
7267 return x;
7269 return tem;
7272 /* X is a SET which contains an assignment of one object into
7273 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7274 or certain SUBREGS). If possible, convert it into a series of
7275 logical operations.
7277 We half-heartedly support variable positions, but do not at all
7278 support variable lengths. */
7280 static const_rtx
7281 expand_field_assignment (const_rtx x)
7283 rtx inner;
7284 rtx pos; /* Always counts from low bit. */
7285 int len;
7286 rtx mask, cleared, masked;
7287 scalar_int_mode compute_mode;
7289 /* Loop until we find something we can't simplify. */
7290 while (1)
7292 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7293 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7295 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7296 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7297 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7299 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7300 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7302 inner = XEXP (SET_DEST (x), 0);
7303 len = INTVAL (XEXP (SET_DEST (x), 1));
7304 pos = XEXP (SET_DEST (x), 2);
7306 /* A constant position should stay within the width of INNER. */
7307 if (CONST_INT_P (pos)
7308 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7309 break;
7311 if (BITS_BIG_ENDIAN)
7313 if (CONST_INT_P (pos))
7314 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7315 - INTVAL (pos));
7316 else if (GET_CODE (pos) == MINUS
7317 && CONST_INT_P (XEXP (pos, 1))
7318 && (INTVAL (XEXP (pos, 1))
7319 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7320 /* If position is ADJUST - X, new position is X. */
7321 pos = XEXP (pos, 0);
7322 else
7324 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7325 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7326 gen_int_mode (prec - len,
7327 GET_MODE (pos)),
7328 pos);
7333 /* A SUBREG between two modes that occupy the same numbers of words
7334 can be done by moving the SUBREG to the source. */
7335 else if (GET_CODE (SET_DEST (x)) == SUBREG
7336 /* We need SUBREGs to compute nonzero_bits properly. */
7337 && nonzero_sign_valid
7338 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7339 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7340 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7341 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7343 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7344 gen_lowpart
7345 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7346 SET_SRC (x)));
7347 continue;
7349 else
7350 break;
7352 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7353 inner = SUBREG_REG (inner);
7355 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7356 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7358 /* Don't do anything for vector or complex integral types. */
7359 if (! FLOAT_MODE_P (GET_MODE (inner)))
7360 break;
7362 /* Try to find an integral mode to pun with. */
7363 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7364 .exists (&compute_mode))
7365 break;
7367 inner = gen_lowpart (compute_mode, inner);
7370 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7371 if (len >= HOST_BITS_PER_WIDE_INT)
7372 break;
7374 /* Don't try to compute in too wide unsupported modes. */
7375 if (!targetm.scalar_mode_supported_p (compute_mode))
7376 break;
7378 /* Now compute the equivalent expression. Make a copy of INNER
7379 for the SET_DEST in case it is a MEM into which we will substitute;
7380 we don't want shared RTL in that case. */
7381 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7382 compute_mode);
7383 cleared = simplify_gen_binary (AND, compute_mode,
7384 simplify_gen_unary (NOT, compute_mode,
7385 simplify_gen_binary (ASHIFT,
7386 compute_mode,
7387 mask, pos),
7388 compute_mode),
7389 inner);
7390 masked = simplify_gen_binary (ASHIFT, compute_mode,
7391 simplify_gen_binary (
7392 AND, compute_mode,
7393 gen_lowpart (compute_mode, SET_SRC (x)),
7394 mask),
7395 pos);
7397 x = gen_rtx_SET (copy_rtx (inner),
7398 simplify_gen_binary (IOR, compute_mode,
7399 cleared, masked));
7402 return x;
7405 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7406 it is an RTX that represents the (variable) starting position; otherwise,
7407 POS is the (constant) starting bit position. Both are counted from the LSB.
7409 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7411 IN_DEST is nonzero if this is a reference in the destination of a SET.
7412 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7413 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7414 be used.
7416 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7417 ZERO_EXTRACT should be built even for bits starting at bit 0.
7419 MODE is the desired mode of the result (if IN_DEST == 0).
7421 The result is an RTX for the extraction or NULL_RTX if the target
7422 can't handle it. */
7424 static rtx
7425 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7426 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7427 int in_dest, int in_compare)
7429 /* This mode describes the size of the storage area
7430 to fetch the overall value from. Within that, we
7431 ignore the POS lowest bits, etc. */
7432 machine_mode is_mode = GET_MODE (inner);
7433 machine_mode inner_mode;
7434 scalar_int_mode wanted_inner_mode;
7435 scalar_int_mode wanted_inner_reg_mode = word_mode;
7436 scalar_int_mode pos_mode = word_mode;
7437 machine_mode extraction_mode = word_mode;
7438 rtx new_rtx = 0;
7439 rtx orig_pos_rtx = pos_rtx;
7440 HOST_WIDE_INT orig_pos;
7442 if (pos_rtx && CONST_INT_P (pos_rtx))
7443 pos = INTVAL (pos_rtx), pos_rtx = 0;
7445 if (GET_CODE (inner) == SUBREG
7446 && subreg_lowpart_p (inner)
7447 && (paradoxical_subreg_p (inner)
7448 /* If trying or potentionally trying to extract
7449 bits outside of is_mode, don't look through
7450 non-paradoxical SUBREGs. See PR82192. */
7451 || (pos_rtx == NULL_RTX
7452 && pos + len <= GET_MODE_PRECISION (is_mode))))
7454 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7455 consider just the QI as the memory to extract from.
7456 The subreg adds or removes high bits; its mode is
7457 irrelevant to the meaning of this extraction,
7458 since POS and LEN count from the lsb. */
7459 if (MEM_P (SUBREG_REG (inner)))
7460 is_mode = GET_MODE (SUBREG_REG (inner));
7461 inner = SUBREG_REG (inner);
7463 else if (GET_CODE (inner) == ASHIFT
7464 && CONST_INT_P (XEXP (inner, 1))
7465 && pos_rtx == 0 && pos == 0
7466 && len > UINTVAL (XEXP (inner, 1)))
7468 /* We're extracting the least significant bits of an rtx
7469 (ashift X (const_int C)), where LEN > C. Extract the
7470 least significant (LEN - C) bits of X, giving an rtx
7471 whose mode is MODE, then shift it left C times. */
7472 new_rtx = make_extraction (mode, XEXP (inner, 0),
7473 0, 0, len - INTVAL (XEXP (inner, 1)),
7474 unsignedp, in_dest, in_compare);
7475 if (new_rtx != 0)
7476 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7478 else if (GET_CODE (inner) == TRUNCATE
7479 /* If trying or potentionally trying to extract
7480 bits outside of is_mode, don't look through
7481 TRUNCATE. See PR82192. */
7482 && pos_rtx == NULL_RTX
7483 && pos + len <= GET_MODE_PRECISION (is_mode))
7484 inner = XEXP (inner, 0);
7486 inner_mode = GET_MODE (inner);
7488 /* See if this can be done without an extraction. We never can if the
7489 width of the field is not the same as that of some integer mode. For
7490 registers, we can only avoid the extraction if the position is at the
7491 low-order bit and this is either not in the destination or we have the
7492 appropriate STRICT_LOW_PART operation available.
7494 For MEM, we can avoid an extract if the field starts on an appropriate
7495 boundary and we can change the mode of the memory reference. */
7497 scalar_int_mode tmode;
7498 if (int_mode_for_size (len, 1).exists (&tmode)
7499 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7500 && !MEM_P (inner)
7501 && (pos == 0 || REG_P (inner))
7502 && (inner_mode == tmode
7503 || !REG_P (inner)
7504 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7505 || reg_truncated_to_mode (tmode, inner))
7506 && (! in_dest
7507 || (REG_P (inner)
7508 && have_insn_for (STRICT_LOW_PART, tmode))))
7509 || (MEM_P (inner) && pos_rtx == 0
7510 && (pos
7511 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7512 : BITS_PER_UNIT)) == 0
7513 /* We can't do this if we are widening INNER_MODE (it
7514 may not be aligned, for one thing). */
7515 && !paradoxical_subreg_p (tmode, inner_mode)
7516 && (inner_mode == tmode
7517 || (! mode_dependent_address_p (XEXP (inner, 0),
7518 MEM_ADDR_SPACE (inner))
7519 && ! MEM_VOLATILE_P (inner))))))
7521 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7522 field. If the original and current mode are the same, we need not
7523 adjust the offset. Otherwise, we do if bytes big endian.
7525 If INNER is not a MEM, get a piece consisting of just the field
7526 of interest (in this case POS % BITS_PER_WORD must be 0). */
7528 if (MEM_P (inner))
7530 HOST_WIDE_INT offset;
7532 /* POS counts from lsb, but make OFFSET count in memory order. */
7533 if (BYTES_BIG_ENDIAN)
7534 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7535 else
7536 offset = pos / BITS_PER_UNIT;
7538 new_rtx = adjust_address_nv (inner, tmode, offset);
7540 else if (REG_P (inner))
7542 if (tmode != inner_mode)
7544 /* We can't call gen_lowpart in a DEST since we
7545 always want a SUBREG (see below) and it would sometimes
7546 return a new hard register. */
7547 if (pos || in_dest)
7549 unsigned int offset
7550 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7552 /* Avoid creating invalid subregs, for example when
7553 simplifying (x>>32)&255. */
7554 if (!validate_subreg (tmode, inner_mode, inner, offset))
7555 return NULL_RTX;
7557 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7559 else
7560 new_rtx = gen_lowpart (tmode, inner);
7562 else
7563 new_rtx = inner;
7565 else
7566 new_rtx = force_to_mode (inner, tmode,
7567 len >= HOST_BITS_PER_WIDE_INT
7568 ? HOST_WIDE_INT_M1U
7569 : (HOST_WIDE_INT_1U << len) - 1, 0);
7571 /* If this extraction is going into the destination of a SET,
7572 make a STRICT_LOW_PART unless we made a MEM. */
7574 if (in_dest)
7575 return (MEM_P (new_rtx) ? new_rtx
7576 : (GET_CODE (new_rtx) != SUBREG
7577 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7578 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7580 if (mode == tmode)
7581 return new_rtx;
7583 if (CONST_SCALAR_INT_P (new_rtx))
7584 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7585 mode, new_rtx, tmode);
7587 /* If we know that no extraneous bits are set, and that the high
7588 bit is not set, convert the extraction to the cheaper of
7589 sign and zero extension, that are equivalent in these cases. */
7590 if (flag_expensive_optimizations
7591 && (HWI_COMPUTABLE_MODE_P (tmode)
7592 && ((nonzero_bits (new_rtx, tmode)
7593 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7594 == 0)))
7596 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7597 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7599 /* Prefer ZERO_EXTENSION, since it gives more information to
7600 backends. */
7601 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7602 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7603 return temp;
7604 return temp1;
7607 /* Otherwise, sign- or zero-extend unless we already are in the
7608 proper mode. */
7610 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7611 mode, new_rtx));
7614 /* Unless this is a COMPARE or we have a funny memory reference,
7615 don't do anything with zero-extending field extracts starting at
7616 the low-order bit since they are simple AND operations. */
7617 if (pos_rtx == 0 && pos == 0 && ! in_dest
7618 && ! in_compare && unsignedp)
7619 return 0;
7621 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7622 if the position is not a constant and the length is not 1. In all
7623 other cases, we would only be going outside our object in cases when
7624 an original shift would have been undefined. */
7625 if (MEM_P (inner)
7626 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7627 || (pos_rtx != 0 && len != 1)))
7628 return 0;
7630 enum extraction_pattern pattern = (in_dest ? EP_insv
7631 : unsignedp ? EP_extzv : EP_extv);
7633 /* If INNER is not from memory, we want it to have the mode of a register
7634 extraction pattern's structure operand, or word_mode if there is no
7635 such pattern. The same applies to extraction_mode and pos_mode
7636 and their respective operands.
7638 For memory, assume that the desired extraction_mode and pos_mode
7639 are the same as for a register operation, since at present we don't
7640 have named patterns for aligned memory structures. */
7641 struct extraction_insn insn;
7642 if (get_best_reg_extraction_insn (&insn, pattern,
7643 GET_MODE_BITSIZE (inner_mode), mode))
7645 wanted_inner_reg_mode = insn.struct_mode.require ();
7646 pos_mode = insn.pos_mode;
7647 extraction_mode = insn.field_mode;
7650 /* Never narrow an object, since that might not be safe. */
7652 if (mode != VOIDmode
7653 && partial_subreg_p (extraction_mode, mode))
7654 extraction_mode = mode;
7656 if (!MEM_P (inner))
7657 wanted_inner_mode = wanted_inner_reg_mode;
7658 else
7660 /* Be careful not to go beyond the extracted object and maintain the
7661 natural alignment of the memory. */
7662 wanted_inner_mode = smallest_int_mode_for_size (len);
7663 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7664 > GET_MODE_BITSIZE (wanted_inner_mode))
7665 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7668 orig_pos = pos;
7670 if (BITS_BIG_ENDIAN)
7672 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7673 BITS_BIG_ENDIAN style. If position is constant, compute new
7674 position. Otherwise, build subtraction.
7675 Note that POS is relative to the mode of the original argument.
7676 If it's a MEM we need to recompute POS relative to that.
7677 However, if we're extracting from (or inserting into) a register,
7678 we want to recompute POS relative to wanted_inner_mode. */
7679 int width = (MEM_P (inner)
7680 ? GET_MODE_BITSIZE (is_mode)
7681 : GET_MODE_BITSIZE (wanted_inner_mode));
7683 if (pos_rtx == 0)
7684 pos = width - len - pos;
7685 else
7686 pos_rtx
7687 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7688 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7689 pos_rtx);
7690 /* POS may be less than 0 now, but we check for that below.
7691 Note that it can only be less than 0 if !MEM_P (inner). */
7694 /* If INNER has a wider mode, and this is a constant extraction, try to
7695 make it smaller and adjust the byte to point to the byte containing
7696 the value. */
7697 if (wanted_inner_mode != VOIDmode
7698 && inner_mode != wanted_inner_mode
7699 && ! pos_rtx
7700 && partial_subreg_p (wanted_inner_mode, is_mode)
7701 && MEM_P (inner)
7702 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7703 && ! MEM_VOLATILE_P (inner))
7705 int offset = 0;
7707 /* The computations below will be correct if the machine is big
7708 endian in both bits and bytes or little endian in bits and bytes.
7709 If it is mixed, we must adjust. */
7711 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7712 adjust OFFSET to compensate. */
7713 if (BYTES_BIG_ENDIAN
7714 && paradoxical_subreg_p (is_mode, inner_mode))
7715 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7717 /* We can now move to the desired byte. */
7718 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7719 * GET_MODE_SIZE (wanted_inner_mode);
7720 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7722 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7723 && is_mode != wanted_inner_mode)
7724 offset = (GET_MODE_SIZE (is_mode)
7725 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7727 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7730 /* If INNER is not memory, get it into the proper mode. If we are changing
7731 its mode, POS must be a constant and smaller than the size of the new
7732 mode. */
7733 else if (!MEM_P (inner))
7735 /* On the LHS, don't create paradoxical subregs implicitely truncating
7736 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7737 if (in_dest
7738 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7739 wanted_inner_mode))
7740 return NULL_RTX;
7742 if (GET_MODE (inner) != wanted_inner_mode
7743 && (pos_rtx != 0
7744 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7745 return NULL_RTX;
7747 if (orig_pos < 0)
7748 return NULL_RTX;
7750 inner = force_to_mode (inner, wanted_inner_mode,
7751 pos_rtx
7752 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7753 ? HOST_WIDE_INT_M1U
7754 : (((HOST_WIDE_INT_1U << len) - 1)
7755 << orig_pos),
7759 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7760 have to zero extend. Otherwise, we can just use a SUBREG.
7762 We dealt with constant rtxes earlier, so pos_rtx cannot
7763 have VOIDmode at this point. */
7764 if (pos_rtx != 0
7765 && (GET_MODE_SIZE (pos_mode)
7766 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7768 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7769 GET_MODE (pos_rtx));
7771 /* If we know that no extraneous bits are set, and that the high
7772 bit is not set, convert extraction to cheaper one - either
7773 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7774 cases. */
7775 if (flag_expensive_optimizations
7776 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7777 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7778 & ~(((unsigned HOST_WIDE_INT)
7779 GET_MODE_MASK (GET_MODE (pos_rtx)))
7780 >> 1))
7781 == 0)))
7783 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7784 GET_MODE (pos_rtx));
7786 /* Prefer ZERO_EXTENSION, since it gives more information to
7787 backends. */
7788 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7789 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7790 temp = temp1;
7792 pos_rtx = temp;
7795 /* Make POS_RTX unless we already have it and it is correct. If we don't
7796 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7797 be a CONST_INT. */
7798 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7799 pos_rtx = orig_pos_rtx;
7801 else if (pos_rtx == 0)
7802 pos_rtx = GEN_INT (pos);
7804 /* Make the required operation. See if we can use existing rtx. */
7805 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7806 extraction_mode, inner, GEN_INT (len), pos_rtx);
7807 if (! in_dest)
7808 new_rtx = gen_lowpart (mode, new_rtx);
7810 return new_rtx;
7813 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7814 can be commuted with any other operations in X. Return X without
7815 that shift if so. */
7817 static rtx
7818 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7820 enum rtx_code code = GET_CODE (x);
7821 rtx tem;
7823 switch (code)
7825 case ASHIFT:
7826 /* This is the shift itself. If it is wide enough, we will return
7827 either the value being shifted if the shift count is equal to
7828 COUNT or a shift for the difference. */
7829 if (CONST_INT_P (XEXP (x, 1))
7830 && INTVAL (XEXP (x, 1)) >= count)
7831 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7832 INTVAL (XEXP (x, 1)) - count);
7833 break;
7835 case NEG: case NOT:
7836 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7837 return simplify_gen_unary (code, mode, tem, mode);
7839 break;
7841 case PLUS: case IOR: case XOR: case AND:
7842 /* If we can safely shift this constant and we find the inner shift,
7843 make a new operation. */
7844 if (CONST_INT_P (XEXP (x, 1))
7845 && (UINTVAL (XEXP (x, 1))
7846 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7847 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7849 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7850 return simplify_gen_binary (code, mode, tem,
7851 gen_int_mode (val, mode));
7853 break;
7855 default:
7856 break;
7859 return 0;
7862 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7863 level of the expression and MODE is its mode. IN_CODE is as for
7864 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7865 that should be used when recursing on operands of *X_PTR.
7867 There are two possible actions:
7869 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7870 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7872 - Return a new rtx, which the caller returns directly. */
7874 static rtx
7875 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7876 enum rtx_code in_code,
7877 enum rtx_code *next_code_ptr)
7879 rtx x = *x_ptr;
7880 enum rtx_code next_code = *next_code_ptr;
7881 enum rtx_code code = GET_CODE (x);
7882 int mode_width = GET_MODE_PRECISION (mode);
7883 rtx rhs, lhs;
7884 rtx new_rtx = 0;
7885 int i;
7886 rtx tem;
7887 scalar_int_mode inner_mode;
7888 bool equality_comparison = false;
7890 if (in_code == EQ)
7892 equality_comparison = true;
7893 in_code = COMPARE;
7896 /* Process depending on the code of this operation. If NEW is set
7897 nonzero, it will be returned. */
7899 switch (code)
7901 case ASHIFT:
7902 /* Convert shifts by constants into multiplications if inside
7903 an address. */
7904 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7905 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7906 && INTVAL (XEXP (x, 1)) >= 0)
7908 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7909 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7911 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7912 if (GET_CODE (new_rtx) == NEG)
7914 new_rtx = XEXP (new_rtx, 0);
7915 multval = -multval;
7917 multval = trunc_int_for_mode (multval, mode);
7918 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7920 break;
7922 case PLUS:
7923 lhs = XEXP (x, 0);
7924 rhs = XEXP (x, 1);
7925 lhs = make_compound_operation (lhs, next_code);
7926 rhs = make_compound_operation (rhs, next_code);
7927 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7929 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7930 XEXP (lhs, 1));
7931 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7933 else if (GET_CODE (lhs) == MULT
7934 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7936 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7937 simplify_gen_unary (NEG, mode,
7938 XEXP (lhs, 1),
7939 mode));
7940 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7942 else
7944 SUBST (XEXP (x, 0), lhs);
7945 SUBST (XEXP (x, 1), rhs);
7947 maybe_swap_commutative_operands (x);
7948 return x;
7950 case MINUS:
7951 lhs = XEXP (x, 0);
7952 rhs = XEXP (x, 1);
7953 lhs = make_compound_operation (lhs, next_code);
7954 rhs = make_compound_operation (rhs, next_code);
7955 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7957 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7958 XEXP (rhs, 1));
7959 return simplify_gen_binary (PLUS, mode, tem, lhs);
7961 else if (GET_CODE (rhs) == MULT
7962 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7964 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7965 simplify_gen_unary (NEG, mode,
7966 XEXP (rhs, 1),
7967 mode));
7968 return simplify_gen_binary (PLUS, mode, tem, lhs);
7970 else
7972 SUBST (XEXP (x, 0), lhs);
7973 SUBST (XEXP (x, 1), rhs);
7974 return x;
7977 case AND:
7978 /* If the second operand is not a constant, we can't do anything
7979 with it. */
7980 if (!CONST_INT_P (XEXP (x, 1)))
7981 break;
7983 /* If the constant is a power of two minus one and the first operand
7984 is a logical right shift, make an extraction. */
7985 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7986 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7988 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7989 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
7990 i, 1, 0, in_code == COMPARE);
7993 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7994 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7995 && subreg_lowpart_p (XEXP (x, 0))
7996 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
7997 &inner_mode)
7998 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7999 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8001 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8002 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8003 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8004 XEXP (inner_x0, 1),
8005 i, 1, 0, in_code == COMPARE);
8007 /* If we narrowed the mode when dropping the subreg, then we lose. */
8008 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8009 new_rtx = NULL;
8011 /* If that didn't give anything, see if the AND simplifies on
8012 its own. */
8013 if (!new_rtx && i >= 0)
8015 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8016 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8017 0, in_code == COMPARE);
8020 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8021 else if ((GET_CODE (XEXP (x, 0)) == XOR
8022 || GET_CODE (XEXP (x, 0)) == IOR)
8023 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8024 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8025 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8027 /* Apply the distributive law, and then try to make extractions. */
8028 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8029 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8030 XEXP (x, 1)),
8031 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8032 XEXP (x, 1)));
8033 new_rtx = make_compound_operation (new_rtx, in_code);
8036 /* If we are have (and (rotate X C) M) and C is larger than the number
8037 of bits in M, this is an extraction. */
8039 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8040 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8041 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8042 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8044 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8045 new_rtx = make_extraction (mode, new_rtx,
8046 (GET_MODE_PRECISION (mode)
8047 - INTVAL (XEXP (XEXP (x, 0), 1))),
8048 NULL_RTX, i, 1, 0, in_code == COMPARE);
8051 /* On machines without logical shifts, if the operand of the AND is
8052 a logical shift and our mask turns off all the propagated sign
8053 bits, we can replace the logical shift with an arithmetic shift. */
8054 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8055 && !have_insn_for (LSHIFTRT, mode)
8056 && have_insn_for (ASHIFTRT, mode)
8057 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8058 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8059 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8060 && mode_width <= HOST_BITS_PER_WIDE_INT)
8062 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8064 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8065 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8066 SUBST (XEXP (x, 0),
8067 gen_rtx_ASHIFTRT (mode,
8068 make_compound_operation (XEXP (XEXP (x,
8071 next_code),
8072 XEXP (XEXP (x, 0), 1)));
8075 /* If the constant is one less than a power of two, this might be
8076 representable by an extraction even if no shift is present.
8077 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8078 we are in a COMPARE. */
8079 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8080 new_rtx = make_extraction (mode,
8081 make_compound_operation (XEXP (x, 0),
8082 next_code),
8083 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8085 /* If we are in a comparison and this is an AND with a power of two,
8086 convert this into the appropriate bit extract. */
8087 else if (in_code == COMPARE
8088 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8089 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8090 new_rtx = make_extraction (mode,
8091 make_compound_operation (XEXP (x, 0),
8092 next_code),
8093 i, NULL_RTX, 1, 1, 0, 1);
8095 /* If the one operand is a paradoxical subreg of a register or memory and
8096 the constant (limited to the smaller mode) has only zero bits where
8097 the sub expression has known zero bits, this can be expressed as
8098 a zero_extend. */
8099 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8101 rtx sub;
8103 sub = XEXP (XEXP (x, 0), 0);
8104 machine_mode sub_mode = GET_MODE (sub);
8105 if ((REG_P (sub) || MEM_P (sub))
8106 && GET_MODE_PRECISION (sub_mode) < mode_width)
8108 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8109 unsigned HOST_WIDE_INT mask;
8111 /* original AND constant with all the known zero bits set */
8112 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8113 if ((mask & mode_mask) == mode_mask)
8115 new_rtx = make_compound_operation (sub, next_code);
8116 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8117 GET_MODE_PRECISION (sub_mode),
8118 1, 0, in_code == COMPARE);
8123 break;
8125 case LSHIFTRT:
8126 /* If the sign bit is known to be zero, replace this with an
8127 arithmetic shift. */
8128 if (have_insn_for (ASHIFTRT, mode)
8129 && ! have_insn_for (LSHIFTRT, mode)
8130 && mode_width <= HOST_BITS_PER_WIDE_INT
8131 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8133 new_rtx = gen_rtx_ASHIFTRT (mode,
8134 make_compound_operation (XEXP (x, 0),
8135 next_code),
8136 XEXP (x, 1));
8137 break;
8140 /* fall through */
8142 case ASHIFTRT:
8143 lhs = XEXP (x, 0);
8144 rhs = XEXP (x, 1);
8146 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8147 this is a SIGN_EXTRACT. */
8148 if (CONST_INT_P (rhs)
8149 && GET_CODE (lhs) == ASHIFT
8150 && CONST_INT_P (XEXP (lhs, 1))
8151 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8152 && INTVAL (XEXP (lhs, 1)) >= 0
8153 && INTVAL (rhs) < mode_width)
8155 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8156 new_rtx = make_extraction (mode, new_rtx,
8157 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8158 NULL_RTX, mode_width - INTVAL (rhs),
8159 code == LSHIFTRT, 0, in_code == COMPARE);
8160 break;
8163 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8164 If so, try to merge the shifts into a SIGN_EXTEND. We could
8165 also do this for some cases of SIGN_EXTRACT, but it doesn't
8166 seem worth the effort; the case checked for occurs on Alpha. */
8168 if (!OBJECT_P (lhs)
8169 && ! (GET_CODE (lhs) == SUBREG
8170 && (OBJECT_P (SUBREG_REG (lhs))))
8171 && CONST_INT_P (rhs)
8172 && INTVAL (rhs) >= 0
8173 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8174 && INTVAL (rhs) < mode_width
8175 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8176 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8177 next_code),
8178 0, NULL_RTX, mode_width - INTVAL (rhs),
8179 code == LSHIFTRT, 0, in_code == COMPARE);
8181 break;
8183 case SUBREG:
8184 /* Call ourselves recursively on the inner expression. If we are
8185 narrowing the object and it has a different RTL code from
8186 what it originally did, do this SUBREG as a force_to_mode. */
8188 rtx inner = SUBREG_REG (x), simplified;
8189 enum rtx_code subreg_code = in_code;
8191 /* If the SUBREG is masking of a logical right shift,
8192 make an extraction. */
8193 if (GET_CODE (inner) == LSHIFTRT
8194 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8195 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8196 && CONST_INT_P (XEXP (inner, 1))
8197 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8198 && subreg_lowpart_p (x))
8200 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8201 int width = GET_MODE_PRECISION (inner_mode)
8202 - INTVAL (XEXP (inner, 1));
8203 if (width > mode_width)
8204 width = mode_width;
8205 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8206 width, 1, 0, in_code == COMPARE);
8207 break;
8210 /* If in_code is COMPARE, it isn't always safe to pass it through
8211 to the recursive make_compound_operation call. */
8212 if (subreg_code == COMPARE
8213 && (!subreg_lowpart_p (x)
8214 || GET_CODE (inner) == SUBREG
8215 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8216 is (const_int 0), rather than
8217 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8218 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8219 for non-equality comparisons against 0 is not equivalent
8220 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8221 || (GET_CODE (inner) == AND
8222 && CONST_INT_P (XEXP (inner, 1))
8223 && partial_subreg_p (x)
8224 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8225 >= GET_MODE_BITSIZE (mode) - 1)))
8226 subreg_code = SET;
8228 tem = make_compound_operation (inner, subreg_code);
8230 simplified
8231 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8232 if (simplified)
8233 tem = simplified;
8235 if (GET_CODE (tem) != GET_CODE (inner)
8236 && partial_subreg_p (x)
8237 && subreg_lowpart_p (x))
8239 rtx newer
8240 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8242 /* If we have something other than a SUBREG, we might have
8243 done an expansion, so rerun ourselves. */
8244 if (GET_CODE (newer) != SUBREG)
8245 newer = make_compound_operation (newer, in_code);
8247 /* force_to_mode can expand compounds. If it just re-expanded
8248 the compound, use gen_lowpart to convert to the desired
8249 mode. */
8250 if (rtx_equal_p (newer, x)
8251 /* Likewise if it re-expanded the compound only partially.
8252 This happens for SUBREG of ZERO_EXTRACT if they extract
8253 the same number of bits. */
8254 || (GET_CODE (newer) == SUBREG
8255 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8256 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8257 && GET_CODE (inner) == AND
8258 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8259 return gen_lowpart (GET_MODE (x), tem);
8261 return newer;
8264 if (simplified)
8265 return tem;
8267 break;
8269 default:
8270 break;
8273 if (new_rtx)
8274 *x_ptr = gen_lowpart (mode, new_rtx);
8275 *next_code_ptr = next_code;
8276 return NULL_RTX;
8279 /* Look at the expression rooted at X. Look for expressions
8280 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8281 Form these expressions.
8283 Return the new rtx, usually just X.
8285 Also, for machines like the VAX that don't have logical shift insns,
8286 try to convert logical to arithmetic shift operations in cases where
8287 they are equivalent. This undoes the canonicalizations to logical
8288 shifts done elsewhere.
8290 We try, as much as possible, to re-use rtl expressions to save memory.
8292 IN_CODE says what kind of expression we are processing. Normally, it is
8293 SET. In a memory address it is MEM. When processing the arguments of
8294 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8295 precisely it is an equality comparison against zero. */
8298 make_compound_operation (rtx x, enum rtx_code in_code)
8300 enum rtx_code code = GET_CODE (x);
8301 const char *fmt;
8302 int i, j;
8303 enum rtx_code next_code;
8304 rtx new_rtx, tem;
8306 /* Select the code to be used in recursive calls. Once we are inside an
8307 address, we stay there. If we have a comparison, set to COMPARE,
8308 but once inside, go back to our default of SET. */
8310 next_code = (code == MEM ? MEM
8311 : ((code == COMPARE || COMPARISON_P (x))
8312 && XEXP (x, 1) == const0_rtx) ? COMPARE
8313 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8315 scalar_int_mode mode;
8316 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8318 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8319 &next_code);
8320 if (new_rtx)
8321 return new_rtx;
8322 code = GET_CODE (x);
8325 /* Now recursively process each operand of this operation. We need to
8326 handle ZERO_EXTEND specially so that we don't lose track of the
8327 inner mode. */
8328 if (code == ZERO_EXTEND)
8330 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8331 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8332 new_rtx, GET_MODE (XEXP (x, 0)));
8333 if (tem)
8334 return tem;
8335 SUBST (XEXP (x, 0), new_rtx);
8336 return x;
8339 fmt = GET_RTX_FORMAT (code);
8340 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8341 if (fmt[i] == 'e')
8343 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8344 SUBST (XEXP (x, i), new_rtx);
8346 else if (fmt[i] == 'E')
8347 for (j = 0; j < XVECLEN (x, i); j++)
8349 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8350 SUBST (XVECEXP (x, i, j), new_rtx);
8353 maybe_swap_commutative_operands (x);
8354 return x;
8357 /* Given M see if it is a value that would select a field of bits
8358 within an item, but not the entire word. Return -1 if not.
8359 Otherwise, return the starting position of the field, where 0 is the
8360 low-order bit.
8362 *PLEN is set to the length of the field. */
8364 static int
8365 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8367 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8368 int pos = m ? ctz_hwi (m) : -1;
8369 int len = 0;
8371 if (pos >= 0)
8372 /* Now shift off the low-order zero bits and see if we have a
8373 power of two minus 1. */
8374 len = exact_log2 ((m >> pos) + 1);
8376 if (len <= 0)
8377 pos = -1;
8379 *plen = len;
8380 return pos;
8383 /* If X refers to a register that equals REG in value, replace these
8384 references with REG. */
8385 static rtx
8386 canon_reg_for_combine (rtx x, rtx reg)
8388 rtx op0, op1, op2;
8389 const char *fmt;
8390 int i;
8391 bool copied;
8393 enum rtx_code code = GET_CODE (x);
8394 switch (GET_RTX_CLASS (code))
8396 case RTX_UNARY:
8397 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8398 if (op0 != XEXP (x, 0))
8399 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8400 GET_MODE (reg));
8401 break;
8403 case RTX_BIN_ARITH:
8404 case RTX_COMM_ARITH:
8405 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8406 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8407 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8408 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8409 break;
8411 case RTX_COMPARE:
8412 case RTX_COMM_COMPARE:
8413 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8414 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8415 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8416 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8417 GET_MODE (op0), op0, op1);
8418 break;
8420 case RTX_TERNARY:
8421 case RTX_BITFIELD_OPS:
8422 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8423 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8424 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8425 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8426 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8427 GET_MODE (op0), op0, op1, op2);
8428 /* FALLTHRU */
8430 case RTX_OBJ:
8431 if (REG_P (x))
8433 if (rtx_equal_p (get_last_value (reg), x)
8434 || rtx_equal_p (reg, get_last_value (x)))
8435 return reg;
8436 else
8437 break;
8440 /* fall through */
8442 default:
8443 fmt = GET_RTX_FORMAT (code);
8444 copied = false;
8445 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8446 if (fmt[i] == 'e')
8448 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8449 if (op != XEXP (x, i))
8451 if (!copied)
8453 copied = true;
8454 x = copy_rtx (x);
8456 XEXP (x, i) = op;
8459 else if (fmt[i] == 'E')
8461 int j;
8462 for (j = 0; j < XVECLEN (x, i); j++)
8464 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8465 if (op != XVECEXP (x, i, j))
8467 if (!copied)
8469 copied = true;
8470 x = copy_rtx (x);
8472 XVECEXP (x, i, j) = op;
8477 break;
8480 return x;
8483 /* Return X converted to MODE. If the value is already truncated to
8484 MODE we can just return a subreg even though in the general case we
8485 would need an explicit truncation. */
8487 static rtx
8488 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8490 if (!CONST_INT_P (x)
8491 && partial_subreg_p (mode, GET_MODE (x))
8492 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8493 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8495 /* Bit-cast X into an integer mode. */
8496 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8497 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8498 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8499 x, GET_MODE (x));
8502 return gen_lowpart (mode, x);
8505 /* See if X can be simplified knowing that we will only refer to it in
8506 MODE and will only refer to those bits that are nonzero in MASK.
8507 If other bits are being computed or if masking operations are done
8508 that select a superset of the bits in MASK, they can sometimes be
8509 ignored.
8511 Return a possibly simplified expression, but always convert X to
8512 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8514 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8515 are all off in X. This is used when X will be complemented, by either
8516 NOT, NEG, or XOR. */
8518 static rtx
8519 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8520 int just_select)
8522 enum rtx_code code = GET_CODE (x);
8523 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8524 machine_mode op_mode;
8525 unsigned HOST_WIDE_INT nonzero;
8527 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8528 code below will do the wrong thing since the mode of such an
8529 expression is VOIDmode.
8531 Also do nothing if X is a CLOBBER; this can happen if X was
8532 the return value from a call to gen_lowpart. */
8533 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8534 return x;
8536 /* We want to perform the operation in its present mode unless we know
8537 that the operation is valid in MODE, in which case we do the operation
8538 in MODE. */
8539 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8540 && have_insn_for (code, mode))
8541 ? mode : GET_MODE (x));
8543 /* It is not valid to do a right-shift in a narrower mode
8544 than the one it came in with. */
8545 if ((code == LSHIFTRT || code == ASHIFTRT)
8546 && partial_subreg_p (mode, GET_MODE (x)))
8547 op_mode = GET_MODE (x);
8549 /* Truncate MASK to fit OP_MODE. */
8550 if (op_mode)
8551 mask &= GET_MODE_MASK (op_mode);
8553 /* Determine what bits of X are guaranteed to be (non)zero. */
8554 nonzero = nonzero_bits (x, mode);
8556 /* If none of the bits in X are needed, return a zero. */
8557 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8558 x = const0_rtx;
8560 /* If X is a CONST_INT, return a new one. Do this here since the
8561 test below will fail. */
8562 if (CONST_INT_P (x))
8564 if (SCALAR_INT_MODE_P (mode))
8565 return gen_int_mode (INTVAL (x) & mask, mode);
8566 else
8568 x = GEN_INT (INTVAL (x) & mask);
8569 return gen_lowpart_common (mode, x);
8573 /* If X is narrower than MODE and we want all the bits in X's mode, just
8574 get X in the proper mode. */
8575 if (paradoxical_subreg_p (mode, GET_MODE (x))
8576 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8577 return gen_lowpart (mode, x);
8579 /* We can ignore the effect of a SUBREG if it narrows the mode or
8580 if the constant masks to zero all the bits the mode doesn't have. */
8581 if (GET_CODE (x) == SUBREG
8582 && subreg_lowpart_p (x)
8583 && (partial_subreg_p (x)
8584 || (0 == (mask
8585 & GET_MODE_MASK (GET_MODE (x))
8586 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8587 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8589 scalar_int_mode int_mode, xmode;
8590 if (is_a <scalar_int_mode> (mode, &int_mode)
8591 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8592 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8593 integer too. */
8594 return force_int_to_mode (x, int_mode, xmode,
8595 as_a <scalar_int_mode> (op_mode),
8596 mask, just_select);
8598 return gen_lowpart_or_truncate (mode, x);
8601 /* Subroutine of force_to_mode that handles cases in which both X and
8602 the result are scalar integers. MODE is the mode of the result,
8603 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8604 is preferred for simplified versions of X. The other arguments
8605 are as for force_to_mode. */
8607 static rtx
8608 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8609 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8610 int just_select)
8612 enum rtx_code code = GET_CODE (x);
8613 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8614 unsigned HOST_WIDE_INT fuller_mask;
8615 rtx op0, op1, temp;
8617 /* When we have an arithmetic operation, or a shift whose count we
8618 do not know, we need to assume that all bits up to the highest-order
8619 bit in MASK will be needed. This is how we form such a mask. */
8620 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8621 fuller_mask = HOST_WIDE_INT_M1U;
8622 else
8623 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8624 - 1);
8626 switch (code)
8628 case CLOBBER:
8629 /* If X is a (clobber (const_int)), return it since we know we are
8630 generating something that won't match. */
8631 return x;
8633 case SIGN_EXTEND:
8634 case ZERO_EXTEND:
8635 case ZERO_EXTRACT:
8636 case SIGN_EXTRACT:
8637 x = expand_compound_operation (x);
8638 if (GET_CODE (x) != code)
8639 return force_to_mode (x, mode, mask, next_select);
8640 break;
8642 case TRUNCATE:
8643 /* Similarly for a truncate. */
8644 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8646 case AND:
8647 /* If this is an AND with a constant, convert it into an AND
8648 whose constant is the AND of that constant with MASK. If it
8649 remains an AND of MASK, delete it since it is redundant. */
8651 if (CONST_INT_P (XEXP (x, 1)))
8653 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8654 mask & INTVAL (XEXP (x, 1)));
8655 xmode = op_mode;
8657 /* If X is still an AND, see if it is an AND with a mask that
8658 is just some low-order bits. If so, and it is MASK, we don't
8659 need it. */
8661 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8662 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8663 x = XEXP (x, 0);
8665 /* If it remains an AND, try making another AND with the bits
8666 in the mode mask that aren't in MASK turned on. If the
8667 constant in the AND is wide enough, this might make a
8668 cheaper constant. */
8670 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8671 && GET_MODE_MASK (xmode) != mask
8672 && HWI_COMPUTABLE_MODE_P (xmode))
8674 unsigned HOST_WIDE_INT cval
8675 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8676 rtx y;
8678 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8679 gen_int_mode (cval, xmode));
8680 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8681 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8682 x = y;
8685 break;
8688 goto binop;
8690 case PLUS:
8691 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8692 low-order bits (as in an alignment operation) and FOO is already
8693 aligned to that boundary, mask C1 to that boundary as well.
8694 This may eliminate that PLUS and, later, the AND. */
8697 unsigned int width = GET_MODE_PRECISION (mode);
8698 unsigned HOST_WIDE_INT smask = mask;
8700 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8701 number, sign extend it. */
8703 if (width < HOST_BITS_PER_WIDE_INT
8704 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8705 smask |= HOST_WIDE_INT_M1U << width;
8707 if (CONST_INT_P (XEXP (x, 1))
8708 && pow2p_hwi (- smask)
8709 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8710 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8711 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8712 (INTVAL (XEXP (x, 1)) & smask)),
8713 mode, smask, next_select);
8716 /* fall through */
8718 case MULT:
8719 /* Substituting into the operands of a widening MULT is not likely to
8720 create RTL matching a machine insn. */
8721 if (code == MULT
8722 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8723 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8724 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8725 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8726 && REG_P (XEXP (XEXP (x, 0), 0))
8727 && REG_P (XEXP (XEXP (x, 1), 0)))
8728 return gen_lowpart_or_truncate (mode, x);
8730 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8731 most significant bit in MASK since carries from those bits will
8732 affect the bits we are interested in. */
8733 mask = fuller_mask;
8734 goto binop;
8736 case MINUS:
8737 /* If X is (minus C Y) where C's least set bit is larger than any bit
8738 in the mask, then we may replace with (neg Y). */
8739 if (CONST_INT_P (XEXP (x, 0))
8740 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8742 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8743 return force_to_mode (x, mode, mask, next_select);
8746 /* Similarly, if C contains every bit in the fuller_mask, then we may
8747 replace with (not Y). */
8748 if (CONST_INT_P (XEXP (x, 0))
8749 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8751 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8752 return force_to_mode (x, mode, mask, next_select);
8755 mask = fuller_mask;
8756 goto binop;
8758 case IOR:
8759 case XOR:
8760 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8761 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8762 operation which may be a bitfield extraction. Ensure that the
8763 constant we form is not wider than the mode of X. */
8765 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8766 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8767 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8768 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8769 && CONST_INT_P (XEXP (x, 1))
8770 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8771 + floor_log2 (INTVAL (XEXP (x, 1))))
8772 < GET_MODE_PRECISION (xmode))
8773 && (UINTVAL (XEXP (x, 1))
8774 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8776 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8777 << INTVAL (XEXP (XEXP (x, 0), 1)),
8778 xmode);
8779 temp = simplify_gen_binary (GET_CODE (x), xmode,
8780 XEXP (XEXP (x, 0), 0), temp);
8781 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8782 XEXP (XEXP (x, 0), 1));
8783 return force_to_mode (x, mode, mask, next_select);
8786 binop:
8787 /* For most binary operations, just propagate into the operation and
8788 change the mode if we have an operation of that mode. */
8790 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8791 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8793 /* If we ended up truncating both operands, truncate the result of the
8794 operation instead. */
8795 if (GET_CODE (op0) == TRUNCATE
8796 && GET_CODE (op1) == TRUNCATE)
8798 op0 = XEXP (op0, 0);
8799 op1 = XEXP (op1, 0);
8802 op0 = gen_lowpart_or_truncate (op_mode, op0);
8803 op1 = gen_lowpart_or_truncate (op_mode, op1);
8805 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8807 x = simplify_gen_binary (code, op_mode, op0, op1);
8808 xmode = op_mode;
8810 break;
8812 case ASHIFT:
8813 /* For left shifts, do the same, but just for the first operand.
8814 However, we cannot do anything with shifts where we cannot
8815 guarantee that the counts are smaller than the size of the mode
8816 because such a count will have a different meaning in a
8817 wider mode. */
8819 if (! (CONST_INT_P (XEXP (x, 1))
8820 && INTVAL (XEXP (x, 1)) >= 0
8821 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8822 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8823 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8824 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8825 break;
8827 /* If the shift count is a constant and we can do arithmetic in
8828 the mode of the shift, refine which bits we need. Otherwise, use the
8829 conservative form of the mask. */
8830 if (CONST_INT_P (XEXP (x, 1))
8831 && INTVAL (XEXP (x, 1)) >= 0
8832 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8833 && HWI_COMPUTABLE_MODE_P (op_mode))
8834 mask >>= INTVAL (XEXP (x, 1));
8835 else
8836 mask = fuller_mask;
8838 op0 = gen_lowpart_or_truncate (op_mode,
8839 force_to_mode (XEXP (x, 0), op_mode,
8840 mask, next_select));
8842 if (op_mode != xmode || op0 != XEXP (x, 0))
8844 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8845 xmode = op_mode;
8847 break;
8849 case LSHIFTRT:
8850 /* Here we can only do something if the shift count is a constant,
8851 this shift constant is valid for the host, and we can do arithmetic
8852 in OP_MODE. */
8854 if (CONST_INT_P (XEXP (x, 1))
8855 && INTVAL (XEXP (x, 1)) >= 0
8856 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8857 && HWI_COMPUTABLE_MODE_P (op_mode))
8859 rtx inner = XEXP (x, 0);
8860 unsigned HOST_WIDE_INT inner_mask;
8862 /* Select the mask of the bits we need for the shift operand. */
8863 inner_mask = mask << INTVAL (XEXP (x, 1));
8865 /* We can only change the mode of the shift if we can do arithmetic
8866 in the mode of the shift and INNER_MASK is no wider than the
8867 width of X's mode. */
8868 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8869 op_mode = xmode;
8871 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8873 if (xmode != op_mode || inner != XEXP (x, 0))
8875 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8876 xmode = op_mode;
8880 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8881 shift and AND produces only copies of the sign bit (C2 is one less
8882 than a power of two), we can do this with just a shift. */
8884 if (GET_CODE (x) == LSHIFTRT
8885 && CONST_INT_P (XEXP (x, 1))
8886 /* The shift puts one of the sign bit copies in the least significant
8887 bit. */
8888 && ((INTVAL (XEXP (x, 1))
8889 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8890 >= GET_MODE_PRECISION (xmode))
8891 && pow2p_hwi (mask + 1)
8892 /* Number of bits left after the shift must be more than the mask
8893 needs. */
8894 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8895 <= GET_MODE_PRECISION (xmode))
8896 /* Must be more sign bit copies than the mask needs. */
8897 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8898 >= exact_log2 (mask + 1)))
8899 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8900 GEN_INT (GET_MODE_PRECISION (xmode)
8901 - exact_log2 (mask + 1)));
8903 goto shiftrt;
8905 case ASHIFTRT:
8906 /* If we are just looking for the sign bit, we don't need this shift at
8907 all, even if it has a variable count. */
8908 if (val_signbit_p (xmode, mask))
8909 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8911 /* If this is a shift by a constant, get a mask that contains those bits
8912 that are not copies of the sign bit. We then have two cases: If
8913 MASK only includes those bits, this can be a logical shift, which may
8914 allow simplifications. If MASK is a single-bit field not within
8915 those bits, we are requesting a copy of the sign bit and hence can
8916 shift the sign bit to the appropriate location. */
8918 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8919 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8921 unsigned HOST_WIDE_INT nonzero;
8922 int i;
8924 /* If the considered data is wider than HOST_WIDE_INT, we can't
8925 represent a mask for all its bits in a single scalar.
8926 But we only care about the lower bits, so calculate these. */
8928 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8930 nonzero = HOST_WIDE_INT_M1U;
8932 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8933 is the number of bits a full-width mask would have set.
8934 We need only shift if these are fewer than nonzero can
8935 hold. If not, we must keep all bits set in nonzero. */
8937 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8938 < HOST_BITS_PER_WIDE_INT)
8939 nonzero >>= INTVAL (XEXP (x, 1))
8940 + HOST_BITS_PER_WIDE_INT
8941 - GET_MODE_PRECISION (xmode);
8943 else
8945 nonzero = GET_MODE_MASK (xmode);
8946 nonzero >>= INTVAL (XEXP (x, 1));
8949 if ((mask & ~nonzero) == 0)
8951 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8952 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8953 if (GET_CODE (x) != ASHIFTRT)
8954 return force_to_mode (x, mode, mask, next_select);
8957 else if ((i = exact_log2 (mask)) >= 0)
8959 x = simplify_shift_const
8960 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
8961 GET_MODE_PRECISION (xmode) - 1 - i);
8963 if (GET_CODE (x) != ASHIFTRT)
8964 return force_to_mode (x, mode, mask, next_select);
8968 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8969 even if the shift count isn't a constant. */
8970 if (mask == 1)
8971 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
8973 shiftrt:
8975 /* If this is a zero- or sign-extension operation that just affects bits
8976 we don't care about, remove it. Be sure the call above returned
8977 something that is still a shift. */
8979 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8980 && CONST_INT_P (XEXP (x, 1))
8981 && INTVAL (XEXP (x, 1)) >= 0
8982 && (INTVAL (XEXP (x, 1))
8983 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
8984 && GET_CODE (XEXP (x, 0)) == ASHIFT
8985 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8986 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8987 next_select);
8989 break;
8991 case ROTATE:
8992 case ROTATERT:
8993 /* If the shift count is constant and we can do computations
8994 in the mode of X, compute where the bits we care about are.
8995 Otherwise, we can't do anything. Don't change the mode of
8996 the shift or propagate MODE into the shift, though. */
8997 if (CONST_INT_P (XEXP (x, 1))
8998 && INTVAL (XEXP (x, 1)) >= 0)
9000 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9001 xmode, gen_int_mode (mask, xmode),
9002 XEXP (x, 1));
9003 if (temp && CONST_INT_P (temp))
9004 x = simplify_gen_binary (code, xmode,
9005 force_to_mode (XEXP (x, 0), xmode,
9006 INTVAL (temp), next_select),
9007 XEXP (x, 1));
9009 break;
9011 case NEG:
9012 /* If we just want the low-order bit, the NEG isn't needed since it
9013 won't change the low-order bit. */
9014 if (mask == 1)
9015 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9017 /* We need any bits less significant than the most significant bit in
9018 MASK since carries from those bits will affect the bits we are
9019 interested in. */
9020 mask = fuller_mask;
9021 goto unop;
9023 case NOT:
9024 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9025 same as the XOR case above. Ensure that the constant we form is not
9026 wider than the mode of X. */
9028 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9029 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9030 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9031 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9032 < GET_MODE_PRECISION (xmode))
9033 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9035 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9036 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9037 x = simplify_gen_binary (LSHIFTRT, xmode,
9038 temp, XEXP (XEXP (x, 0), 1));
9040 return force_to_mode (x, mode, mask, next_select);
9043 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9044 use the full mask inside the NOT. */
9045 mask = fuller_mask;
9047 unop:
9048 op0 = gen_lowpart_or_truncate (op_mode,
9049 force_to_mode (XEXP (x, 0), mode, mask,
9050 next_select));
9051 if (op_mode != xmode || op0 != XEXP (x, 0))
9053 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9054 xmode = op_mode;
9056 break;
9058 case NE:
9059 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9060 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9061 which is equal to STORE_FLAG_VALUE. */
9062 if ((mask & ~STORE_FLAG_VALUE) == 0
9063 && XEXP (x, 1) == const0_rtx
9064 && GET_MODE (XEXP (x, 0)) == mode
9065 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9066 && (nonzero_bits (XEXP (x, 0), mode)
9067 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9068 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9070 break;
9072 case IF_THEN_ELSE:
9073 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9074 written in a narrower mode. We play it safe and do not do so. */
9076 op0 = gen_lowpart_or_truncate (xmode,
9077 force_to_mode (XEXP (x, 1), mode,
9078 mask, next_select));
9079 op1 = gen_lowpart_or_truncate (xmode,
9080 force_to_mode (XEXP (x, 2), mode,
9081 mask, next_select));
9082 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9083 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9084 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9085 op0, op1);
9086 break;
9088 default:
9089 break;
9092 /* Ensure we return a value of the proper mode. */
9093 return gen_lowpart_or_truncate (mode, x);
9096 /* Return nonzero if X is an expression that has one of two values depending on
9097 whether some other value is zero or nonzero. In that case, we return the
9098 value that is being tested, *PTRUE is set to the value if the rtx being
9099 returned has a nonzero value, and *PFALSE is set to the other alternative.
9101 If we return zero, we set *PTRUE and *PFALSE to X. */
9103 static rtx
9104 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9106 machine_mode mode = GET_MODE (x);
9107 enum rtx_code code = GET_CODE (x);
9108 rtx cond0, cond1, true0, true1, false0, false1;
9109 unsigned HOST_WIDE_INT nz;
9110 scalar_int_mode int_mode;
9112 /* If we are comparing a value against zero, we are done. */
9113 if ((code == NE || code == EQ)
9114 && XEXP (x, 1) == const0_rtx)
9116 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9117 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9118 return XEXP (x, 0);
9121 /* If this is a unary operation whose operand has one of two values, apply
9122 our opcode to compute those values. */
9123 else if (UNARY_P (x)
9124 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9126 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9127 *pfalse = simplify_gen_unary (code, mode, false0,
9128 GET_MODE (XEXP (x, 0)));
9129 return cond0;
9132 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9133 make can't possibly match and would suppress other optimizations. */
9134 else if (code == COMPARE)
9137 /* If this is a binary operation, see if either side has only one of two
9138 values. If either one does or if both do and they are conditional on
9139 the same value, compute the new true and false values. */
9140 else if (BINARY_P (x))
9142 rtx op0 = XEXP (x, 0);
9143 rtx op1 = XEXP (x, 1);
9144 cond0 = if_then_else_cond (op0, &true0, &false0);
9145 cond1 = if_then_else_cond (op1, &true1, &false1);
9147 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9148 && (REG_P (op0) || REG_P (op1)))
9150 /* Try to enable a simplification by undoing work done by
9151 if_then_else_cond if it converted a REG into something more
9152 complex. */
9153 if (REG_P (op0))
9155 cond0 = 0;
9156 true0 = false0 = op0;
9158 else
9160 cond1 = 0;
9161 true1 = false1 = op1;
9165 if ((cond0 != 0 || cond1 != 0)
9166 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9168 /* If if_then_else_cond returned zero, then true/false are the
9169 same rtl. We must copy one of them to prevent invalid rtl
9170 sharing. */
9171 if (cond0 == 0)
9172 true0 = copy_rtx (true0);
9173 else if (cond1 == 0)
9174 true1 = copy_rtx (true1);
9176 if (COMPARISON_P (x))
9178 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9179 true0, true1);
9180 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9181 false0, false1);
9183 else
9185 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9186 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9189 return cond0 ? cond0 : cond1;
9192 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9193 operands is zero when the other is nonzero, and vice-versa,
9194 and STORE_FLAG_VALUE is 1 or -1. */
9196 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9197 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9198 || code == UMAX)
9199 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9201 rtx op0 = XEXP (XEXP (x, 0), 1);
9202 rtx op1 = XEXP (XEXP (x, 1), 1);
9204 cond0 = XEXP (XEXP (x, 0), 0);
9205 cond1 = XEXP (XEXP (x, 1), 0);
9207 if (COMPARISON_P (cond0)
9208 && COMPARISON_P (cond1)
9209 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9210 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9211 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9212 || ((swap_condition (GET_CODE (cond0))
9213 == reversed_comparison_code (cond1, NULL))
9214 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9215 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9216 && ! side_effects_p (x))
9218 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9219 *pfalse = simplify_gen_binary (MULT, mode,
9220 (code == MINUS
9221 ? simplify_gen_unary (NEG, mode,
9222 op1, mode)
9223 : op1),
9224 const_true_rtx);
9225 return cond0;
9229 /* Similarly for MULT, AND and UMIN, except that for these the result
9230 is always zero. */
9231 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9232 && (code == MULT || code == AND || code == UMIN)
9233 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9235 cond0 = XEXP (XEXP (x, 0), 0);
9236 cond1 = XEXP (XEXP (x, 1), 0);
9238 if (COMPARISON_P (cond0)
9239 && COMPARISON_P (cond1)
9240 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9241 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9242 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9243 || ((swap_condition (GET_CODE (cond0))
9244 == reversed_comparison_code (cond1, NULL))
9245 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9246 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9247 && ! side_effects_p (x))
9249 *ptrue = *pfalse = const0_rtx;
9250 return cond0;
9255 else if (code == IF_THEN_ELSE)
9257 /* If we have IF_THEN_ELSE already, extract the condition and
9258 canonicalize it if it is NE or EQ. */
9259 cond0 = XEXP (x, 0);
9260 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9261 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9262 return XEXP (cond0, 0);
9263 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9265 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9266 return XEXP (cond0, 0);
9268 else
9269 return cond0;
9272 /* If X is a SUBREG, we can narrow both the true and false values
9273 if the inner expression, if there is a condition. */
9274 else if (code == SUBREG
9275 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9276 &true0, &false0)))
9278 true0 = simplify_gen_subreg (mode, true0,
9279 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9280 false0 = simplify_gen_subreg (mode, false0,
9281 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9282 if (true0 && false0)
9284 *ptrue = true0;
9285 *pfalse = false0;
9286 return cond0;
9290 /* If X is a constant, this isn't special and will cause confusions
9291 if we treat it as such. Likewise if it is equivalent to a constant. */
9292 else if (CONSTANT_P (x)
9293 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9296 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9297 will be least confusing to the rest of the compiler. */
9298 else if (mode == BImode)
9300 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9301 return x;
9304 /* If X is known to be either 0 or -1, those are the true and
9305 false values when testing X. */
9306 else if (x == constm1_rtx || x == const0_rtx
9307 || (is_a <scalar_int_mode> (mode, &int_mode)
9308 && (num_sign_bit_copies (x, int_mode)
9309 == GET_MODE_PRECISION (int_mode))))
9311 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9312 return x;
9315 /* Likewise for 0 or a single bit. */
9316 else if (HWI_COMPUTABLE_MODE_P (mode)
9317 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9319 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9320 return x;
9323 /* Otherwise fail; show no condition with true and false values the same. */
9324 *ptrue = *pfalse = x;
9325 return 0;
9328 /* Return the value of expression X given the fact that condition COND
9329 is known to be true when applied to REG as its first operand and VAL
9330 as its second. X is known to not be shared and so can be modified in
9331 place.
9333 We only handle the simplest cases, and specifically those cases that
9334 arise with IF_THEN_ELSE expressions. */
9336 static rtx
9337 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9339 enum rtx_code code = GET_CODE (x);
9340 const char *fmt;
9341 int i, j;
9343 if (side_effects_p (x))
9344 return x;
9346 /* If either operand of the condition is a floating point value,
9347 then we have to avoid collapsing an EQ comparison. */
9348 if (cond == EQ
9349 && rtx_equal_p (x, reg)
9350 && ! FLOAT_MODE_P (GET_MODE (x))
9351 && ! FLOAT_MODE_P (GET_MODE (val)))
9352 return val;
9354 if (cond == UNEQ && rtx_equal_p (x, reg))
9355 return val;
9357 /* If X is (abs REG) and we know something about REG's relationship
9358 with zero, we may be able to simplify this. */
9360 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9361 switch (cond)
9363 case GE: case GT: case EQ:
9364 return XEXP (x, 0);
9365 case LT: case LE:
9366 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9367 XEXP (x, 0),
9368 GET_MODE (XEXP (x, 0)));
9369 default:
9370 break;
9373 /* The only other cases we handle are MIN, MAX, and comparisons if the
9374 operands are the same as REG and VAL. */
9376 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9378 if (rtx_equal_p (XEXP (x, 0), val))
9380 std::swap (val, reg);
9381 cond = swap_condition (cond);
9384 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9386 if (COMPARISON_P (x))
9388 if (comparison_dominates_p (cond, code))
9389 return const_true_rtx;
9391 code = reversed_comparison_code (x, NULL);
9392 if (code != UNKNOWN
9393 && comparison_dominates_p (cond, code))
9394 return const0_rtx;
9395 else
9396 return x;
9398 else if (code == SMAX || code == SMIN
9399 || code == UMIN || code == UMAX)
9401 int unsignedp = (code == UMIN || code == UMAX);
9403 /* Do not reverse the condition when it is NE or EQ.
9404 This is because we cannot conclude anything about
9405 the value of 'SMAX (x, y)' when x is not equal to y,
9406 but we can when x equals y. */
9407 if ((code == SMAX || code == UMAX)
9408 && ! (cond == EQ || cond == NE))
9409 cond = reverse_condition (cond);
9411 switch (cond)
9413 case GE: case GT:
9414 return unsignedp ? x : XEXP (x, 1);
9415 case LE: case LT:
9416 return unsignedp ? x : XEXP (x, 0);
9417 case GEU: case GTU:
9418 return unsignedp ? XEXP (x, 1) : x;
9419 case LEU: case LTU:
9420 return unsignedp ? XEXP (x, 0) : x;
9421 default:
9422 break;
9427 else if (code == SUBREG)
9429 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9430 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9432 if (SUBREG_REG (x) != r)
9434 /* We must simplify subreg here, before we lose track of the
9435 original inner_mode. */
9436 new_rtx = simplify_subreg (GET_MODE (x), r,
9437 inner_mode, SUBREG_BYTE (x));
9438 if (new_rtx)
9439 return new_rtx;
9440 else
9441 SUBST (SUBREG_REG (x), r);
9444 return x;
9446 /* We don't have to handle SIGN_EXTEND here, because even in the
9447 case of replacing something with a modeless CONST_INT, a
9448 CONST_INT is already (supposed to be) a valid sign extension for
9449 its narrower mode, which implies it's already properly
9450 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9451 story is different. */
9452 else if (code == ZERO_EXTEND)
9454 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9455 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9457 if (XEXP (x, 0) != r)
9459 /* We must simplify the zero_extend here, before we lose
9460 track of the original inner_mode. */
9461 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9462 r, inner_mode);
9463 if (new_rtx)
9464 return new_rtx;
9465 else
9466 SUBST (XEXP (x, 0), r);
9469 return x;
9472 fmt = GET_RTX_FORMAT (code);
9473 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9475 if (fmt[i] == 'e')
9476 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9477 else if (fmt[i] == 'E')
9478 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9479 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9480 cond, reg, val));
9483 return x;
9486 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9487 assignment as a field assignment. */
9489 static int
9490 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9492 if (widen_x && GET_MODE (x) != GET_MODE (y))
9494 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9495 return 0;
9496 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9497 return 0;
9498 /* For big endian, adjust the memory offset. */
9499 if (BYTES_BIG_ENDIAN)
9500 x = adjust_address_nv (x, GET_MODE (y),
9501 -subreg_lowpart_offset (GET_MODE (x),
9502 GET_MODE (y)));
9503 else
9504 x = adjust_address_nv (x, GET_MODE (y), 0);
9507 if (x == y || rtx_equal_p (x, y))
9508 return 1;
9510 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9511 return 0;
9513 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9514 Note that all SUBREGs of MEM are paradoxical; otherwise they
9515 would have been rewritten. */
9516 if (MEM_P (x) && GET_CODE (y) == SUBREG
9517 && MEM_P (SUBREG_REG (y))
9518 && rtx_equal_p (SUBREG_REG (y),
9519 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9520 return 1;
9522 if (MEM_P (y) && GET_CODE (x) == SUBREG
9523 && MEM_P (SUBREG_REG (x))
9524 && rtx_equal_p (SUBREG_REG (x),
9525 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9526 return 1;
9528 /* We used to see if get_last_value of X and Y were the same but that's
9529 not correct. In one direction, we'll cause the assignment to have
9530 the wrong destination and in the case, we'll import a register into this
9531 insn that might have already have been dead. So fail if none of the
9532 above cases are true. */
9533 return 0;
9536 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9537 Return that assignment if so.
9539 We only handle the most common cases. */
9541 static rtx
9542 make_field_assignment (rtx x)
9544 rtx dest = SET_DEST (x);
9545 rtx src = SET_SRC (x);
9546 rtx assign;
9547 rtx rhs, lhs;
9548 HOST_WIDE_INT c1;
9549 HOST_WIDE_INT pos;
9550 unsigned HOST_WIDE_INT len;
9551 rtx other;
9553 /* All the rules in this function are specific to scalar integers. */
9554 scalar_int_mode mode;
9555 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9556 return x;
9558 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9559 a clear of a one-bit field. We will have changed it to
9560 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9561 for a SUBREG. */
9563 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9564 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9565 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9566 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9568 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9569 1, 1, 1, 0);
9570 if (assign != 0)
9571 return gen_rtx_SET (assign, const0_rtx);
9572 return x;
9575 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9576 && subreg_lowpart_p (XEXP (src, 0))
9577 && partial_subreg_p (XEXP (src, 0))
9578 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9579 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9580 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9581 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9583 assign = make_extraction (VOIDmode, dest, 0,
9584 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9585 1, 1, 1, 0);
9586 if (assign != 0)
9587 return gen_rtx_SET (assign, const0_rtx);
9588 return x;
9591 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9592 one-bit field. */
9593 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9594 && XEXP (XEXP (src, 0), 0) == const1_rtx
9595 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9597 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9598 1, 1, 1, 0);
9599 if (assign != 0)
9600 return gen_rtx_SET (assign, const1_rtx);
9601 return x;
9604 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9605 SRC is an AND with all bits of that field set, then we can discard
9606 the AND. */
9607 if (GET_CODE (dest) == ZERO_EXTRACT
9608 && CONST_INT_P (XEXP (dest, 1))
9609 && GET_CODE (src) == AND
9610 && CONST_INT_P (XEXP (src, 1)))
9612 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9613 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9614 unsigned HOST_WIDE_INT ze_mask;
9616 if (width >= HOST_BITS_PER_WIDE_INT)
9617 ze_mask = -1;
9618 else
9619 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9621 /* Complete overlap. We can remove the source AND. */
9622 if ((and_mask & ze_mask) == ze_mask)
9623 return gen_rtx_SET (dest, XEXP (src, 0));
9625 /* Partial overlap. We can reduce the source AND. */
9626 if ((and_mask & ze_mask) != and_mask)
9628 src = gen_rtx_AND (mode, XEXP (src, 0),
9629 gen_int_mode (and_mask & ze_mask, mode));
9630 return gen_rtx_SET (dest, src);
9634 /* The other case we handle is assignments into a constant-position
9635 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9636 a mask that has all one bits except for a group of zero bits and
9637 OTHER is known to have zeros where C1 has ones, this is such an
9638 assignment. Compute the position and length from C1. Shift OTHER
9639 to the appropriate position, force it to the required mode, and
9640 make the extraction. Check for the AND in both operands. */
9642 /* One or more SUBREGs might obscure the constant-position field
9643 assignment. The first one we are likely to encounter is an outer
9644 narrowing SUBREG, which we can just strip for the purposes of
9645 identifying the constant-field assignment. */
9646 scalar_int_mode src_mode = mode;
9647 if (GET_CODE (src) == SUBREG
9648 && subreg_lowpart_p (src)
9649 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9650 src = SUBREG_REG (src);
9652 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9653 return x;
9655 rhs = expand_compound_operation (XEXP (src, 0));
9656 lhs = expand_compound_operation (XEXP (src, 1));
9658 if (GET_CODE (rhs) == AND
9659 && CONST_INT_P (XEXP (rhs, 1))
9660 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9661 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9662 /* The second SUBREG that might get in the way is a paradoxical
9663 SUBREG around the first operand of the AND. We want to
9664 pretend the operand is as wide as the destination here. We
9665 do this by adjusting the MEM to wider mode for the sole
9666 purpose of the call to rtx_equal_for_field_assignment_p. Also
9667 note this trick only works for MEMs. */
9668 else if (GET_CODE (rhs) == AND
9669 && paradoxical_subreg_p (XEXP (rhs, 0))
9670 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9671 && CONST_INT_P (XEXP (rhs, 1))
9672 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9673 dest, true))
9674 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9675 else if (GET_CODE (lhs) == AND
9676 && CONST_INT_P (XEXP (lhs, 1))
9677 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9678 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9679 /* The second SUBREG that might get in the way is a paradoxical
9680 SUBREG around the first operand of the AND. We want to
9681 pretend the operand is as wide as the destination here. We
9682 do this by adjusting the MEM to wider mode for the sole
9683 purpose of the call to rtx_equal_for_field_assignment_p. Also
9684 note this trick only works for MEMs. */
9685 else if (GET_CODE (lhs) == AND
9686 && paradoxical_subreg_p (XEXP (lhs, 0))
9687 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9688 && CONST_INT_P (XEXP (lhs, 1))
9689 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9690 dest, true))
9691 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9692 else
9693 return x;
9695 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9696 if (pos < 0
9697 || pos + len > GET_MODE_PRECISION (mode)
9698 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9699 || (c1 & nonzero_bits (other, mode)) != 0)
9700 return x;
9702 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9703 if (assign == 0)
9704 return x;
9706 /* The mode to use for the source is the mode of the assignment, or of
9707 what is inside a possible STRICT_LOW_PART. */
9708 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9709 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9711 /* Shift OTHER right POS places and make it the source, restricting it
9712 to the proper length and mode. */
9714 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9715 src_mode, other, pos),
9716 dest);
9717 src = force_to_mode (src, new_mode,
9718 len >= HOST_BITS_PER_WIDE_INT
9719 ? HOST_WIDE_INT_M1U
9720 : (HOST_WIDE_INT_1U << len) - 1,
9723 /* If SRC is masked by an AND that does not make a difference in
9724 the value being stored, strip it. */
9725 if (GET_CODE (assign) == ZERO_EXTRACT
9726 && CONST_INT_P (XEXP (assign, 1))
9727 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9728 && GET_CODE (src) == AND
9729 && CONST_INT_P (XEXP (src, 1))
9730 && UINTVAL (XEXP (src, 1))
9731 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9732 src = XEXP (src, 0);
9734 return gen_rtx_SET (assign, src);
9737 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9738 if so. */
9740 static rtx
9741 apply_distributive_law (rtx x)
9743 enum rtx_code code = GET_CODE (x);
9744 enum rtx_code inner_code;
9745 rtx lhs, rhs, other;
9746 rtx tem;
9748 /* Distributivity is not true for floating point as it can change the
9749 value. So we don't do it unless -funsafe-math-optimizations. */
9750 if (FLOAT_MODE_P (GET_MODE (x))
9751 && ! flag_unsafe_math_optimizations)
9752 return x;
9754 /* The outer operation can only be one of the following: */
9755 if (code != IOR && code != AND && code != XOR
9756 && code != PLUS && code != MINUS)
9757 return x;
9759 lhs = XEXP (x, 0);
9760 rhs = XEXP (x, 1);
9762 /* If either operand is a primitive we can't do anything, so get out
9763 fast. */
9764 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9765 return x;
9767 lhs = expand_compound_operation (lhs);
9768 rhs = expand_compound_operation (rhs);
9769 inner_code = GET_CODE (lhs);
9770 if (inner_code != GET_CODE (rhs))
9771 return x;
9773 /* See if the inner and outer operations distribute. */
9774 switch (inner_code)
9776 case LSHIFTRT:
9777 case ASHIFTRT:
9778 case AND:
9779 case IOR:
9780 /* These all distribute except over PLUS. */
9781 if (code == PLUS || code == MINUS)
9782 return x;
9783 break;
9785 case MULT:
9786 if (code != PLUS && code != MINUS)
9787 return x;
9788 break;
9790 case ASHIFT:
9791 /* This is also a multiply, so it distributes over everything. */
9792 break;
9794 /* This used to handle SUBREG, but this turned out to be counter-
9795 productive, since (subreg (op ...)) usually is not handled by
9796 insn patterns, and this "optimization" therefore transformed
9797 recognizable patterns into unrecognizable ones. Therefore the
9798 SUBREG case was removed from here.
9800 It is possible that distributing SUBREG over arithmetic operations
9801 leads to an intermediate result than can then be optimized further,
9802 e.g. by moving the outer SUBREG to the other side of a SET as done
9803 in simplify_set. This seems to have been the original intent of
9804 handling SUBREGs here.
9806 However, with current GCC this does not appear to actually happen,
9807 at least on major platforms. If some case is found where removing
9808 the SUBREG case here prevents follow-on optimizations, distributing
9809 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9811 default:
9812 return x;
9815 /* Set LHS and RHS to the inner operands (A and B in the example
9816 above) and set OTHER to the common operand (C in the example).
9817 There is only one way to do this unless the inner operation is
9818 commutative. */
9819 if (COMMUTATIVE_ARITH_P (lhs)
9820 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9821 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9822 else if (COMMUTATIVE_ARITH_P (lhs)
9823 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9824 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9825 else if (COMMUTATIVE_ARITH_P (lhs)
9826 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9827 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9828 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9829 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9830 else
9831 return x;
9833 /* Form the new inner operation, seeing if it simplifies first. */
9834 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9836 /* There is one exception to the general way of distributing:
9837 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9838 if (code == XOR && inner_code == IOR)
9840 inner_code = AND;
9841 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9844 /* We may be able to continuing distributing the result, so call
9845 ourselves recursively on the inner operation before forming the
9846 outer operation, which we return. */
9847 return simplify_gen_binary (inner_code, GET_MODE (x),
9848 apply_distributive_law (tem), other);
9851 /* See if X is of the form (* (+ A B) C), and if so convert to
9852 (+ (* A C) (* B C)) and try to simplify.
9854 Most of the time, this results in no change. However, if some of
9855 the operands are the same or inverses of each other, simplifications
9856 will result.
9858 For example, (and (ior A B) (not B)) can occur as the result of
9859 expanding a bit field assignment. When we apply the distributive
9860 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9861 which then simplifies to (and (A (not B))).
9863 Note that no checks happen on the validity of applying the inverse
9864 distributive law. This is pointless since we can do it in the
9865 few places where this routine is called.
9867 N is the index of the term that is decomposed (the arithmetic operation,
9868 i.e. (+ A B) in the first example above). !N is the index of the term that
9869 is distributed, i.e. of C in the first example above. */
9870 static rtx
9871 distribute_and_simplify_rtx (rtx x, int n)
9873 machine_mode mode;
9874 enum rtx_code outer_code, inner_code;
9875 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9877 /* Distributivity is not true for floating point as it can change the
9878 value. So we don't do it unless -funsafe-math-optimizations. */
9879 if (FLOAT_MODE_P (GET_MODE (x))
9880 && ! flag_unsafe_math_optimizations)
9881 return NULL_RTX;
9883 decomposed = XEXP (x, n);
9884 if (!ARITHMETIC_P (decomposed))
9885 return NULL_RTX;
9887 mode = GET_MODE (x);
9888 outer_code = GET_CODE (x);
9889 distributed = XEXP (x, !n);
9891 inner_code = GET_CODE (decomposed);
9892 inner_op0 = XEXP (decomposed, 0);
9893 inner_op1 = XEXP (decomposed, 1);
9895 /* Special case (and (xor B C) (not A)), which is equivalent to
9896 (xor (ior A B) (ior A C)) */
9897 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9899 distributed = XEXP (distributed, 0);
9900 outer_code = IOR;
9903 if (n == 0)
9905 /* Distribute the second term. */
9906 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9907 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9909 else
9911 /* Distribute the first term. */
9912 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9913 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9916 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9917 new_op0, new_op1));
9918 if (GET_CODE (tmp) != outer_code
9919 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9920 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9921 return tmp;
9923 return NULL_RTX;
9926 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9927 in MODE. Return an equivalent form, if different from (and VAROP
9928 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9930 static rtx
9931 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9932 unsigned HOST_WIDE_INT constop)
9934 unsigned HOST_WIDE_INT nonzero;
9935 unsigned HOST_WIDE_INT orig_constop;
9936 rtx orig_varop;
9937 int i;
9939 orig_varop = varop;
9940 orig_constop = constop;
9941 if (GET_CODE (varop) == CLOBBER)
9942 return NULL_RTX;
9944 /* Simplify VAROP knowing that we will be only looking at some of the
9945 bits in it.
9947 Note by passing in CONSTOP, we guarantee that the bits not set in
9948 CONSTOP are not significant and will never be examined. We must
9949 ensure that is the case by explicitly masking out those bits
9950 before returning. */
9951 varop = force_to_mode (varop, mode, constop, 0);
9953 /* If VAROP is a CLOBBER, we will fail so return it. */
9954 if (GET_CODE (varop) == CLOBBER)
9955 return varop;
9957 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9958 to VAROP and return the new constant. */
9959 if (CONST_INT_P (varop))
9960 return gen_int_mode (INTVAL (varop) & constop, mode);
9962 /* See what bits may be nonzero in VAROP. Unlike the general case of
9963 a call to nonzero_bits, here we don't care about bits outside
9964 MODE. */
9966 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9968 /* Turn off all bits in the constant that are known to already be zero.
9969 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9970 which is tested below. */
9972 constop &= nonzero;
9974 /* If we don't have any bits left, return zero. */
9975 if (constop == 0)
9976 return const0_rtx;
9978 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9979 a power of two, we can replace this with an ASHIFT. */
9980 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9981 && (i = exact_log2 (constop)) >= 0)
9982 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9984 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9985 or XOR, then try to apply the distributive law. This may eliminate
9986 operations if either branch can be simplified because of the AND.
9987 It may also make some cases more complex, but those cases probably
9988 won't match a pattern either with or without this. */
9990 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9992 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
9993 return
9994 gen_lowpart
9995 (mode,
9996 apply_distributive_law
9997 (simplify_gen_binary (GET_CODE (varop), varop_mode,
9998 simplify_and_const_int (NULL_RTX, varop_mode,
9999 XEXP (varop, 0),
10000 constop),
10001 simplify_and_const_int (NULL_RTX, varop_mode,
10002 XEXP (varop, 1),
10003 constop))));
10006 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10007 the AND and see if one of the operands simplifies to zero. If so, we
10008 may eliminate it. */
10010 if (GET_CODE (varop) == PLUS
10011 && pow2p_hwi (constop + 1))
10013 rtx o0, o1;
10015 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10016 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10017 if (o0 == const0_rtx)
10018 return o1;
10019 if (o1 == const0_rtx)
10020 return o0;
10023 /* Make a SUBREG if necessary. If we can't make it, fail. */
10024 varop = gen_lowpart (mode, varop);
10025 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10026 return NULL_RTX;
10028 /* If we are only masking insignificant bits, return VAROP. */
10029 if (constop == nonzero)
10030 return varop;
10032 if (varop == orig_varop && constop == orig_constop)
10033 return NULL_RTX;
10035 /* Otherwise, return an AND. */
10036 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10040 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10041 in MODE.
10043 Return an equivalent form, if different from X. Otherwise, return X. If
10044 X is zero, we are to always construct the equivalent form. */
10046 static rtx
10047 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10048 unsigned HOST_WIDE_INT constop)
10050 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10051 if (tem)
10052 return tem;
10054 if (!x)
10055 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10056 gen_int_mode (constop, mode));
10057 if (GET_MODE (x) != mode)
10058 x = gen_lowpart (mode, x);
10059 return x;
10062 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10063 We don't care about bits outside of those defined in MODE.
10065 For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
10066 a shift, AND, or zero_extract, we can do better. */
10068 static rtx
10069 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10070 scalar_int_mode mode,
10071 unsigned HOST_WIDE_INT *nonzero)
10073 rtx tem;
10074 reg_stat_type *rsp;
10076 /* If X is a register whose nonzero bits value is current, use it.
10077 Otherwise, if X is a register whose value we can find, use that
10078 value. Otherwise, use the previously-computed global nonzero bits
10079 for this register. */
10081 rsp = &reg_stat[REGNO (x)];
10082 if (rsp->last_set_value != 0
10083 && (rsp->last_set_mode == mode
10084 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10085 && GET_MODE_CLASS (mode) == MODE_INT))
10086 && ((rsp->last_set_label >= label_tick_ebb_start
10087 && rsp->last_set_label < label_tick)
10088 || (rsp->last_set_label == label_tick
10089 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10090 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10091 && REGNO (x) < reg_n_sets_max
10092 && REG_N_SETS (REGNO (x)) == 1
10093 && !REGNO_REG_SET_P
10094 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10095 REGNO (x)))))
10097 /* Note that, even if the precision of last_set_mode is lower than that
10098 of mode, record_value_for_reg invoked nonzero_bits on the register
10099 with nonzero_bits_mode (because last_set_mode is necessarily integral
10100 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10101 are all valid, hence in mode too since nonzero_bits_mode is defined
10102 to the largest HWI_COMPUTABLE_MODE_P mode. */
10103 *nonzero &= rsp->last_set_nonzero_bits;
10104 return NULL;
10107 tem = get_last_value (x);
10108 if (tem)
10110 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10111 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10113 return tem;
10116 if (nonzero_sign_valid && rsp->nonzero_bits)
10118 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10120 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10121 /* We don't know anything about the upper bits. */
10122 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10124 *nonzero &= mask;
10127 return NULL;
10130 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10131 end of X that are known to be equal to the sign bit. X will be used
10132 in mode MODE; the returned value will always be between 1 and the
10133 number of bits in MODE. */
10135 static rtx
10136 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10137 scalar_int_mode mode,
10138 unsigned int *result)
10140 rtx tem;
10141 reg_stat_type *rsp;
10143 rsp = &reg_stat[REGNO (x)];
10144 if (rsp->last_set_value != 0
10145 && rsp->last_set_mode == mode
10146 && ((rsp->last_set_label >= label_tick_ebb_start
10147 && rsp->last_set_label < label_tick)
10148 || (rsp->last_set_label == label_tick
10149 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10150 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10151 && REGNO (x) < reg_n_sets_max
10152 && REG_N_SETS (REGNO (x)) == 1
10153 && !REGNO_REG_SET_P
10154 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10155 REGNO (x)))))
10157 *result = rsp->last_set_sign_bit_copies;
10158 return NULL;
10161 tem = get_last_value (x);
10162 if (tem != 0)
10163 return tem;
10165 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10166 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10167 *result = rsp->sign_bit_copies;
10169 return NULL;
10172 /* Return the number of "extended" bits there are in X, when interpreted
10173 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10174 unsigned quantities, this is the number of high-order zero bits.
10175 For signed quantities, this is the number of copies of the sign bit
10176 minus 1. In both case, this function returns the number of "spare"
10177 bits. For example, if two quantities for which this function returns
10178 at least 1 are added, the addition is known not to overflow.
10180 This function will always return 0 unless called during combine, which
10181 implies that it must be called from a define_split. */
10183 unsigned int
10184 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10186 if (nonzero_sign_valid == 0)
10187 return 0;
10189 scalar_int_mode int_mode;
10190 return (unsignedp
10191 ? (is_a <scalar_int_mode> (mode, &int_mode)
10192 && HWI_COMPUTABLE_MODE_P (int_mode)
10193 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10194 - floor_log2 (nonzero_bits (x, int_mode)))
10195 : 0)
10196 : num_sign_bit_copies (x, mode) - 1);
10199 /* This function is called from `simplify_shift_const' to merge two
10200 outer operations. Specifically, we have already found that we need
10201 to perform operation *POP0 with constant *PCONST0 at the outermost
10202 position. We would now like to also perform OP1 with constant CONST1
10203 (with *POP0 being done last).
10205 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10206 the resulting operation. *PCOMP_P is set to 1 if we would need to
10207 complement the innermost operand, otherwise it is unchanged.
10209 MODE is the mode in which the operation will be done. No bits outside
10210 the width of this mode matter. It is assumed that the width of this mode
10211 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10213 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
10214 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10215 result is simply *PCONST0.
10217 If the resulting operation cannot be expressed as one operation, we
10218 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10220 static int
10221 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10223 enum rtx_code op0 = *pop0;
10224 HOST_WIDE_INT const0 = *pconst0;
10226 const0 &= GET_MODE_MASK (mode);
10227 const1 &= GET_MODE_MASK (mode);
10229 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10230 if (op0 == AND)
10231 const1 &= const0;
10233 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10234 if OP0 is SET. */
10236 if (op1 == UNKNOWN || op0 == SET)
10237 return 1;
10239 else if (op0 == UNKNOWN)
10240 op0 = op1, const0 = const1;
10242 else if (op0 == op1)
10244 switch (op0)
10246 case AND:
10247 const0 &= const1;
10248 break;
10249 case IOR:
10250 const0 |= const1;
10251 break;
10252 case XOR:
10253 const0 ^= const1;
10254 break;
10255 case PLUS:
10256 const0 += const1;
10257 break;
10258 case NEG:
10259 op0 = UNKNOWN;
10260 break;
10261 default:
10262 break;
10266 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10267 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10268 return 0;
10270 /* If the two constants aren't the same, we can't do anything. The
10271 remaining six cases can all be done. */
10272 else if (const0 != const1)
10273 return 0;
10275 else
10276 switch (op0)
10278 case IOR:
10279 if (op1 == AND)
10280 /* (a & b) | b == b */
10281 op0 = SET;
10282 else /* op1 == XOR */
10283 /* (a ^ b) | b == a | b */
10285 break;
10287 case XOR:
10288 if (op1 == AND)
10289 /* (a & b) ^ b == (~a) & b */
10290 op0 = AND, *pcomp_p = 1;
10291 else /* op1 == IOR */
10292 /* (a | b) ^ b == a & ~b */
10293 op0 = AND, const0 = ~const0;
10294 break;
10296 case AND:
10297 if (op1 == IOR)
10298 /* (a | b) & b == b */
10299 op0 = SET;
10300 else /* op1 == XOR */
10301 /* (a ^ b) & b) == (~a) & b */
10302 *pcomp_p = 1;
10303 break;
10304 default:
10305 break;
10308 /* Check for NO-OP cases. */
10309 const0 &= GET_MODE_MASK (mode);
10310 if (const0 == 0
10311 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10312 op0 = UNKNOWN;
10313 else if (const0 == 0 && op0 == AND)
10314 op0 = SET;
10315 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10316 && op0 == AND)
10317 op0 = UNKNOWN;
10319 *pop0 = op0;
10321 /* ??? Slightly redundant with the above mask, but not entirely.
10322 Moving this above means we'd have to sign-extend the mode mask
10323 for the final test. */
10324 if (op0 != UNKNOWN && op0 != NEG)
10325 *pconst0 = trunc_int_for_mode (const0, mode);
10327 return 1;
10330 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10331 the shift in. The original shift operation CODE is performed on OP in
10332 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10333 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10334 result of the shift is subject to operation OUTER_CODE with operand
10335 OUTER_CONST. */
10337 static scalar_int_mode
10338 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10339 scalar_int_mode orig_mode, scalar_int_mode mode,
10340 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10342 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10344 /* In general we can't perform in wider mode for right shift and rotate. */
10345 switch (code)
10347 case ASHIFTRT:
10348 /* We can still widen if the bits brought in from the left are identical
10349 to the sign bit of ORIG_MODE. */
10350 if (num_sign_bit_copies (op, mode)
10351 > (unsigned) (GET_MODE_PRECISION (mode)
10352 - GET_MODE_PRECISION (orig_mode)))
10353 return mode;
10354 return orig_mode;
10356 case LSHIFTRT:
10357 /* Similarly here but with zero bits. */
10358 if (HWI_COMPUTABLE_MODE_P (mode)
10359 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10360 return mode;
10362 /* We can also widen if the bits brought in will be masked off. This
10363 operation is performed in ORIG_MODE. */
10364 if (outer_code == AND)
10366 int care_bits = low_bitmask_len (orig_mode, outer_const);
10368 if (care_bits >= 0
10369 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10370 return mode;
10372 /* fall through */
10374 case ROTATE:
10375 return orig_mode;
10377 case ROTATERT:
10378 gcc_unreachable ();
10380 default:
10381 return mode;
10385 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10386 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10387 if we cannot simplify it. Otherwise, return a simplified value.
10389 The shift is normally computed in the widest mode we find in VAROP, as
10390 long as it isn't a different number of words than RESULT_MODE. Exceptions
10391 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10393 static rtx
10394 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10395 rtx varop, int orig_count)
10397 enum rtx_code orig_code = code;
10398 rtx orig_varop = varop;
10399 int count;
10400 machine_mode mode = result_mode;
10401 machine_mode shift_mode;
10402 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10403 unsigned int mode_words
10404 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10405 /* We form (outer_op (code varop count) (outer_const)). */
10406 enum rtx_code outer_op = UNKNOWN;
10407 HOST_WIDE_INT outer_const = 0;
10408 int complement_p = 0;
10409 rtx new_rtx, x;
10411 /* Make sure and truncate the "natural" shift on the way in. We don't
10412 want to do this inside the loop as it makes it more difficult to
10413 combine shifts. */
10414 if (SHIFT_COUNT_TRUNCATED)
10415 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10417 /* If we were given an invalid count, don't do anything except exactly
10418 what was requested. */
10420 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10421 return NULL_RTX;
10423 count = orig_count;
10425 /* Unless one of the branches of the `if' in this loop does a `continue',
10426 we will `break' the loop after the `if'. */
10428 while (count != 0)
10430 /* If we have an operand of (clobber (const_int 0)), fail. */
10431 if (GET_CODE (varop) == CLOBBER)
10432 return NULL_RTX;
10434 /* Convert ROTATERT to ROTATE. */
10435 if (code == ROTATERT)
10437 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10438 code = ROTATE;
10439 count = bitsize - count;
10442 shift_mode = result_mode;
10443 if (shift_mode != mode)
10445 /* We only change the modes of scalar shifts. */
10446 int_mode = as_a <scalar_int_mode> (mode);
10447 int_result_mode = as_a <scalar_int_mode> (result_mode);
10448 shift_mode = try_widen_shift_mode (code, varop, count,
10449 int_result_mode, int_mode,
10450 outer_op, outer_const);
10453 scalar_int_mode shift_unit_mode
10454 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10456 /* Handle cases where the count is greater than the size of the mode
10457 minus 1. For ASHIFT, use the size minus one as the count (this can
10458 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10459 take the count modulo the size. For other shifts, the result is
10460 zero.
10462 Since these shifts are being produced by the compiler by combining
10463 multiple operations, each of which are defined, we know what the
10464 result is supposed to be. */
10466 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10468 if (code == ASHIFTRT)
10469 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10470 else if (code == ROTATE || code == ROTATERT)
10471 count %= GET_MODE_PRECISION (shift_unit_mode);
10472 else
10474 /* We can't simply return zero because there may be an
10475 outer op. */
10476 varop = const0_rtx;
10477 count = 0;
10478 break;
10482 /* If we discovered we had to complement VAROP, leave. Making a NOT
10483 here would cause an infinite loop. */
10484 if (complement_p)
10485 break;
10487 if (shift_mode == shift_unit_mode)
10489 /* An arithmetic right shift of a quantity known to be -1 or 0
10490 is a no-op. */
10491 if (code == ASHIFTRT
10492 && (num_sign_bit_copies (varop, shift_unit_mode)
10493 == GET_MODE_PRECISION (shift_unit_mode)))
10495 count = 0;
10496 break;
10499 /* If we are doing an arithmetic right shift and discarding all but
10500 the sign bit copies, this is equivalent to doing a shift by the
10501 bitsize minus one. Convert it into that shift because it will
10502 often allow other simplifications. */
10504 if (code == ASHIFTRT
10505 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10506 >= GET_MODE_PRECISION (shift_unit_mode)))
10507 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10509 /* We simplify the tests below and elsewhere by converting
10510 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10511 `make_compound_operation' will convert it to an ASHIFTRT for
10512 those machines (such as VAX) that don't have an LSHIFTRT. */
10513 if (code == ASHIFTRT
10514 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10515 && val_signbit_known_clear_p (shift_unit_mode,
10516 nonzero_bits (varop,
10517 shift_unit_mode)))
10518 code = LSHIFTRT;
10520 if (((code == LSHIFTRT
10521 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10522 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10523 || (code == ASHIFT
10524 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10525 && !((nonzero_bits (varop, shift_unit_mode) << count)
10526 & GET_MODE_MASK (shift_unit_mode))))
10527 && !side_effects_p (varop))
10528 varop = const0_rtx;
10531 switch (GET_CODE (varop))
10533 case SIGN_EXTEND:
10534 case ZERO_EXTEND:
10535 case SIGN_EXTRACT:
10536 case ZERO_EXTRACT:
10537 new_rtx = expand_compound_operation (varop);
10538 if (new_rtx != varop)
10540 varop = new_rtx;
10541 continue;
10543 break;
10545 case MEM:
10546 /* The following rules apply only to scalars. */
10547 if (shift_mode != shift_unit_mode)
10548 break;
10549 int_mode = as_a <scalar_int_mode> (mode);
10551 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10552 minus the width of a smaller mode, we can do this with a
10553 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10554 if ((code == ASHIFTRT || code == LSHIFTRT)
10555 && ! mode_dependent_address_p (XEXP (varop, 0),
10556 MEM_ADDR_SPACE (varop))
10557 && ! MEM_VOLATILE_P (varop)
10558 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10559 .exists (&tmode)))
10561 new_rtx = adjust_address_nv (varop, tmode,
10562 BYTES_BIG_ENDIAN ? 0
10563 : count / BITS_PER_UNIT);
10565 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10566 : ZERO_EXTEND, int_mode, new_rtx);
10567 count = 0;
10568 continue;
10570 break;
10572 case SUBREG:
10573 /* The following rules apply only to scalars. */
10574 if (shift_mode != shift_unit_mode)
10575 break;
10576 int_mode = as_a <scalar_int_mode> (mode);
10577 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10579 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10580 the same number of words as what we've seen so far. Then store
10581 the widest mode in MODE. */
10582 if (subreg_lowpart_p (varop)
10583 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10584 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10585 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10586 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10587 == mode_words
10588 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10590 varop = SUBREG_REG (varop);
10591 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10592 mode = inner_mode;
10593 continue;
10595 break;
10597 case MULT:
10598 /* Some machines use MULT instead of ASHIFT because MULT
10599 is cheaper. But it is still better on those machines to
10600 merge two shifts into one. */
10601 if (CONST_INT_P (XEXP (varop, 1))
10602 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10604 varop
10605 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10606 XEXP (varop, 0),
10607 GEN_INT (exact_log2 (
10608 UINTVAL (XEXP (varop, 1)))));
10609 continue;
10611 break;
10613 case UDIV:
10614 /* Similar, for when divides are cheaper. */
10615 if (CONST_INT_P (XEXP (varop, 1))
10616 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10618 varop
10619 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10620 XEXP (varop, 0),
10621 GEN_INT (exact_log2 (
10622 UINTVAL (XEXP (varop, 1)))));
10623 continue;
10625 break;
10627 case ASHIFTRT:
10628 /* If we are extracting just the sign bit of an arithmetic
10629 right shift, that shift is not needed. However, the sign
10630 bit of a wider mode may be different from what would be
10631 interpreted as the sign bit in a narrower mode, so, if
10632 the result is narrower, don't discard the shift. */
10633 if (code == LSHIFTRT
10634 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10635 && (GET_MODE_UNIT_BITSIZE (result_mode)
10636 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10638 varop = XEXP (varop, 0);
10639 continue;
10642 /* fall through */
10644 case LSHIFTRT:
10645 case ASHIFT:
10646 case ROTATE:
10647 /* The following rules apply only to scalars. */
10648 if (shift_mode != shift_unit_mode)
10649 break;
10650 int_mode = as_a <scalar_int_mode> (mode);
10651 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10652 int_result_mode = as_a <scalar_int_mode> (result_mode);
10654 /* Here we have two nested shifts. The result is usually the
10655 AND of a new shift with a mask. We compute the result below. */
10656 if (CONST_INT_P (XEXP (varop, 1))
10657 && INTVAL (XEXP (varop, 1)) >= 0
10658 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10659 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10660 && HWI_COMPUTABLE_MODE_P (int_mode))
10662 enum rtx_code first_code = GET_CODE (varop);
10663 unsigned int first_count = INTVAL (XEXP (varop, 1));
10664 unsigned HOST_WIDE_INT mask;
10665 rtx mask_rtx;
10667 /* We have one common special case. We can't do any merging if
10668 the inner code is an ASHIFTRT of a smaller mode. However, if
10669 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10670 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10671 we can convert it to
10672 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10673 This simplifies certain SIGN_EXTEND operations. */
10674 if (code == ASHIFT && first_code == ASHIFTRT
10675 && count == (GET_MODE_PRECISION (int_result_mode)
10676 - GET_MODE_PRECISION (int_varop_mode)))
10678 /* C3 has the low-order C1 bits zero. */
10680 mask = GET_MODE_MASK (int_mode)
10681 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10683 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10684 XEXP (varop, 0), mask);
10685 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10686 int_result_mode, varop, count);
10687 count = first_count;
10688 code = ASHIFTRT;
10689 continue;
10692 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10693 than C1 high-order bits equal to the sign bit, we can convert
10694 this to either an ASHIFT or an ASHIFTRT depending on the
10695 two counts.
10697 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10699 if (code == ASHIFTRT && first_code == ASHIFT
10700 && int_varop_mode == shift_unit_mode
10701 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10702 > first_count))
10704 varop = XEXP (varop, 0);
10705 count -= first_count;
10706 if (count < 0)
10708 count = -count;
10709 code = ASHIFT;
10712 continue;
10715 /* There are some cases we can't do. If CODE is ASHIFTRT,
10716 we can only do this if FIRST_CODE is also ASHIFTRT.
10718 We can't do the case when CODE is ROTATE and FIRST_CODE is
10719 ASHIFTRT.
10721 If the mode of this shift is not the mode of the outer shift,
10722 we can't do this if either shift is a right shift or ROTATE.
10724 Finally, we can't do any of these if the mode is too wide
10725 unless the codes are the same.
10727 Handle the case where the shift codes are the same
10728 first. */
10730 if (code == first_code)
10732 if (int_varop_mode != int_result_mode
10733 && (code == ASHIFTRT || code == LSHIFTRT
10734 || code == ROTATE))
10735 break;
10737 count += first_count;
10738 varop = XEXP (varop, 0);
10739 continue;
10742 if (code == ASHIFTRT
10743 || (code == ROTATE && first_code == ASHIFTRT)
10744 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10745 || (int_varop_mode != int_result_mode
10746 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10747 || first_code == ROTATE
10748 || code == ROTATE)))
10749 break;
10751 /* To compute the mask to apply after the shift, shift the
10752 nonzero bits of the inner shift the same way the
10753 outer shift will. */
10755 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10756 int_result_mode);
10758 mask_rtx
10759 = simplify_const_binary_operation (code, int_result_mode,
10760 mask_rtx, GEN_INT (count));
10762 /* Give up if we can't compute an outer operation to use. */
10763 if (mask_rtx == 0
10764 || !CONST_INT_P (mask_rtx)
10765 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10766 INTVAL (mask_rtx),
10767 int_result_mode, &complement_p))
10768 break;
10770 /* If the shifts are in the same direction, we add the
10771 counts. Otherwise, we subtract them. */
10772 if ((code == ASHIFTRT || code == LSHIFTRT)
10773 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10774 count += first_count;
10775 else
10776 count -= first_count;
10778 /* If COUNT is positive, the new shift is usually CODE,
10779 except for the two exceptions below, in which case it is
10780 FIRST_CODE. If the count is negative, FIRST_CODE should
10781 always be used */
10782 if (count > 0
10783 && ((first_code == ROTATE && code == ASHIFT)
10784 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10785 code = first_code;
10786 else if (count < 0)
10787 code = first_code, count = -count;
10789 varop = XEXP (varop, 0);
10790 continue;
10793 /* If we have (A << B << C) for any shift, we can convert this to
10794 (A << C << B). This wins if A is a constant. Only try this if
10795 B is not a constant. */
10797 else if (GET_CODE (varop) == code
10798 && CONST_INT_P (XEXP (varop, 0))
10799 && !CONST_INT_P (XEXP (varop, 1)))
10801 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10802 sure the result will be masked. See PR70222. */
10803 if (code == LSHIFTRT
10804 && int_mode != int_result_mode
10805 && !merge_outer_ops (&outer_op, &outer_const, AND,
10806 GET_MODE_MASK (int_result_mode)
10807 >> orig_count, int_result_mode,
10808 &complement_p))
10809 break;
10810 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10811 up outer sign extension (often left and right shift) is
10812 hardly more efficient than the original. See PR70429. */
10813 if (code == ASHIFTRT && int_mode != int_result_mode)
10814 break;
10816 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10817 XEXP (varop, 0),
10818 GEN_INT (count));
10819 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10820 count = 0;
10821 continue;
10823 break;
10825 case NOT:
10826 /* The following rules apply only to scalars. */
10827 if (shift_mode != shift_unit_mode)
10828 break;
10830 /* Make this fit the case below. */
10831 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10832 continue;
10834 case IOR:
10835 case AND:
10836 case XOR:
10837 /* The following rules apply only to scalars. */
10838 if (shift_mode != shift_unit_mode)
10839 break;
10840 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10841 int_result_mode = as_a <scalar_int_mode> (result_mode);
10843 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10844 with C the size of VAROP - 1 and the shift is logical if
10845 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10846 we have an (le X 0) operation. If we have an arithmetic shift
10847 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10848 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10850 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10851 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10852 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10853 && (code == LSHIFTRT || code == ASHIFTRT)
10854 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10855 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10857 count = 0;
10858 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10859 const0_rtx);
10861 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10862 varop = gen_rtx_NEG (int_varop_mode, varop);
10864 continue;
10867 /* If we have (shift (logical)), move the logical to the outside
10868 to allow it to possibly combine with another logical and the
10869 shift to combine with another shift. This also canonicalizes to
10870 what a ZERO_EXTRACT looks like. Also, some machines have
10871 (and (shift)) insns. */
10873 if (CONST_INT_P (XEXP (varop, 1))
10874 /* We can't do this if we have (ashiftrt (xor)) and the
10875 constant has its sign bit set in shift_unit_mode with
10876 shift_unit_mode wider than result_mode. */
10877 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10878 && int_result_mode != shift_unit_mode
10879 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10880 shift_unit_mode))
10881 && (new_rtx = simplify_const_binary_operation
10882 (code, int_result_mode,
10883 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10884 GEN_INT (count))) != 0
10885 && CONST_INT_P (new_rtx)
10886 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10887 INTVAL (new_rtx), int_result_mode,
10888 &complement_p))
10890 varop = XEXP (varop, 0);
10891 continue;
10894 /* If we can't do that, try to simplify the shift in each arm of the
10895 logical expression, make a new logical expression, and apply
10896 the inverse distributive law. This also can't be done for
10897 (ashiftrt (xor)) where we've widened the shift and the constant
10898 changes the sign bit. */
10899 if (CONST_INT_P (XEXP (varop, 1))
10900 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10901 && int_result_mode != shift_unit_mode
10902 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10903 shift_unit_mode)))
10905 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10906 XEXP (varop, 0), count);
10907 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10908 XEXP (varop, 1), count);
10910 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10911 lhs, rhs);
10912 varop = apply_distributive_law (varop);
10914 count = 0;
10915 continue;
10917 break;
10919 case EQ:
10920 /* The following rules apply only to scalars. */
10921 if (shift_mode != shift_unit_mode)
10922 break;
10923 int_result_mode = as_a <scalar_int_mode> (result_mode);
10925 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10926 says that the sign bit can be tested, FOO has mode MODE, C is
10927 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10928 that may be nonzero. */
10929 if (code == LSHIFTRT
10930 && XEXP (varop, 1) == const0_rtx
10931 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10932 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10933 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10934 && STORE_FLAG_VALUE == -1
10935 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10936 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10937 int_result_mode, &complement_p))
10939 varop = XEXP (varop, 0);
10940 count = 0;
10941 continue;
10943 break;
10945 case NEG:
10946 /* The following rules apply only to scalars. */
10947 if (shift_mode != shift_unit_mode)
10948 break;
10949 int_result_mode = as_a <scalar_int_mode> (result_mode);
10951 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10952 than the number of bits in the mode is equivalent to A. */
10953 if (code == LSHIFTRT
10954 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10955 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10957 varop = XEXP (varop, 0);
10958 count = 0;
10959 continue;
10962 /* NEG commutes with ASHIFT since it is multiplication. Move the
10963 NEG outside to allow shifts to combine. */
10964 if (code == ASHIFT
10965 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10966 int_result_mode, &complement_p))
10968 varop = XEXP (varop, 0);
10969 continue;
10971 break;
10973 case PLUS:
10974 /* The following rules apply only to scalars. */
10975 if (shift_mode != shift_unit_mode)
10976 break;
10977 int_result_mode = as_a <scalar_int_mode> (result_mode);
10979 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10980 is one less than the number of bits in the mode is
10981 equivalent to (xor A 1). */
10982 if (code == LSHIFTRT
10983 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10984 && XEXP (varop, 1) == constm1_rtx
10985 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10986 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10987 int_result_mode, &complement_p))
10989 count = 0;
10990 varop = XEXP (varop, 0);
10991 continue;
10994 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10995 that might be nonzero in BAR are those being shifted out and those
10996 bits are known zero in FOO, we can replace the PLUS with FOO.
10997 Similarly in the other operand order. This code occurs when
10998 we are computing the size of a variable-size array. */
11000 if ((code == ASHIFTRT || code == LSHIFTRT)
11001 && count < HOST_BITS_PER_WIDE_INT
11002 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11003 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11004 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11006 varop = XEXP (varop, 0);
11007 continue;
11009 else if ((code == ASHIFTRT || code == LSHIFTRT)
11010 && count < HOST_BITS_PER_WIDE_INT
11011 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11012 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11013 >> count)
11014 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11015 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11017 varop = XEXP (varop, 1);
11018 continue;
11021 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11022 if (code == ASHIFT
11023 && CONST_INT_P (XEXP (varop, 1))
11024 && (new_rtx = simplify_const_binary_operation
11025 (ASHIFT, int_result_mode,
11026 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11027 GEN_INT (count))) != 0
11028 && CONST_INT_P (new_rtx)
11029 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11030 INTVAL (new_rtx), int_result_mode,
11031 &complement_p))
11033 varop = XEXP (varop, 0);
11034 continue;
11037 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11038 signbit', and attempt to change the PLUS to an XOR and move it to
11039 the outer operation as is done above in the AND/IOR/XOR case
11040 leg for shift(logical). See details in logical handling above
11041 for reasoning in doing so. */
11042 if (code == LSHIFTRT
11043 && CONST_INT_P (XEXP (varop, 1))
11044 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11045 && (new_rtx = simplify_const_binary_operation
11046 (code, int_result_mode,
11047 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11048 GEN_INT (count))) != 0
11049 && CONST_INT_P (new_rtx)
11050 && merge_outer_ops (&outer_op, &outer_const, XOR,
11051 INTVAL (new_rtx), int_result_mode,
11052 &complement_p))
11054 varop = XEXP (varop, 0);
11055 continue;
11058 break;
11060 case MINUS:
11061 /* The following rules apply only to scalars. */
11062 if (shift_mode != shift_unit_mode)
11063 break;
11064 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11066 /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
11067 with C the size of VAROP - 1 and the shift is logical if
11068 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11069 we have a (gt X 0) operation. If the shift is arithmetic with
11070 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11071 we have a (neg (gt X 0)) operation. */
11073 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11074 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11075 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11076 && (code == LSHIFTRT || code == ASHIFTRT)
11077 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11078 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11079 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11081 count = 0;
11082 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11083 const0_rtx);
11085 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11086 varop = gen_rtx_NEG (int_varop_mode, varop);
11088 continue;
11090 break;
11092 case TRUNCATE:
11093 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11094 if the truncate does not affect the value. */
11095 if (code == LSHIFTRT
11096 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11097 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11098 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11099 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11100 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11102 rtx varop_inner = XEXP (varop, 0);
11104 varop_inner
11105 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11106 XEXP (varop_inner, 0),
11107 GEN_INT
11108 (count + INTVAL (XEXP (varop_inner, 1))));
11109 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11110 count = 0;
11111 continue;
11113 break;
11115 default:
11116 break;
11119 break;
11122 shift_mode = result_mode;
11123 if (shift_mode != mode)
11125 /* We only change the modes of scalar shifts. */
11126 int_mode = as_a <scalar_int_mode> (mode);
11127 int_result_mode = as_a <scalar_int_mode> (result_mode);
11128 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11129 int_mode, outer_op, outer_const);
11132 /* We have now finished analyzing the shift. The result should be
11133 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11134 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11135 to the result of the shift. OUTER_CONST is the relevant constant,
11136 but we must turn off all bits turned off in the shift. */
11138 if (outer_op == UNKNOWN
11139 && orig_code == code && orig_count == count
11140 && varop == orig_varop
11141 && shift_mode == GET_MODE (varop))
11142 return NULL_RTX;
11144 /* Make a SUBREG if necessary. If we can't make it, fail. */
11145 varop = gen_lowpart (shift_mode, varop);
11146 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11147 return NULL_RTX;
11149 /* If we have an outer operation and we just made a shift, it is
11150 possible that we could have simplified the shift were it not
11151 for the outer operation. So try to do the simplification
11152 recursively. */
11154 if (outer_op != UNKNOWN)
11155 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11156 else
11157 x = NULL_RTX;
11159 if (x == NULL_RTX)
11160 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11162 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11163 turn off all the bits that the shift would have turned off. */
11164 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11165 /* We only change the modes of scalar shifts. */
11166 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11167 x, GET_MODE_MASK (result_mode) >> orig_count);
11169 /* Do the remainder of the processing in RESULT_MODE. */
11170 x = gen_lowpart_or_truncate (result_mode, x);
11172 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11173 operation. */
11174 if (complement_p)
11175 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11177 if (outer_op != UNKNOWN)
11179 int_result_mode = as_a <scalar_int_mode> (result_mode);
11181 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11182 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11183 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11185 if (outer_op == AND)
11186 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11187 else if (outer_op == SET)
11189 /* This means that we have determined that the result is
11190 equivalent to a constant. This should be rare. */
11191 if (!side_effects_p (x))
11192 x = GEN_INT (outer_const);
11194 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11195 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11196 else
11197 x = simplify_gen_binary (outer_op, int_result_mode, x,
11198 GEN_INT (outer_const));
11201 return x;
11204 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11205 The result of the shift is RESULT_MODE. If we cannot simplify it,
11206 return X or, if it is NULL, synthesize the expression with
11207 simplify_gen_binary. Otherwise, return a simplified value.
11209 The shift is normally computed in the widest mode we find in VAROP, as
11210 long as it isn't a different number of words than RESULT_MODE. Exceptions
11211 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11213 static rtx
11214 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11215 rtx varop, int count)
11217 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11218 if (tem)
11219 return tem;
11221 if (!x)
11222 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11223 if (GET_MODE (x) != result_mode)
11224 x = gen_lowpart (result_mode, x);
11225 return x;
11229 /* A subroutine of recog_for_combine. See there for arguments and
11230 return value. */
11232 static int
11233 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11235 rtx pat = *pnewpat;
11236 rtx pat_without_clobbers;
11237 int insn_code_number;
11238 int num_clobbers_to_add = 0;
11239 int i;
11240 rtx notes = NULL_RTX;
11241 rtx old_notes, old_pat;
11242 int old_icode;
11244 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11245 we use to indicate that something didn't match. If we find such a
11246 thing, force rejection. */
11247 if (GET_CODE (pat) == PARALLEL)
11248 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11249 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11250 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11251 return -1;
11253 old_pat = PATTERN (insn);
11254 old_notes = REG_NOTES (insn);
11255 PATTERN (insn) = pat;
11256 REG_NOTES (insn) = NULL_RTX;
11258 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11259 if (dump_file && (dump_flags & TDF_DETAILS))
11261 if (insn_code_number < 0)
11262 fputs ("Failed to match this instruction:\n", dump_file);
11263 else
11264 fputs ("Successfully matched this instruction:\n", dump_file);
11265 print_rtl_single (dump_file, pat);
11268 /* If it isn't, there is the possibility that we previously had an insn
11269 that clobbered some register as a side effect, but the combined
11270 insn doesn't need to do that. So try once more without the clobbers
11271 unless this represents an ASM insn. */
11273 if (insn_code_number < 0 && ! check_asm_operands (pat)
11274 && GET_CODE (pat) == PARALLEL)
11276 int pos;
11278 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11279 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11281 if (i != pos)
11282 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11283 pos++;
11286 SUBST_INT (XVECLEN (pat, 0), pos);
11288 if (pos == 1)
11289 pat = XVECEXP (pat, 0, 0);
11291 PATTERN (insn) = pat;
11292 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11293 if (dump_file && (dump_flags & TDF_DETAILS))
11295 if (insn_code_number < 0)
11296 fputs ("Failed to match this instruction:\n", dump_file);
11297 else
11298 fputs ("Successfully matched this instruction:\n", dump_file);
11299 print_rtl_single (dump_file, pat);
11303 pat_without_clobbers = pat;
11305 PATTERN (insn) = old_pat;
11306 REG_NOTES (insn) = old_notes;
11308 /* Recognize all noop sets, these will be killed by followup pass. */
11309 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11310 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11312 /* If we had any clobbers to add, make a new pattern than contains
11313 them. Then check to make sure that all of them are dead. */
11314 if (num_clobbers_to_add)
11316 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11317 rtvec_alloc (GET_CODE (pat) == PARALLEL
11318 ? (XVECLEN (pat, 0)
11319 + num_clobbers_to_add)
11320 : num_clobbers_to_add + 1));
11322 if (GET_CODE (pat) == PARALLEL)
11323 for (i = 0; i < XVECLEN (pat, 0); i++)
11324 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11325 else
11326 XVECEXP (newpat, 0, 0) = pat;
11328 add_clobbers (newpat, insn_code_number);
11330 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11331 i < XVECLEN (newpat, 0); i++)
11333 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11334 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11335 return -1;
11336 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11338 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11339 notes = alloc_reg_note (REG_UNUSED,
11340 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11343 pat = newpat;
11346 if (insn_code_number >= 0
11347 && insn_code_number != NOOP_MOVE_INSN_CODE)
11349 old_pat = PATTERN (insn);
11350 old_notes = REG_NOTES (insn);
11351 old_icode = INSN_CODE (insn);
11352 PATTERN (insn) = pat;
11353 REG_NOTES (insn) = notes;
11354 INSN_CODE (insn) = insn_code_number;
11356 /* Allow targets to reject combined insn. */
11357 if (!targetm.legitimate_combined_insn (insn))
11359 if (dump_file && (dump_flags & TDF_DETAILS))
11360 fputs ("Instruction not appropriate for target.",
11361 dump_file);
11363 /* Callers expect recog_for_combine to strip
11364 clobbers from the pattern on failure. */
11365 pat = pat_without_clobbers;
11366 notes = NULL_RTX;
11368 insn_code_number = -1;
11371 PATTERN (insn) = old_pat;
11372 REG_NOTES (insn) = old_notes;
11373 INSN_CODE (insn) = old_icode;
11376 *pnewpat = pat;
11377 *pnotes = notes;
11379 return insn_code_number;
11382 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11383 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11384 Return whether anything was so changed. */
11386 static bool
11387 change_zero_ext (rtx pat)
11389 bool changed = false;
11390 rtx *src = &SET_SRC (pat);
11392 subrtx_ptr_iterator::array_type array;
11393 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11395 rtx x = **iter;
11396 scalar_int_mode mode, inner_mode;
11397 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11398 continue;
11399 int size;
11401 if (GET_CODE (x) == ZERO_EXTRACT
11402 && CONST_INT_P (XEXP (x, 1))
11403 && CONST_INT_P (XEXP (x, 2))
11404 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11405 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11407 size = INTVAL (XEXP (x, 1));
11409 int start = INTVAL (XEXP (x, 2));
11410 if (BITS_BIG_ENDIAN)
11411 start = GET_MODE_PRECISION (inner_mode) - size - start;
11413 if (start)
11414 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11415 else
11416 x = XEXP (x, 0);
11417 if (mode != inner_mode)
11418 x = gen_lowpart_SUBREG (mode, x);
11420 else if (GET_CODE (x) == ZERO_EXTEND
11421 && GET_CODE (XEXP (x, 0)) == SUBREG
11422 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11423 && !paradoxical_subreg_p (XEXP (x, 0))
11424 && subreg_lowpart_p (XEXP (x, 0)))
11426 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11427 size = GET_MODE_PRECISION (inner_mode);
11428 x = SUBREG_REG (XEXP (x, 0));
11429 if (GET_MODE (x) != mode)
11430 x = gen_lowpart_SUBREG (mode, x);
11432 else if (GET_CODE (x) == ZERO_EXTEND
11433 && REG_P (XEXP (x, 0))
11434 && HARD_REGISTER_P (XEXP (x, 0))
11435 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11437 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11438 size = GET_MODE_PRECISION (inner_mode);
11439 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11441 else
11442 continue;
11444 if (!(GET_CODE (x) == LSHIFTRT
11445 && CONST_INT_P (XEXP (x, 1))
11446 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11448 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11449 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11452 SUBST (**iter, x);
11453 changed = true;
11456 if (changed)
11457 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11458 maybe_swap_commutative_operands (**iter);
11460 rtx *dst = &SET_DEST (pat);
11461 scalar_int_mode mode;
11462 if (GET_CODE (*dst) == ZERO_EXTRACT
11463 && REG_P (XEXP (*dst, 0))
11464 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11465 && CONST_INT_P (XEXP (*dst, 1))
11466 && CONST_INT_P (XEXP (*dst, 2)))
11468 rtx reg = XEXP (*dst, 0);
11469 int width = INTVAL (XEXP (*dst, 1));
11470 int offset = INTVAL (XEXP (*dst, 2));
11471 int reg_width = GET_MODE_PRECISION (mode);
11472 if (BITS_BIG_ENDIAN)
11473 offset = reg_width - width - offset;
11475 rtx x, y, z, w;
11476 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11477 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11478 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11479 if (offset)
11480 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11481 else
11482 y = SET_SRC (pat);
11483 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11484 w = gen_rtx_IOR (mode, x, z);
11485 SUBST (SET_DEST (pat), reg);
11486 SUBST (SET_SRC (pat), w);
11488 changed = true;
11491 return changed;
11494 /* Like recog, but we receive the address of a pointer to a new pattern.
11495 We try to match the rtx that the pointer points to.
11496 If that fails, we may try to modify or replace the pattern,
11497 storing the replacement into the same pointer object.
11499 Modifications include deletion or addition of CLOBBERs. If the
11500 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11501 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11502 (and undo if that fails).
11504 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11505 the CLOBBERs are placed.
11507 The value is the final insn code from the pattern ultimately matched,
11508 or -1. */
11510 static int
11511 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11513 rtx pat = *pnewpat;
11514 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11515 if (insn_code_number >= 0 || check_asm_operands (pat))
11516 return insn_code_number;
11518 void *marker = get_undo_marker ();
11519 bool changed = false;
11521 if (GET_CODE (pat) == SET)
11522 changed = change_zero_ext (pat);
11523 else if (GET_CODE (pat) == PARALLEL)
11525 int i;
11526 for (i = 0; i < XVECLEN (pat, 0); i++)
11528 rtx set = XVECEXP (pat, 0, i);
11529 if (GET_CODE (set) == SET)
11530 changed |= change_zero_ext (set);
11534 if (changed)
11536 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11538 if (insn_code_number < 0)
11539 undo_to_marker (marker);
11542 return insn_code_number;
11545 /* Like gen_lowpart_general but for use by combine. In combine it
11546 is not possible to create any new pseudoregs. However, it is
11547 safe to create invalid memory addresses, because combine will
11548 try to recognize them and all they will do is make the combine
11549 attempt fail.
11551 If for some reason this cannot do its job, an rtx
11552 (clobber (const_int 0)) is returned.
11553 An insn containing that will not be recognized. */
11555 static rtx
11556 gen_lowpart_for_combine (machine_mode omode, rtx x)
11558 machine_mode imode = GET_MODE (x);
11559 unsigned int osize = GET_MODE_SIZE (omode);
11560 unsigned int isize = GET_MODE_SIZE (imode);
11561 rtx result;
11563 if (omode == imode)
11564 return x;
11566 /* We can only support MODE being wider than a word if X is a
11567 constant integer or has a mode the same size. */
11568 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11569 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11570 goto fail;
11572 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11573 won't know what to do. So we will strip off the SUBREG here and
11574 process normally. */
11575 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11577 x = SUBREG_REG (x);
11579 /* For use in case we fall down into the address adjustments
11580 further below, we need to adjust the known mode and size of
11581 x; imode and isize, since we just adjusted x. */
11582 imode = GET_MODE (x);
11584 if (imode == omode)
11585 return x;
11587 isize = GET_MODE_SIZE (imode);
11590 result = gen_lowpart_common (omode, x);
11592 if (result)
11593 return result;
11595 if (MEM_P (x))
11597 int offset = 0;
11599 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11600 address. */
11601 if (MEM_VOLATILE_P (x)
11602 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11603 goto fail;
11605 /* If we want to refer to something bigger than the original memref,
11606 generate a paradoxical subreg instead. That will force a reload
11607 of the original memref X. */
11608 if (paradoxical_subreg_p (omode, imode))
11609 return gen_rtx_SUBREG (omode, x, 0);
11611 if (WORDS_BIG_ENDIAN)
11612 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11614 /* Adjust the address so that the address-after-the-data is
11615 unchanged. */
11616 if (BYTES_BIG_ENDIAN)
11617 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11619 return adjust_address_nv (x, omode, offset);
11622 /* If X is a comparison operator, rewrite it in a new mode. This
11623 probably won't match, but may allow further simplifications. */
11624 else if (COMPARISON_P (x))
11625 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11627 /* If we couldn't simplify X any other way, just enclose it in a
11628 SUBREG. Normally, this SUBREG won't match, but some patterns may
11629 include an explicit SUBREG or we may simplify it further in combine. */
11630 else
11632 rtx res;
11634 if (imode == VOIDmode)
11636 imode = int_mode_for_mode (omode).require ();
11637 x = gen_lowpart_common (imode, x);
11638 if (x == NULL)
11639 goto fail;
11641 res = lowpart_subreg (omode, x, imode);
11642 if (res)
11643 return res;
11646 fail:
11647 return gen_rtx_CLOBBER (omode, const0_rtx);
11650 /* Try to simplify a comparison between OP0 and a constant OP1,
11651 where CODE is the comparison code that will be tested, into a
11652 (CODE OP0 const0_rtx) form.
11654 The result is a possibly different comparison code to use.
11655 *POP1 may be updated. */
11657 static enum rtx_code
11658 simplify_compare_const (enum rtx_code code, machine_mode mode,
11659 rtx op0, rtx *pop1)
11661 scalar_int_mode int_mode;
11662 HOST_WIDE_INT const_op = INTVAL (*pop1);
11664 /* Get the constant we are comparing against and turn off all bits
11665 not on in our mode. */
11666 if (mode != VOIDmode)
11667 const_op = trunc_int_for_mode (const_op, mode);
11669 /* If we are comparing against a constant power of two and the value
11670 being compared can only have that single bit nonzero (e.g., it was
11671 `and'ed with that bit), we can replace this with a comparison
11672 with zero. */
11673 if (const_op
11674 && (code == EQ || code == NE || code == GE || code == GEU
11675 || code == LT || code == LTU)
11676 && is_a <scalar_int_mode> (mode, &int_mode)
11677 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11678 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11679 && (nonzero_bits (op0, int_mode)
11680 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11682 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11683 const_op = 0;
11686 /* Similarly, if we are comparing a value known to be either -1 or
11687 0 with -1, change it to the opposite comparison against zero. */
11688 if (const_op == -1
11689 && (code == EQ || code == NE || code == GT || code == LE
11690 || code == GEU || code == LTU)
11691 && is_a <scalar_int_mode> (mode, &int_mode)
11692 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11694 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11695 const_op = 0;
11698 /* Do some canonicalizations based on the comparison code. We prefer
11699 comparisons against zero and then prefer equality comparisons.
11700 If we can reduce the size of a constant, we will do that too. */
11701 switch (code)
11703 case LT:
11704 /* < C is equivalent to <= (C - 1) */
11705 if (const_op > 0)
11707 const_op -= 1;
11708 code = LE;
11709 /* ... fall through to LE case below. */
11710 gcc_fallthrough ();
11712 else
11713 break;
11715 case LE:
11716 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11717 if (const_op < 0)
11719 const_op += 1;
11720 code = LT;
11723 /* If we are doing a <= 0 comparison on a value known to have
11724 a zero sign bit, we can replace this with == 0. */
11725 else if (const_op == 0
11726 && is_a <scalar_int_mode> (mode, &int_mode)
11727 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11728 && (nonzero_bits (op0, int_mode)
11729 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11730 == 0)
11731 code = EQ;
11732 break;
11734 case GE:
11735 /* >= C is equivalent to > (C - 1). */
11736 if (const_op > 0)
11738 const_op -= 1;
11739 code = GT;
11740 /* ... fall through to GT below. */
11741 gcc_fallthrough ();
11743 else
11744 break;
11746 case GT:
11747 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11748 if (const_op < 0)
11750 const_op += 1;
11751 code = GE;
11754 /* If we are doing a > 0 comparison on a value known to have
11755 a zero sign bit, we can replace this with != 0. */
11756 else if (const_op == 0
11757 && is_a <scalar_int_mode> (mode, &int_mode)
11758 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11759 && (nonzero_bits (op0, int_mode)
11760 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11761 == 0)
11762 code = NE;
11763 break;
11765 case LTU:
11766 /* < C is equivalent to <= (C - 1). */
11767 if (const_op > 0)
11769 const_op -= 1;
11770 code = LEU;
11771 /* ... fall through ... */
11773 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11774 else if (is_a <scalar_int_mode> (mode, &int_mode)
11775 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11776 && ((unsigned HOST_WIDE_INT) const_op
11777 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11779 const_op = 0;
11780 code = GE;
11781 break;
11783 else
11784 break;
11786 case LEU:
11787 /* unsigned <= 0 is equivalent to == 0 */
11788 if (const_op == 0)
11789 code = EQ;
11790 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11791 else if (is_a <scalar_int_mode> (mode, &int_mode)
11792 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11793 && ((unsigned HOST_WIDE_INT) const_op
11794 == ((HOST_WIDE_INT_1U
11795 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11797 const_op = 0;
11798 code = GE;
11800 break;
11802 case GEU:
11803 /* >= C is equivalent to > (C - 1). */
11804 if (const_op > 1)
11806 const_op -= 1;
11807 code = GTU;
11808 /* ... fall through ... */
11811 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11812 else if (is_a <scalar_int_mode> (mode, &int_mode)
11813 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11814 && ((unsigned HOST_WIDE_INT) const_op
11815 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11817 const_op = 0;
11818 code = LT;
11819 break;
11821 else
11822 break;
11824 case GTU:
11825 /* unsigned > 0 is equivalent to != 0 */
11826 if (const_op == 0)
11827 code = NE;
11828 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11829 else if (is_a <scalar_int_mode> (mode, &int_mode)
11830 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11831 && ((unsigned HOST_WIDE_INT) const_op
11832 == (HOST_WIDE_INT_1U
11833 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11835 const_op = 0;
11836 code = LT;
11838 break;
11840 default:
11841 break;
11844 *pop1 = GEN_INT (const_op);
11845 return code;
11848 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11849 comparison code that will be tested.
11851 The result is a possibly different comparison code to use. *POP0 and
11852 *POP1 may be updated.
11854 It is possible that we might detect that a comparison is either always
11855 true or always false. However, we do not perform general constant
11856 folding in combine, so this knowledge isn't useful. Such tautologies
11857 should have been detected earlier. Hence we ignore all such cases. */
11859 static enum rtx_code
11860 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11862 rtx op0 = *pop0;
11863 rtx op1 = *pop1;
11864 rtx tem, tem1;
11865 int i;
11866 scalar_int_mode mode, inner_mode, tmode;
11867 opt_scalar_int_mode tmode_iter;
11869 /* Try a few ways of applying the same transformation to both operands. */
11870 while (1)
11872 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11873 so check specially. */
11874 if (!WORD_REGISTER_OPERATIONS
11875 && code != GTU && code != GEU && code != LTU && code != LEU
11876 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11877 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11878 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11879 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11880 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11881 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11882 && (is_a <scalar_int_mode>
11883 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11884 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11885 && CONST_INT_P (XEXP (op0, 1))
11886 && XEXP (op0, 1) == XEXP (op1, 1)
11887 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11888 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11889 && (INTVAL (XEXP (op0, 1))
11890 == (GET_MODE_PRECISION (mode)
11891 - GET_MODE_PRECISION (inner_mode))))
11893 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11894 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11897 /* If both operands are the same constant shift, see if we can ignore the
11898 shift. We can if the shift is a rotate or if the bits shifted out of
11899 this shift are known to be zero for both inputs and if the type of
11900 comparison is compatible with the shift. */
11901 if (GET_CODE (op0) == GET_CODE (op1)
11902 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11903 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11904 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11905 && (code != GT && code != LT && code != GE && code != LE))
11906 || (GET_CODE (op0) == ASHIFTRT
11907 && (code != GTU && code != LTU
11908 && code != GEU && code != LEU)))
11909 && CONST_INT_P (XEXP (op0, 1))
11910 && INTVAL (XEXP (op0, 1)) >= 0
11911 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11912 && XEXP (op0, 1) == XEXP (op1, 1))
11914 machine_mode mode = GET_MODE (op0);
11915 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11916 int shift_count = INTVAL (XEXP (op0, 1));
11918 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11919 mask &= (mask >> shift_count) << shift_count;
11920 else if (GET_CODE (op0) == ASHIFT)
11921 mask = (mask & (mask << shift_count)) >> shift_count;
11923 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11924 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11925 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11926 else
11927 break;
11930 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11931 SUBREGs are of the same mode, and, in both cases, the AND would
11932 be redundant if the comparison was done in the narrower mode,
11933 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11934 and the operand's possibly nonzero bits are 0xffffff01; in that case
11935 if we only care about QImode, we don't need the AND). This case
11936 occurs if the output mode of an scc insn is not SImode and
11937 STORE_FLAG_VALUE == 1 (e.g., the 386).
11939 Similarly, check for a case where the AND's are ZERO_EXTEND
11940 operations from some narrower mode even though a SUBREG is not
11941 present. */
11943 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11944 && CONST_INT_P (XEXP (op0, 1))
11945 && CONST_INT_P (XEXP (op1, 1)))
11947 rtx inner_op0 = XEXP (op0, 0);
11948 rtx inner_op1 = XEXP (op1, 0);
11949 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11950 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11951 int changed = 0;
11953 if (paradoxical_subreg_p (inner_op0)
11954 && GET_CODE (inner_op1) == SUBREG
11955 && (GET_MODE (SUBREG_REG (inner_op0))
11956 == GET_MODE (SUBREG_REG (inner_op1)))
11957 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11958 <= HOST_BITS_PER_WIDE_INT)
11959 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11960 GET_MODE (SUBREG_REG (inner_op0)))))
11961 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11962 GET_MODE (SUBREG_REG (inner_op1))))))
11964 op0 = SUBREG_REG (inner_op0);
11965 op1 = SUBREG_REG (inner_op1);
11967 /* The resulting comparison is always unsigned since we masked
11968 off the original sign bit. */
11969 code = unsigned_condition (code);
11971 changed = 1;
11974 else if (c0 == c1)
11975 FOR_EACH_MODE_UNTIL (tmode,
11976 as_a <scalar_int_mode> (GET_MODE (op0)))
11977 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11979 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11980 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11981 code = unsigned_condition (code);
11982 changed = 1;
11983 break;
11986 if (! changed)
11987 break;
11990 /* If both operands are NOT, we can strip off the outer operation
11991 and adjust the comparison code for swapped operands; similarly for
11992 NEG, except that this must be an equality comparison. */
11993 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11994 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11995 && (code == EQ || code == NE)))
11996 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11998 else
11999 break;
12002 /* If the first operand is a constant, swap the operands and adjust the
12003 comparison code appropriately, but don't do this if the second operand
12004 is already a constant integer. */
12005 if (swap_commutative_operands_p (op0, op1))
12007 std::swap (op0, op1);
12008 code = swap_condition (code);
12011 /* We now enter a loop during which we will try to simplify the comparison.
12012 For the most part, we only are concerned with comparisons with zero,
12013 but some things may really be comparisons with zero but not start
12014 out looking that way. */
12016 while (CONST_INT_P (op1))
12018 machine_mode raw_mode = GET_MODE (op0);
12019 scalar_int_mode int_mode;
12020 int equality_comparison_p;
12021 int sign_bit_comparison_p;
12022 int unsigned_comparison_p;
12023 HOST_WIDE_INT const_op;
12025 /* We only want to handle integral modes. This catches VOIDmode,
12026 CCmode, and the floating-point modes. An exception is that we
12027 can handle VOIDmode if OP0 is a COMPARE or a comparison
12028 operation. */
12030 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12031 && ! (raw_mode == VOIDmode
12032 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12033 break;
12035 /* Try to simplify the compare to constant, possibly changing the
12036 comparison op, and/or changing op1 to zero. */
12037 code = simplify_compare_const (code, raw_mode, op0, &op1);
12038 const_op = INTVAL (op1);
12040 /* Compute some predicates to simplify code below. */
12042 equality_comparison_p = (code == EQ || code == NE);
12043 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12044 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12045 || code == GEU);
12047 /* If this is a sign bit comparison and we can do arithmetic in
12048 MODE, say that we will only be needing the sign bit of OP0. */
12049 if (sign_bit_comparison_p
12050 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12051 && HWI_COMPUTABLE_MODE_P (int_mode))
12052 op0 = force_to_mode (op0, int_mode,
12053 HOST_WIDE_INT_1U
12054 << (GET_MODE_PRECISION (int_mode) - 1),
12057 if (COMPARISON_P (op0))
12059 /* We can't do anything if OP0 is a condition code value, rather
12060 than an actual data value. */
12061 if (const_op != 0
12062 || CC0_P (XEXP (op0, 0))
12063 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12064 break;
12066 /* Get the two operands being compared. */
12067 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12068 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12069 else
12070 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12072 /* Check for the cases where we simply want the result of the
12073 earlier test or the opposite of that result. */
12074 if (code == NE || code == EQ
12075 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12076 && (code == LT || code == GE)))
12078 enum rtx_code new_code;
12079 if (code == LT || code == NE)
12080 new_code = GET_CODE (op0);
12081 else
12082 new_code = reversed_comparison_code (op0, NULL);
12084 if (new_code != UNKNOWN)
12086 code = new_code;
12087 op0 = tem;
12088 op1 = tem1;
12089 continue;
12092 break;
12095 if (raw_mode == VOIDmode)
12096 break;
12097 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12099 /* Now try cases based on the opcode of OP0. If none of the cases
12100 does a "continue", we exit this loop immediately after the
12101 switch. */
12103 unsigned int mode_width = GET_MODE_PRECISION (mode);
12104 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12105 switch (GET_CODE (op0))
12107 case ZERO_EXTRACT:
12108 /* If we are extracting a single bit from a variable position in
12109 a constant that has only a single bit set and are comparing it
12110 with zero, we can convert this into an equality comparison
12111 between the position and the location of the single bit. */
12112 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12113 have already reduced the shift count modulo the word size. */
12114 if (!SHIFT_COUNT_TRUNCATED
12115 && CONST_INT_P (XEXP (op0, 0))
12116 && XEXP (op0, 1) == const1_rtx
12117 && equality_comparison_p && const_op == 0
12118 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12120 if (BITS_BIG_ENDIAN)
12121 i = BITS_PER_WORD - 1 - i;
12123 op0 = XEXP (op0, 2);
12124 op1 = GEN_INT (i);
12125 const_op = i;
12127 /* Result is nonzero iff shift count is equal to I. */
12128 code = reverse_condition (code);
12129 continue;
12132 /* fall through */
12134 case SIGN_EXTRACT:
12135 tem = expand_compound_operation (op0);
12136 if (tem != op0)
12138 op0 = tem;
12139 continue;
12141 break;
12143 case NOT:
12144 /* If testing for equality, we can take the NOT of the constant. */
12145 if (equality_comparison_p
12146 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12148 op0 = XEXP (op0, 0);
12149 op1 = tem;
12150 continue;
12153 /* If just looking at the sign bit, reverse the sense of the
12154 comparison. */
12155 if (sign_bit_comparison_p)
12157 op0 = XEXP (op0, 0);
12158 code = (code == GE ? LT : GE);
12159 continue;
12161 break;
12163 case NEG:
12164 /* If testing for equality, we can take the NEG of the constant. */
12165 if (equality_comparison_p
12166 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12168 op0 = XEXP (op0, 0);
12169 op1 = tem;
12170 continue;
12173 /* The remaining cases only apply to comparisons with zero. */
12174 if (const_op != 0)
12175 break;
12177 /* When X is ABS or is known positive,
12178 (neg X) is < 0 if and only if X != 0. */
12180 if (sign_bit_comparison_p
12181 && (GET_CODE (XEXP (op0, 0)) == ABS
12182 || (mode_width <= HOST_BITS_PER_WIDE_INT
12183 && (nonzero_bits (XEXP (op0, 0), mode)
12184 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12185 == 0)))
12187 op0 = XEXP (op0, 0);
12188 code = (code == LT ? NE : EQ);
12189 continue;
12192 /* If we have NEG of something whose two high-order bits are the
12193 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12194 if (num_sign_bit_copies (op0, mode) >= 2)
12196 op0 = XEXP (op0, 0);
12197 code = swap_condition (code);
12198 continue;
12200 break;
12202 case ROTATE:
12203 /* If we are testing equality and our count is a constant, we
12204 can perform the inverse operation on our RHS. */
12205 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12206 && (tem = simplify_binary_operation (ROTATERT, mode,
12207 op1, XEXP (op0, 1))) != 0)
12209 op0 = XEXP (op0, 0);
12210 op1 = tem;
12211 continue;
12214 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12215 a particular bit. Convert it to an AND of a constant of that
12216 bit. This will be converted into a ZERO_EXTRACT. */
12217 if (const_op == 0 && sign_bit_comparison_p
12218 && CONST_INT_P (XEXP (op0, 1))
12219 && mode_width <= HOST_BITS_PER_WIDE_INT)
12221 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12222 (HOST_WIDE_INT_1U
12223 << (mode_width - 1
12224 - INTVAL (XEXP (op0, 1)))));
12225 code = (code == LT ? NE : EQ);
12226 continue;
12229 /* Fall through. */
12231 case ABS:
12232 /* ABS is ignorable inside an equality comparison with zero. */
12233 if (const_op == 0 && equality_comparison_p)
12235 op0 = XEXP (op0, 0);
12236 continue;
12238 break;
12240 case SIGN_EXTEND:
12241 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12242 (compare FOO CONST) if CONST fits in FOO's mode and we
12243 are either testing inequality or have an unsigned
12244 comparison with ZERO_EXTEND or a signed comparison with
12245 SIGN_EXTEND. But don't do it if we don't have a compare
12246 insn of the given mode, since we'd have to revert it
12247 later on, and then we wouldn't know whether to sign- or
12248 zero-extend. */
12249 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12250 && ! unsigned_comparison_p
12251 && HWI_COMPUTABLE_MODE_P (mode)
12252 && trunc_int_for_mode (const_op, mode) == const_op
12253 && have_insn_for (COMPARE, mode))
12255 op0 = XEXP (op0, 0);
12256 continue;
12258 break;
12260 case SUBREG:
12261 /* Check for the case where we are comparing A - C1 with C2, that is
12263 (subreg:MODE (plus (A) (-C1))) op (C2)
12265 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12266 comparison in the wider mode. One of the following two conditions
12267 must be true in order for this to be valid:
12269 1. The mode extension results in the same bit pattern being added
12270 on both sides and the comparison is equality or unsigned. As
12271 C2 has been truncated to fit in MODE, the pattern can only be
12272 all 0s or all 1s.
12274 2. The mode extension results in the sign bit being copied on
12275 each side.
12277 The difficulty here is that we have predicates for A but not for
12278 (A - C1) so we need to check that C1 is within proper bounds so
12279 as to perturbate A as little as possible. */
12281 if (mode_width <= HOST_BITS_PER_WIDE_INT
12282 && subreg_lowpart_p (op0)
12283 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12284 &inner_mode)
12285 && GET_MODE_PRECISION (inner_mode) > mode_width
12286 && GET_CODE (SUBREG_REG (op0)) == PLUS
12287 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12289 rtx a = XEXP (SUBREG_REG (op0), 0);
12290 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12292 if ((c1 > 0
12293 && (unsigned HOST_WIDE_INT) c1
12294 < HOST_WIDE_INT_1U << (mode_width - 1)
12295 && (equality_comparison_p || unsigned_comparison_p)
12296 /* (A - C1) zero-extends if it is positive and sign-extends
12297 if it is negative, C2 both zero- and sign-extends. */
12298 && ((0 == (nonzero_bits (a, inner_mode)
12299 & ~GET_MODE_MASK (mode))
12300 && const_op >= 0)
12301 /* (A - C1) sign-extends if it is positive and 1-extends
12302 if it is negative, C2 both sign- and 1-extends. */
12303 || (num_sign_bit_copies (a, inner_mode)
12304 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12305 - mode_width)
12306 && const_op < 0)))
12307 || ((unsigned HOST_WIDE_INT) c1
12308 < HOST_WIDE_INT_1U << (mode_width - 2)
12309 /* (A - C1) always sign-extends, like C2. */
12310 && num_sign_bit_copies (a, inner_mode)
12311 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12312 - (mode_width - 1))))
12314 op0 = SUBREG_REG (op0);
12315 continue;
12319 /* If the inner mode is narrower and we are extracting the low part,
12320 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12321 if (paradoxical_subreg_p (op0))
12323 else if (subreg_lowpart_p (op0)
12324 && GET_MODE_CLASS (mode) == MODE_INT
12325 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12326 && (code == NE || code == EQ)
12327 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12328 && !paradoxical_subreg_p (op0)
12329 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12330 & ~GET_MODE_MASK (mode)) == 0)
12332 /* Remove outer subregs that don't do anything. */
12333 tem = gen_lowpart (inner_mode, op1);
12335 if ((nonzero_bits (tem, inner_mode)
12336 & ~GET_MODE_MASK (mode)) == 0)
12338 op0 = SUBREG_REG (op0);
12339 op1 = tem;
12340 continue;
12342 break;
12344 else
12345 break;
12347 /* FALLTHROUGH */
12349 case ZERO_EXTEND:
12350 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12351 && (unsigned_comparison_p || equality_comparison_p)
12352 && HWI_COMPUTABLE_MODE_P (mode)
12353 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12354 && const_op >= 0
12355 && have_insn_for (COMPARE, mode))
12357 op0 = XEXP (op0, 0);
12358 continue;
12360 break;
12362 case PLUS:
12363 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12364 this for equality comparisons due to pathological cases involving
12365 overflows. */
12366 if (equality_comparison_p
12367 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12368 op1, XEXP (op0, 1))))
12370 op0 = XEXP (op0, 0);
12371 op1 = tem;
12372 continue;
12375 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12376 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12377 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12379 op0 = XEXP (XEXP (op0, 0), 0);
12380 code = (code == LT ? EQ : NE);
12381 continue;
12383 break;
12385 case MINUS:
12386 /* We used to optimize signed comparisons against zero, but that
12387 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12388 arrive here as equality comparisons, or (GEU, LTU) are
12389 optimized away. No need to special-case them. */
12391 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12392 (eq B (minus A C)), whichever simplifies. We can only do
12393 this for equality comparisons due to pathological cases involving
12394 overflows. */
12395 if (equality_comparison_p
12396 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12397 XEXP (op0, 1), op1)))
12399 op0 = XEXP (op0, 0);
12400 op1 = tem;
12401 continue;
12404 if (equality_comparison_p
12405 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12406 XEXP (op0, 0), op1)))
12408 op0 = XEXP (op0, 1);
12409 op1 = tem;
12410 continue;
12413 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12414 of bits in X minus 1, is one iff X > 0. */
12415 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12416 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12417 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12418 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12420 op0 = XEXP (op0, 1);
12421 code = (code == GE ? LE : GT);
12422 continue;
12424 break;
12426 case XOR:
12427 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12428 if C is zero or B is a constant. */
12429 if (equality_comparison_p
12430 && 0 != (tem = simplify_binary_operation (XOR, mode,
12431 XEXP (op0, 1), op1)))
12433 op0 = XEXP (op0, 0);
12434 op1 = tem;
12435 continue;
12437 break;
12440 case IOR:
12441 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12442 iff X <= 0. */
12443 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12444 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12445 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12447 op0 = XEXP (op0, 1);
12448 code = (code == GE ? GT : LE);
12449 continue;
12451 break;
12453 case AND:
12454 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12455 will be converted to a ZERO_EXTRACT later. */
12456 if (const_op == 0 && equality_comparison_p
12457 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12458 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12460 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12461 XEXP (XEXP (op0, 0), 1));
12462 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12463 continue;
12466 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12467 zero and X is a comparison and C1 and C2 describe only bits set
12468 in STORE_FLAG_VALUE, we can compare with X. */
12469 if (const_op == 0 && equality_comparison_p
12470 && mode_width <= HOST_BITS_PER_WIDE_INT
12471 && CONST_INT_P (XEXP (op0, 1))
12472 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12473 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12474 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12475 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12477 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12478 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12479 if ((~STORE_FLAG_VALUE & mask) == 0
12480 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12481 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12482 && COMPARISON_P (tem))))
12484 op0 = XEXP (XEXP (op0, 0), 0);
12485 continue;
12489 /* If we are doing an equality comparison of an AND of a bit equal
12490 to the sign bit, replace this with a LT or GE comparison of
12491 the underlying value. */
12492 if (equality_comparison_p
12493 && const_op == 0
12494 && CONST_INT_P (XEXP (op0, 1))
12495 && mode_width <= HOST_BITS_PER_WIDE_INT
12496 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12497 == HOST_WIDE_INT_1U << (mode_width - 1)))
12499 op0 = XEXP (op0, 0);
12500 code = (code == EQ ? GE : LT);
12501 continue;
12504 /* If this AND operation is really a ZERO_EXTEND from a narrower
12505 mode, the constant fits within that mode, and this is either an
12506 equality or unsigned comparison, try to do this comparison in
12507 the narrower mode.
12509 Note that in:
12511 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12512 -> (ne:DI (reg:SI 4) (const_int 0))
12514 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12515 known to hold a value of the required mode the
12516 transformation is invalid. */
12517 if ((equality_comparison_p || unsigned_comparison_p)
12518 && CONST_INT_P (XEXP (op0, 1))
12519 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12520 & GET_MODE_MASK (mode))
12521 + 1)) >= 0
12522 && const_op >> i == 0
12523 && int_mode_for_size (i, 1).exists (&tmode))
12525 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12526 continue;
12529 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12530 fits in both M1 and M2 and the SUBREG is either paradoxical
12531 or represents the low part, permute the SUBREG and the AND
12532 and try again. */
12533 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12534 && CONST_INT_P (XEXP (op0, 1)))
12536 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12537 /* Require an integral mode, to avoid creating something like
12538 (AND:SF ...). */
12539 if ((is_a <scalar_int_mode>
12540 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12541 /* It is unsafe to commute the AND into the SUBREG if the
12542 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12543 not defined. As originally written the upper bits
12544 have a defined value due to the AND operation.
12545 However, if we commute the AND inside the SUBREG then
12546 they no longer have defined values and the meaning of
12547 the code has been changed.
12548 Also C1 should not change value in the smaller mode,
12549 see PR67028 (a positive C1 can become negative in the
12550 smaller mode, so that the AND does no longer mask the
12551 upper bits). */
12552 && ((WORD_REGISTER_OPERATIONS
12553 && mode_width > GET_MODE_PRECISION (tmode)
12554 && mode_width <= BITS_PER_WORD
12555 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12556 || (mode_width <= GET_MODE_PRECISION (tmode)
12557 && subreg_lowpart_p (XEXP (op0, 0))))
12558 && mode_width <= HOST_BITS_PER_WIDE_INT
12559 && HWI_COMPUTABLE_MODE_P (tmode)
12560 && (c1 & ~mask) == 0
12561 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12562 && c1 != mask
12563 && c1 != GET_MODE_MASK (tmode))
12565 op0 = simplify_gen_binary (AND, tmode,
12566 SUBREG_REG (XEXP (op0, 0)),
12567 gen_int_mode (c1, tmode));
12568 op0 = gen_lowpart (mode, op0);
12569 continue;
12573 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12574 if (const_op == 0 && equality_comparison_p
12575 && XEXP (op0, 1) == const1_rtx
12576 && GET_CODE (XEXP (op0, 0)) == NOT)
12578 op0 = simplify_and_const_int (NULL_RTX, mode,
12579 XEXP (XEXP (op0, 0), 0), 1);
12580 code = (code == NE ? EQ : NE);
12581 continue;
12584 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12585 (eq (and (lshiftrt X) 1) 0).
12586 Also handle the case where (not X) is expressed using xor. */
12587 if (const_op == 0 && equality_comparison_p
12588 && XEXP (op0, 1) == const1_rtx
12589 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12591 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12592 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12594 if (GET_CODE (shift_op) == NOT
12595 || (GET_CODE (shift_op) == XOR
12596 && CONST_INT_P (XEXP (shift_op, 1))
12597 && CONST_INT_P (shift_count)
12598 && HWI_COMPUTABLE_MODE_P (mode)
12599 && (UINTVAL (XEXP (shift_op, 1))
12600 == HOST_WIDE_INT_1U
12601 << INTVAL (shift_count))))
12604 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12605 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12606 code = (code == NE ? EQ : NE);
12607 continue;
12610 break;
12612 case ASHIFT:
12613 /* If we have (compare (ashift FOO N) (const_int C)) and
12614 the high order N bits of FOO (N+1 if an inequality comparison)
12615 are known to be zero, we can do this by comparing FOO with C
12616 shifted right N bits so long as the low-order N bits of C are
12617 zero. */
12618 if (CONST_INT_P (XEXP (op0, 1))
12619 && INTVAL (XEXP (op0, 1)) >= 0
12620 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12621 < HOST_BITS_PER_WIDE_INT)
12622 && (((unsigned HOST_WIDE_INT) const_op
12623 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12624 - 1)) == 0)
12625 && mode_width <= HOST_BITS_PER_WIDE_INT
12626 && (nonzero_bits (XEXP (op0, 0), mode)
12627 & ~(mask >> (INTVAL (XEXP (op0, 1))
12628 + ! equality_comparison_p))) == 0)
12630 /* We must perform a logical shift, not an arithmetic one,
12631 as we want the top N bits of C to be zero. */
12632 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12634 temp >>= INTVAL (XEXP (op0, 1));
12635 op1 = gen_int_mode (temp, mode);
12636 op0 = XEXP (op0, 0);
12637 continue;
12640 /* If we are doing a sign bit comparison, it means we are testing
12641 a particular bit. Convert it to the appropriate AND. */
12642 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12643 && mode_width <= HOST_BITS_PER_WIDE_INT)
12645 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12646 (HOST_WIDE_INT_1U
12647 << (mode_width - 1
12648 - INTVAL (XEXP (op0, 1)))));
12649 code = (code == LT ? NE : EQ);
12650 continue;
12653 /* If this an equality comparison with zero and we are shifting
12654 the low bit to the sign bit, we can convert this to an AND of the
12655 low-order bit. */
12656 if (const_op == 0 && equality_comparison_p
12657 && CONST_INT_P (XEXP (op0, 1))
12658 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12660 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12661 continue;
12663 break;
12665 case ASHIFTRT:
12666 /* If this is an equality comparison with zero, we can do this
12667 as a logical shift, which might be much simpler. */
12668 if (equality_comparison_p && const_op == 0
12669 && CONST_INT_P (XEXP (op0, 1)))
12671 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12672 XEXP (op0, 0),
12673 INTVAL (XEXP (op0, 1)));
12674 continue;
12677 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12678 do the comparison in a narrower mode. */
12679 if (! unsigned_comparison_p
12680 && CONST_INT_P (XEXP (op0, 1))
12681 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12682 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12683 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12684 .exists (&tmode))
12685 && (((unsigned HOST_WIDE_INT) const_op
12686 + (GET_MODE_MASK (tmode) >> 1) + 1)
12687 <= GET_MODE_MASK (tmode)))
12689 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12690 continue;
12693 /* Likewise if OP0 is a PLUS of a sign extension with a
12694 constant, which is usually represented with the PLUS
12695 between the shifts. */
12696 if (! unsigned_comparison_p
12697 && CONST_INT_P (XEXP (op0, 1))
12698 && GET_CODE (XEXP (op0, 0)) == PLUS
12699 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12700 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12701 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12702 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12703 .exists (&tmode))
12704 && (((unsigned HOST_WIDE_INT) const_op
12705 + (GET_MODE_MASK (tmode) >> 1) + 1)
12706 <= GET_MODE_MASK (tmode)))
12708 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12709 rtx add_const = XEXP (XEXP (op0, 0), 1);
12710 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12711 add_const, XEXP (op0, 1));
12713 op0 = simplify_gen_binary (PLUS, tmode,
12714 gen_lowpart (tmode, inner),
12715 new_const);
12716 continue;
12719 /* FALLTHROUGH */
12720 case LSHIFTRT:
12721 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12722 the low order N bits of FOO are known to be zero, we can do this
12723 by comparing FOO with C shifted left N bits so long as no
12724 overflow occurs. Even if the low order N bits of FOO aren't known
12725 to be zero, if the comparison is >= or < we can use the same
12726 optimization and for > or <= by setting all the low
12727 order N bits in the comparison constant. */
12728 if (CONST_INT_P (XEXP (op0, 1))
12729 && INTVAL (XEXP (op0, 1)) > 0
12730 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12731 && mode_width <= HOST_BITS_PER_WIDE_INT
12732 && (((unsigned HOST_WIDE_INT) const_op
12733 + (GET_CODE (op0) != LSHIFTRT
12734 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12735 + 1)
12736 : 0))
12737 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12739 unsigned HOST_WIDE_INT low_bits
12740 = (nonzero_bits (XEXP (op0, 0), mode)
12741 & ((HOST_WIDE_INT_1U
12742 << INTVAL (XEXP (op0, 1))) - 1));
12743 if (low_bits == 0 || !equality_comparison_p)
12745 /* If the shift was logical, then we must make the condition
12746 unsigned. */
12747 if (GET_CODE (op0) == LSHIFTRT)
12748 code = unsigned_condition (code);
12750 const_op = (unsigned HOST_WIDE_INT) const_op
12751 << INTVAL (XEXP (op0, 1));
12752 if (low_bits != 0
12753 && (code == GT || code == GTU
12754 || code == LE || code == LEU))
12755 const_op
12756 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12757 op1 = GEN_INT (const_op);
12758 op0 = XEXP (op0, 0);
12759 continue;
12763 /* If we are using this shift to extract just the sign bit, we
12764 can replace this with an LT or GE comparison. */
12765 if (const_op == 0
12766 && (equality_comparison_p || sign_bit_comparison_p)
12767 && CONST_INT_P (XEXP (op0, 1))
12768 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12770 op0 = XEXP (op0, 0);
12771 code = (code == NE || code == GT ? LT : GE);
12772 continue;
12774 break;
12776 default:
12777 break;
12780 break;
12783 /* Now make any compound operations involved in this comparison. Then,
12784 check for an outmost SUBREG on OP0 that is not doing anything or is
12785 paradoxical. The latter transformation must only be performed when
12786 it is known that the "extra" bits will be the same in op0 and op1 or
12787 that they don't matter. There are three cases to consider:
12789 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12790 care bits and we can assume they have any convenient value. So
12791 making the transformation is safe.
12793 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12794 In this case the upper bits of op0 are undefined. We should not make
12795 the simplification in that case as we do not know the contents of
12796 those bits.
12798 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12799 In that case we know those bits are zeros or ones. We must also be
12800 sure that they are the same as the upper bits of op1.
12802 We can never remove a SUBREG for a non-equality comparison because
12803 the sign bit is in a different place in the underlying object. */
12805 rtx_code op0_mco_code = SET;
12806 if (op1 == const0_rtx)
12807 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12809 op0 = make_compound_operation (op0, op0_mco_code);
12810 op1 = make_compound_operation (op1, SET);
12812 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12813 && is_int_mode (GET_MODE (op0), &mode)
12814 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12815 && (code == NE || code == EQ))
12817 if (paradoxical_subreg_p (op0))
12819 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12820 implemented. */
12821 if (REG_P (SUBREG_REG (op0)))
12823 op0 = SUBREG_REG (op0);
12824 op1 = gen_lowpart (inner_mode, op1);
12827 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12828 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12829 & ~GET_MODE_MASK (mode)) == 0)
12831 tem = gen_lowpart (inner_mode, op1);
12833 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12834 op0 = SUBREG_REG (op0), op1 = tem;
12838 /* We now do the opposite procedure: Some machines don't have compare
12839 insns in all modes. If OP0's mode is an integer mode smaller than a
12840 word and we can't do a compare in that mode, see if there is a larger
12841 mode for which we can do the compare. There are a number of cases in
12842 which we can use the wider mode. */
12844 if (is_int_mode (GET_MODE (op0), &mode)
12845 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12846 && ! have_insn_for (COMPARE, mode))
12847 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12849 tmode = tmode_iter.require ();
12850 if (!HWI_COMPUTABLE_MODE_P (tmode))
12851 break;
12852 if (have_insn_for (COMPARE, tmode))
12854 int zero_extended;
12856 /* If this is a test for negative, we can make an explicit
12857 test of the sign bit. Test this first so we can use
12858 a paradoxical subreg to extend OP0. */
12860 if (op1 == const0_rtx && (code == LT || code == GE)
12861 && HWI_COMPUTABLE_MODE_P (mode))
12863 unsigned HOST_WIDE_INT sign
12864 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12865 op0 = simplify_gen_binary (AND, tmode,
12866 gen_lowpart (tmode, op0),
12867 gen_int_mode (sign, tmode));
12868 code = (code == LT) ? NE : EQ;
12869 break;
12872 /* If the only nonzero bits in OP0 and OP1 are those in the
12873 narrower mode and this is an equality or unsigned comparison,
12874 we can use the wider mode. Similarly for sign-extended
12875 values, in which case it is true for all comparisons. */
12876 zero_extended = ((code == EQ || code == NE
12877 || code == GEU || code == GTU
12878 || code == LEU || code == LTU)
12879 && (nonzero_bits (op0, tmode)
12880 & ~GET_MODE_MASK (mode)) == 0
12881 && ((CONST_INT_P (op1)
12882 || (nonzero_bits (op1, tmode)
12883 & ~GET_MODE_MASK (mode)) == 0)));
12885 if (zero_extended
12886 || ((num_sign_bit_copies (op0, tmode)
12887 > (unsigned int) (GET_MODE_PRECISION (tmode)
12888 - GET_MODE_PRECISION (mode)))
12889 && (num_sign_bit_copies (op1, tmode)
12890 > (unsigned int) (GET_MODE_PRECISION (tmode)
12891 - GET_MODE_PRECISION (mode)))))
12893 /* If OP0 is an AND and we don't have an AND in MODE either,
12894 make a new AND in the proper mode. */
12895 if (GET_CODE (op0) == AND
12896 && !have_insn_for (AND, mode))
12897 op0 = simplify_gen_binary (AND, tmode,
12898 gen_lowpart (tmode,
12899 XEXP (op0, 0)),
12900 gen_lowpart (tmode,
12901 XEXP (op0, 1)));
12902 else
12904 if (zero_extended)
12906 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12907 op0, mode);
12908 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12909 op1, mode);
12911 else
12913 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12914 op0, mode);
12915 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12916 op1, mode);
12918 break;
12924 /* We may have changed the comparison operands. Re-canonicalize. */
12925 if (swap_commutative_operands_p (op0, op1))
12927 std::swap (op0, op1);
12928 code = swap_condition (code);
12931 /* If this machine only supports a subset of valid comparisons, see if we
12932 can convert an unsupported one into a supported one. */
12933 target_canonicalize_comparison (&code, &op0, &op1, 0);
12935 *pop0 = op0;
12936 *pop1 = op1;
12938 return code;
12941 /* Utility function for record_value_for_reg. Count number of
12942 rtxs in X. */
12943 static int
12944 count_rtxs (rtx x)
12946 enum rtx_code code = GET_CODE (x);
12947 const char *fmt;
12948 int i, j, ret = 1;
12950 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12951 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12953 rtx x0 = XEXP (x, 0);
12954 rtx x1 = XEXP (x, 1);
12956 if (x0 == x1)
12957 return 1 + 2 * count_rtxs (x0);
12959 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12960 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12961 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12962 return 2 + 2 * count_rtxs (x0)
12963 + count_rtxs (x == XEXP (x1, 0)
12964 ? XEXP (x1, 1) : XEXP (x1, 0));
12966 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12967 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12968 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12969 return 2 + 2 * count_rtxs (x1)
12970 + count_rtxs (x == XEXP (x0, 0)
12971 ? XEXP (x0, 1) : XEXP (x0, 0));
12974 fmt = GET_RTX_FORMAT (code);
12975 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12976 if (fmt[i] == 'e')
12977 ret += count_rtxs (XEXP (x, i));
12978 else if (fmt[i] == 'E')
12979 for (j = 0; j < XVECLEN (x, i); j++)
12980 ret += count_rtxs (XVECEXP (x, i, j));
12982 return ret;
12985 /* Utility function for following routine. Called when X is part of a value
12986 being stored into last_set_value. Sets last_set_table_tick
12987 for each register mentioned. Similar to mention_regs in cse.c */
12989 static void
12990 update_table_tick (rtx x)
12992 enum rtx_code code = GET_CODE (x);
12993 const char *fmt = GET_RTX_FORMAT (code);
12994 int i, j;
12996 if (code == REG)
12998 unsigned int regno = REGNO (x);
12999 unsigned int endregno = END_REGNO (x);
13000 unsigned int r;
13002 for (r = regno; r < endregno; r++)
13004 reg_stat_type *rsp = &reg_stat[r];
13005 rsp->last_set_table_tick = label_tick;
13008 return;
13011 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13012 if (fmt[i] == 'e')
13014 /* Check for identical subexpressions. If x contains
13015 identical subexpression we only have to traverse one of
13016 them. */
13017 if (i == 0 && ARITHMETIC_P (x))
13019 /* Note that at this point x1 has already been
13020 processed. */
13021 rtx x0 = XEXP (x, 0);
13022 rtx x1 = XEXP (x, 1);
13024 /* If x0 and x1 are identical then there is no need to
13025 process x0. */
13026 if (x0 == x1)
13027 break;
13029 /* If x0 is identical to a subexpression of x1 then while
13030 processing x1, x0 has already been processed. Thus we
13031 are done with x. */
13032 if (ARITHMETIC_P (x1)
13033 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13034 break;
13036 /* If x1 is identical to a subexpression of x0 then we
13037 still have to process the rest of x0. */
13038 if (ARITHMETIC_P (x0)
13039 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13041 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13042 break;
13046 update_table_tick (XEXP (x, i));
13048 else if (fmt[i] == 'E')
13049 for (j = 0; j < XVECLEN (x, i); j++)
13050 update_table_tick (XVECEXP (x, i, j));
13053 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13054 are saying that the register is clobbered and we no longer know its
13055 value. If INSN is zero, don't update reg_stat[].last_set; this is
13056 only permitted with VALUE also zero and is used to invalidate the
13057 register. */
13059 static void
13060 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13062 unsigned int regno = REGNO (reg);
13063 unsigned int endregno = END_REGNO (reg);
13064 unsigned int i;
13065 reg_stat_type *rsp;
13067 /* If VALUE contains REG and we have a previous value for REG, substitute
13068 the previous value. */
13069 if (value && insn && reg_overlap_mentioned_p (reg, value))
13071 rtx tem;
13073 /* Set things up so get_last_value is allowed to see anything set up to
13074 our insn. */
13075 subst_low_luid = DF_INSN_LUID (insn);
13076 tem = get_last_value (reg);
13078 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13079 it isn't going to be useful and will take a lot of time to process,
13080 so just use the CLOBBER. */
13082 if (tem)
13084 if (ARITHMETIC_P (tem)
13085 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13086 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13087 tem = XEXP (tem, 0);
13088 else if (count_occurrences (value, reg, 1) >= 2)
13090 /* If there are two or more occurrences of REG in VALUE,
13091 prevent the value from growing too much. */
13092 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13093 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13096 value = replace_rtx (copy_rtx (value), reg, tem);
13100 /* For each register modified, show we don't know its value, that
13101 we don't know about its bitwise content, that its value has been
13102 updated, and that we don't know the location of the death of the
13103 register. */
13104 for (i = regno; i < endregno; i++)
13106 rsp = &reg_stat[i];
13108 if (insn)
13109 rsp->last_set = insn;
13111 rsp->last_set_value = 0;
13112 rsp->last_set_mode = VOIDmode;
13113 rsp->last_set_nonzero_bits = 0;
13114 rsp->last_set_sign_bit_copies = 0;
13115 rsp->last_death = 0;
13116 rsp->truncated_to_mode = VOIDmode;
13119 /* Mark registers that are being referenced in this value. */
13120 if (value)
13121 update_table_tick (value);
13123 /* Now update the status of each register being set.
13124 If someone is using this register in this block, set this register
13125 to invalid since we will get confused between the two lives in this
13126 basic block. This makes using this register always invalid. In cse, we
13127 scan the table to invalidate all entries using this register, but this
13128 is too much work for us. */
13130 for (i = regno; i < endregno; i++)
13132 rsp = &reg_stat[i];
13133 rsp->last_set_label = label_tick;
13134 if (!insn
13135 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13136 rsp->last_set_invalid = 1;
13137 else
13138 rsp->last_set_invalid = 0;
13141 /* The value being assigned might refer to X (like in "x++;"). In that
13142 case, we must replace it with (clobber (const_int 0)) to prevent
13143 infinite loops. */
13144 rsp = &reg_stat[regno];
13145 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13147 value = copy_rtx (value);
13148 if (!get_last_value_validate (&value, insn, label_tick, 1))
13149 value = 0;
13152 /* For the main register being modified, update the value, the mode, the
13153 nonzero bits, and the number of sign bit copies. */
13155 rsp->last_set_value = value;
13157 if (value)
13159 machine_mode mode = GET_MODE (reg);
13160 subst_low_luid = DF_INSN_LUID (insn);
13161 rsp->last_set_mode = mode;
13162 if (GET_MODE_CLASS (mode) == MODE_INT
13163 && HWI_COMPUTABLE_MODE_P (mode))
13164 mode = nonzero_bits_mode;
13165 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13166 rsp->last_set_sign_bit_copies
13167 = num_sign_bit_copies (value, GET_MODE (reg));
13171 /* Called via note_stores from record_dead_and_set_regs to handle one
13172 SET or CLOBBER in an insn. DATA is the instruction in which the
13173 set is occurring. */
13175 static void
13176 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13178 rtx_insn *record_dead_insn = (rtx_insn *) data;
13180 if (GET_CODE (dest) == SUBREG)
13181 dest = SUBREG_REG (dest);
13183 if (!record_dead_insn)
13185 if (REG_P (dest))
13186 record_value_for_reg (dest, NULL, NULL_RTX);
13187 return;
13190 if (REG_P (dest))
13192 /* If we are setting the whole register, we know its value. Otherwise
13193 show that we don't know the value. We can handle SUBREG in
13194 some cases. */
13195 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13196 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13197 else if (GET_CODE (setter) == SET
13198 && GET_CODE (SET_DEST (setter)) == SUBREG
13199 && SUBREG_REG (SET_DEST (setter)) == dest
13200 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13201 && subreg_lowpart_p (SET_DEST (setter)))
13202 record_value_for_reg (dest, record_dead_insn,
13203 gen_lowpart (GET_MODE (dest),
13204 SET_SRC (setter)));
13205 else
13206 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13208 else if (MEM_P (dest)
13209 /* Ignore pushes, they clobber nothing. */
13210 && ! push_operand (dest, GET_MODE (dest)))
13211 mem_last_set = DF_INSN_LUID (record_dead_insn);
13214 /* Update the records of when each REG was most recently set or killed
13215 for the things done by INSN. This is the last thing done in processing
13216 INSN in the combiner loop.
13218 We update reg_stat[], in particular fields last_set, last_set_value,
13219 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13220 last_death, and also the similar information mem_last_set (which insn
13221 most recently modified memory) and last_call_luid (which insn was the
13222 most recent subroutine call). */
13224 static void
13225 record_dead_and_set_regs (rtx_insn *insn)
13227 rtx link;
13228 unsigned int i;
13230 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13232 if (REG_NOTE_KIND (link) == REG_DEAD
13233 && REG_P (XEXP (link, 0)))
13235 unsigned int regno = REGNO (XEXP (link, 0));
13236 unsigned int endregno = END_REGNO (XEXP (link, 0));
13238 for (i = regno; i < endregno; i++)
13240 reg_stat_type *rsp;
13242 rsp = &reg_stat[i];
13243 rsp->last_death = insn;
13246 else if (REG_NOTE_KIND (link) == REG_INC)
13247 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13250 if (CALL_P (insn))
13252 hard_reg_set_iterator hrsi;
13253 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13255 reg_stat_type *rsp;
13257 rsp = &reg_stat[i];
13258 rsp->last_set_invalid = 1;
13259 rsp->last_set = insn;
13260 rsp->last_set_value = 0;
13261 rsp->last_set_mode = VOIDmode;
13262 rsp->last_set_nonzero_bits = 0;
13263 rsp->last_set_sign_bit_copies = 0;
13264 rsp->last_death = 0;
13265 rsp->truncated_to_mode = VOIDmode;
13268 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13270 /* We can't combine into a call pattern. Remember, though, that
13271 the return value register is set at this LUID. We could
13272 still replace a register with the return value from the
13273 wrong subroutine call! */
13274 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13276 else
13277 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13280 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13281 register present in the SUBREG, so for each such SUBREG go back and
13282 adjust nonzero and sign bit information of the registers that are
13283 known to have some zero/sign bits set.
13285 This is needed because when combine blows the SUBREGs away, the
13286 information on zero/sign bits is lost and further combines can be
13287 missed because of that. */
13289 static void
13290 record_promoted_value (rtx_insn *insn, rtx subreg)
13292 struct insn_link *links;
13293 rtx set;
13294 unsigned int regno = REGNO (SUBREG_REG (subreg));
13295 machine_mode mode = GET_MODE (subreg);
13297 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13298 return;
13300 for (links = LOG_LINKS (insn); links;)
13302 reg_stat_type *rsp;
13304 insn = links->insn;
13305 set = single_set (insn);
13307 if (! set || !REG_P (SET_DEST (set))
13308 || REGNO (SET_DEST (set)) != regno
13309 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13311 links = links->next;
13312 continue;
13315 rsp = &reg_stat[regno];
13316 if (rsp->last_set == insn)
13318 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13319 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13322 if (REG_P (SET_SRC (set)))
13324 regno = REGNO (SET_SRC (set));
13325 links = LOG_LINKS (insn);
13327 else
13328 break;
13332 /* Check if X, a register, is known to contain a value already
13333 truncated to MODE. In this case we can use a subreg to refer to
13334 the truncated value even though in the generic case we would need
13335 an explicit truncation. */
13337 static bool
13338 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13340 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13341 machine_mode truncated = rsp->truncated_to_mode;
13343 if (truncated == 0
13344 || rsp->truncation_label < label_tick_ebb_start)
13345 return false;
13346 if (!partial_subreg_p (mode, truncated))
13347 return true;
13348 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13349 return true;
13350 return false;
13353 /* If X is a hard reg or a subreg record the mode that the register is
13354 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13355 able to turn a truncate into a subreg using this information. Return true
13356 if traversing X is complete. */
13358 static bool
13359 record_truncated_value (rtx x)
13361 machine_mode truncated_mode;
13362 reg_stat_type *rsp;
13364 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13366 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13367 truncated_mode = GET_MODE (x);
13369 if (!partial_subreg_p (truncated_mode, original_mode))
13370 return true;
13372 truncated_mode = GET_MODE (x);
13373 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13374 return true;
13376 x = SUBREG_REG (x);
13378 /* ??? For hard-regs we now record everything. We might be able to
13379 optimize this using last_set_mode. */
13380 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13381 truncated_mode = GET_MODE (x);
13382 else
13383 return false;
13385 rsp = &reg_stat[REGNO (x)];
13386 if (rsp->truncated_to_mode == 0
13387 || rsp->truncation_label < label_tick_ebb_start
13388 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13390 rsp->truncated_to_mode = truncated_mode;
13391 rsp->truncation_label = label_tick;
13394 return true;
13397 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13398 the modes they are used in. This can help truning TRUNCATEs into
13399 SUBREGs. */
13401 static void
13402 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13404 subrtx_var_iterator::array_type array;
13405 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13406 if (record_truncated_value (*iter))
13407 iter.skip_subrtxes ();
13410 /* Scan X for promoted SUBREGs. For each one found,
13411 note what it implies to the registers used in it. */
13413 static void
13414 check_promoted_subreg (rtx_insn *insn, rtx x)
13416 if (GET_CODE (x) == SUBREG
13417 && SUBREG_PROMOTED_VAR_P (x)
13418 && REG_P (SUBREG_REG (x)))
13419 record_promoted_value (insn, x);
13420 else
13422 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13423 int i, j;
13425 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13426 switch (format[i])
13428 case 'e':
13429 check_promoted_subreg (insn, XEXP (x, i));
13430 break;
13431 case 'V':
13432 case 'E':
13433 if (XVEC (x, i) != 0)
13434 for (j = 0; j < XVECLEN (x, i); j++)
13435 check_promoted_subreg (insn, XVECEXP (x, i, j));
13436 break;
13441 /* Verify that all the registers and memory references mentioned in *LOC are
13442 still valid. *LOC was part of a value set in INSN when label_tick was
13443 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13444 the invalid references with (clobber (const_int 0)) and return 1. This
13445 replacement is useful because we often can get useful information about
13446 the form of a value (e.g., if it was produced by a shift that always
13447 produces -1 or 0) even though we don't know exactly what registers it
13448 was produced from. */
13450 static int
13451 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13453 rtx x = *loc;
13454 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13455 int len = GET_RTX_LENGTH (GET_CODE (x));
13456 int i, j;
13458 if (REG_P (x))
13460 unsigned int regno = REGNO (x);
13461 unsigned int endregno = END_REGNO (x);
13462 unsigned int j;
13464 for (j = regno; j < endregno; j++)
13466 reg_stat_type *rsp = &reg_stat[j];
13467 if (rsp->last_set_invalid
13468 /* If this is a pseudo-register that was only set once and not
13469 live at the beginning of the function, it is always valid. */
13470 || (! (regno >= FIRST_PSEUDO_REGISTER
13471 && regno < reg_n_sets_max
13472 && REG_N_SETS (regno) == 1
13473 && (!REGNO_REG_SET_P
13474 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13475 regno)))
13476 && rsp->last_set_label > tick))
13478 if (replace)
13479 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13480 return replace;
13484 return 1;
13486 /* If this is a memory reference, make sure that there were no stores after
13487 it that might have clobbered the value. We don't have alias info, so we
13488 assume any store invalidates it. Moreover, we only have local UIDs, so
13489 we also assume that there were stores in the intervening basic blocks. */
13490 else if (MEM_P (x) && !MEM_READONLY_P (x)
13491 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13493 if (replace)
13494 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13495 return replace;
13498 for (i = 0; i < len; i++)
13500 if (fmt[i] == 'e')
13502 /* Check for identical subexpressions. If x contains
13503 identical subexpression we only have to traverse one of
13504 them. */
13505 if (i == 1 && ARITHMETIC_P (x))
13507 /* Note that at this point x0 has already been checked
13508 and found valid. */
13509 rtx x0 = XEXP (x, 0);
13510 rtx x1 = XEXP (x, 1);
13512 /* If x0 and x1 are identical then x is also valid. */
13513 if (x0 == x1)
13514 return 1;
13516 /* If x1 is identical to a subexpression of x0 then
13517 while checking x0, x1 has already been checked. Thus
13518 it is valid and so as x. */
13519 if (ARITHMETIC_P (x0)
13520 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13521 return 1;
13523 /* If x0 is identical to a subexpression of x1 then x is
13524 valid iff the rest of x1 is valid. */
13525 if (ARITHMETIC_P (x1)
13526 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13527 return
13528 get_last_value_validate (&XEXP (x1,
13529 x0 == XEXP (x1, 0) ? 1 : 0),
13530 insn, tick, replace);
13533 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13534 replace) == 0)
13535 return 0;
13537 else if (fmt[i] == 'E')
13538 for (j = 0; j < XVECLEN (x, i); j++)
13539 if (get_last_value_validate (&XVECEXP (x, i, j),
13540 insn, tick, replace) == 0)
13541 return 0;
13544 /* If we haven't found a reason for it to be invalid, it is valid. */
13545 return 1;
13548 /* Get the last value assigned to X, if known. Some registers
13549 in the value may be replaced with (clobber (const_int 0)) if their value
13550 is known longer known reliably. */
13552 static rtx
13553 get_last_value (const_rtx x)
13555 unsigned int regno;
13556 rtx value;
13557 reg_stat_type *rsp;
13559 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13560 then convert it to the desired mode. If this is a paradoxical SUBREG,
13561 we cannot predict what values the "extra" bits might have. */
13562 if (GET_CODE (x) == SUBREG
13563 && subreg_lowpart_p (x)
13564 && !paradoxical_subreg_p (x)
13565 && (value = get_last_value (SUBREG_REG (x))) != 0)
13566 return gen_lowpart (GET_MODE (x), value);
13568 if (!REG_P (x))
13569 return 0;
13571 regno = REGNO (x);
13572 rsp = &reg_stat[regno];
13573 value = rsp->last_set_value;
13575 /* If we don't have a value, or if it isn't for this basic block and
13576 it's either a hard register, set more than once, or it's a live
13577 at the beginning of the function, return 0.
13579 Because if it's not live at the beginning of the function then the reg
13580 is always set before being used (is never used without being set).
13581 And, if it's set only once, and it's always set before use, then all
13582 uses must have the same last value, even if it's not from this basic
13583 block. */
13585 if (value == 0
13586 || (rsp->last_set_label < label_tick_ebb_start
13587 && (regno < FIRST_PSEUDO_REGISTER
13588 || regno >= reg_n_sets_max
13589 || REG_N_SETS (regno) != 1
13590 || REGNO_REG_SET_P
13591 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13592 return 0;
13594 /* If the value was set in a later insn than the ones we are processing,
13595 we can't use it even if the register was only set once. */
13596 if (rsp->last_set_label == label_tick
13597 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13598 return 0;
13600 /* If fewer bits were set than what we are asked for now, we cannot use
13601 the value. */
13602 if (GET_MODE_PRECISION (rsp->last_set_mode)
13603 < GET_MODE_PRECISION (GET_MODE (x)))
13604 return 0;
13606 /* If the value has all its registers valid, return it. */
13607 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13608 return value;
13610 /* Otherwise, make a copy and replace any invalid register with
13611 (clobber (const_int 0)). If that fails for some reason, return 0. */
13613 value = copy_rtx (value);
13614 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13615 return value;
13617 return 0;
13620 /* Return nonzero if expression X refers to a REG or to memory
13621 that is set in an instruction more recent than FROM_LUID. */
13623 static int
13624 use_crosses_set_p (const_rtx x, int from_luid)
13626 const char *fmt;
13627 int i;
13628 enum rtx_code code = GET_CODE (x);
13630 if (code == REG)
13632 unsigned int regno = REGNO (x);
13633 unsigned endreg = END_REGNO (x);
13635 #ifdef PUSH_ROUNDING
13636 /* Don't allow uses of the stack pointer to be moved,
13637 because we don't know whether the move crosses a push insn. */
13638 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13639 return 1;
13640 #endif
13641 for (; regno < endreg; regno++)
13643 reg_stat_type *rsp = &reg_stat[regno];
13644 if (rsp->last_set
13645 && rsp->last_set_label == label_tick
13646 && DF_INSN_LUID (rsp->last_set) > from_luid)
13647 return 1;
13649 return 0;
13652 if (code == MEM && mem_last_set > from_luid)
13653 return 1;
13655 fmt = GET_RTX_FORMAT (code);
13657 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13659 if (fmt[i] == 'E')
13661 int j;
13662 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13663 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13664 return 1;
13666 else if (fmt[i] == 'e'
13667 && use_crosses_set_p (XEXP (x, i), from_luid))
13668 return 1;
13670 return 0;
13673 /* Define three variables used for communication between the following
13674 routines. */
13676 static unsigned int reg_dead_regno, reg_dead_endregno;
13677 static int reg_dead_flag;
13679 /* Function called via note_stores from reg_dead_at_p.
13681 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13682 reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
13684 static void
13685 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13687 unsigned int regno, endregno;
13689 if (!REG_P (dest))
13690 return;
13692 regno = REGNO (dest);
13693 endregno = END_REGNO (dest);
13694 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13695 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13698 /* Return nonzero if REG is known to be dead at INSN.
13700 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13701 referencing REG, it is dead. If we hit a SET referencing REG, it is
13702 live. Otherwise, see if it is live or dead at the start of the basic
13703 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13704 must be assumed to be always live. */
13706 static int
13707 reg_dead_at_p (rtx reg, rtx_insn *insn)
13709 basic_block block;
13710 unsigned int i;
13712 /* Set variables for reg_dead_at_p_1. */
13713 reg_dead_regno = REGNO (reg);
13714 reg_dead_endregno = END_REGNO (reg);
13716 reg_dead_flag = 0;
13718 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13719 we allow the machine description to decide whether use-and-clobber
13720 patterns are OK. */
13721 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13723 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13724 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13725 return 0;
13728 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13729 beginning of basic block. */
13730 block = BLOCK_FOR_INSN (insn);
13731 for (;;)
13733 if (INSN_P (insn))
13735 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13736 return 1;
13738 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13739 if (reg_dead_flag)
13740 return reg_dead_flag == 1 ? 1 : 0;
13742 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13743 return 1;
13746 if (insn == BB_HEAD (block))
13747 break;
13749 insn = PREV_INSN (insn);
13752 /* Look at live-in sets for the basic block that we were in. */
13753 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13754 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13755 return 0;
13757 return 1;
13760 /* Note hard registers in X that are used. */
13762 static void
13763 mark_used_regs_combine (rtx x)
13765 RTX_CODE code = GET_CODE (x);
13766 unsigned int regno;
13767 int i;
13769 switch (code)
13771 case LABEL_REF:
13772 case SYMBOL_REF:
13773 case CONST:
13774 CASE_CONST_ANY:
13775 case PC:
13776 case ADDR_VEC:
13777 case ADDR_DIFF_VEC:
13778 case ASM_INPUT:
13779 /* CC0 must die in the insn after it is set, so we don't need to take
13780 special note of it here. */
13781 case CC0:
13782 return;
13784 case CLOBBER:
13785 /* If we are clobbering a MEM, mark any hard registers inside the
13786 address as used. */
13787 if (MEM_P (XEXP (x, 0)))
13788 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13789 return;
13791 case REG:
13792 regno = REGNO (x);
13793 /* A hard reg in a wide mode may really be multiple registers.
13794 If so, mark all of them just like the first. */
13795 if (regno < FIRST_PSEUDO_REGISTER)
13797 /* None of this applies to the stack, frame or arg pointers. */
13798 if (regno == STACK_POINTER_REGNUM
13799 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13800 && regno == HARD_FRAME_POINTER_REGNUM)
13801 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13802 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13803 || regno == FRAME_POINTER_REGNUM)
13804 return;
13806 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13808 return;
13810 case SET:
13812 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13813 the address. */
13814 rtx testreg = SET_DEST (x);
13816 while (GET_CODE (testreg) == SUBREG
13817 || GET_CODE (testreg) == ZERO_EXTRACT
13818 || GET_CODE (testreg) == STRICT_LOW_PART)
13819 testreg = XEXP (testreg, 0);
13821 if (MEM_P (testreg))
13822 mark_used_regs_combine (XEXP (testreg, 0));
13824 mark_used_regs_combine (SET_SRC (x));
13826 return;
13828 default:
13829 break;
13832 /* Recursively scan the operands of this expression. */
13835 const char *fmt = GET_RTX_FORMAT (code);
13837 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13839 if (fmt[i] == 'e')
13840 mark_used_regs_combine (XEXP (x, i));
13841 else if (fmt[i] == 'E')
13843 int j;
13845 for (j = 0; j < XVECLEN (x, i); j++)
13846 mark_used_regs_combine (XVECEXP (x, i, j));
13852 /* Remove register number REGNO from the dead registers list of INSN.
13854 Return the note used to record the death, if there was one. */
13857 remove_death (unsigned int regno, rtx_insn *insn)
13859 rtx note = find_regno_note (insn, REG_DEAD, regno);
13861 if (note)
13862 remove_note (insn, note);
13864 return note;
13867 /* For each register (hardware or pseudo) used within expression X, if its
13868 death is in an instruction with luid between FROM_LUID (inclusive) and
13869 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13870 list headed by PNOTES.
13872 That said, don't move registers killed by maybe_kill_insn.
13874 This is done when X is being merged by combination into TO_INSN. These
13875 notes will then be distributed as needed. */
13877 static void
13878 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13879 rtx *pnotes)
13881 const char *fmt;
13882 int len, i;
13883 enum rtx_code code = GET_CODE (x);
13885 if (code == REG)
13887 unsigned int regno = REGNO (x);
13888 rtx_insn *where_dead = reg_stat[regno].last_death;
13890 /* Don't move the register if it gets killed in between from and to. */
13891 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13892 && ! reg_referenced_p (x, maybe_kill_insn))
13893 return;
13895 if (where_dead
13896 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13897 && DF_INSN_LUID (where_dead) >= from_luid
13898 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13900 rtx note = remove_death (regno, where_dead);
13902 /* It is possible for the call above to return 0. This can occur
13903 when last_death points to I2 or I1 that we combined with.
13904 In that case make a new note.
13906 We must also check for the case where X is a hard register
13907 and NOTE is a death note for a range of hard registers
13908 including X. In that case, we must put REG_DEAD notes for
13909 the remaining registers in place of NOTE. */
13911 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13912 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13914 unsigned int deadregno = REGNO (XEXP (note, 0));
13915 unsigned int deadend = END_REGNO (XEXP (note, 0));
13916 unsigned int ourend = END_REGNO (x);
13917 unsigned int i;
13919 for (i = deadregno; i < deadend; i++)
13920 if (i < regno || i >= ourend)
13921 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13924 /* If we didn't find any note, or if we found a REG_DEAD note that
13925 covers only part of the given reg, and we have a multi-reg hard
13926 register, then to be safe we must check for REG_DEAD notes
13927 for each register other than the first. They could have
13928 their own REG_DEAD notes lying around. */
13929 else if ((note == 0
13930 || (note != 0
13931 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13932 GET_MODE (x))))
13933 && regno < FIRST_PSEUDO_REGISTER
13934 && REG_NREGS (x) > 1)
13936 unsigned int ourend = END_REGNO (x);
13937 unsigned int i, offset;
13938 rtx oldnotes = 0;
13940 if (note)
13941 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13942 else
13943 offset = 1;
13945 for (i = regno + offset; i < ourend; i++)
13946 move_deaths (regno_reg_rtx[i],
13947 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13950 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13952 XEXP (note, 1) = *pnotes;
13953 *pnotes = note;
13955 else
13956 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13959 return;
13962 else if (GET_CODE (x) == SET)
13964 rtx dest = SET_DEST (x);
13966 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13968 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13969 that accesses one word of a multi-word item, some
13970 piece of everything register in the expression is used by
13971 this insn, so remove any old death. */
13972 /* ??? So why do we test for equality of the sizes? */
13974 if (GET_CODE (dest) == ZERO_EXTRACT
13975 || GET_CODE (dest) == STRICT_LOW_PART
13976 || (GET_CODE (dest) == SUBREG
13977 && (((GET_MODE_SIZE (GET_MODE (dest))
13978 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13979 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13980 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13982 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13983 return;
13986 /* If this is some other SUBREG, we know it replaces the entire
13987 value, so use that as the destination. */
13988 if (GET_CODE (dest) == SUBREG)
13989 dest = SUBREG_REG (dest);
13991 /* If this is a MEM, adjust deaths of anything used in the address.
13992 For a REG (the only other possibility), the entire value is
13993 being replaced so the old value is not used in this insn. */
13995 if (MEM_P (dest))
13996 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13997 to_insn, pnotes);
13998 return;
14001 else if (GET_CODE (x) == CLOBBER)
14002 return;
14004 len = GET_RTX_LENGTH (code);
14005 fmt = GET_RTX_FORMAT (code);
14007 for (i = 0; i < len; i++)
14009 if (fmt[i] == 'E')
14011 int j;
14012 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14013 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14014 to_insn, pnotes);
14016 else if (fmt[i] == 'e')
14017 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14021 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14022 pattern of an insn. X must be a REG. */
14024 static int
14025 reg_bitfield_target_p (rtx x, rtx body)
14027 int i;
14029 if (GET_CODE (body) == SET)
14031 rtx dest = SET_DEST (body);
14032 rtx target;
14033 unsigned int regno, tregno, endregno, endtregno;
14035 if (GET_CODE (dest) == ZERO_EXTRACT)
14036 target = XEXP (dest, 0);
14037 else if (GET_CODE (dest) == STRICT_LOW_PART)
14038 target = SUBREG_REG (XEXP (dest, 0));
14039 else
14040 return 0;
14042 if (GET_CODE (target) == SUBREG)
14043 target = SUBREG_REG (target);
14045 if (!REG_P (target))
14046 return 0;
14048 tregno = REGNO (target), regno = REGNO (x);
14049 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14050 return target == x;
14052 endtregno = end_hard_regno (GET_MODE (target), tregno);
14053 endregno = end_hard_regno (GET_MODE (x), regno);
14055 return endregno > tregno && regno < endtregno;
14058 else if (GET_CODE (body) == PARALLEL)
14059 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14060 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14061 return 1;
14063 return 0;
14066 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14067 as appropriate. I3 and I2 are the insns resulting from the combination
14068 insns including FROM (I2 may be zero).
14070 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14071 not need REG_DEAD notes because they are being substituted for. This
14072 saves searching in the most common cases.
14074 Each note in the list is either ignored or placed on some insns, depending
14075 on the type of note. */
14077 static void
14078 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14079 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14081 rtx note, next_note;
14082 rtx tem_note;
14083 rtx_insn *tem_insn;
14085 for (note = notes; note; note = next_note)
14087 rtx_insn *place = 0, *place2 = 0;
14089 next_note = XEXP (note, 1);
14090 switch (REG_NOTE_KIND (note))
14092 case REG_BR_PROB:
14093 case REG_BR_PRED:
14094 /* Doesn't matter much where we put this, as long as it's somewhere.
14095 It is preferable to keep these notes on branches, which is most
14096 likely to be i3. */
14097 place = i3;
14098 break;
14100 case REG_NON_LOCAL_GOTO:
14101 if (JUMP_P (i3))
14102 place = i3;
14103 else
14105 gcc_assert (i2 && JUMP_P (i2));
14106 place = i2;
14108 break;
14110 case REG_EH_REGION:
14111 /* These notes must remain with the call or trapping instruction. */
14112 if (CALL_P (i3))
14113 place = i3;
14114 else if (i2 && CALL_P (i2))
14115 place = i2;
14116 else
14118 gcc_assert (cfun->can_throw_non_call_exceptions);
14119 if (may_trap_p (i3))
14120 place = i3;
14121 else if (i2 && may_trap_p (i2))
14122 place = i2;
14123 /* ??? Otherwise assume we've combined things such that we
14124 can now prove that the instructions can't trap. Drop the
14125 note in this case. */
14127 break;
14129 case REG_ARGS_SIZE:
14130 /* ??? How to distribute between i3-i1. Assume i3 contains the
14131 entire adjustment. Assert i3 contains at least some adjust. */
14132 if (!noop_move_p (i3))
14134 int old_size, args_size = INTVAL (XEXP (note, 0));
14135 /* fixup_args_size_notes looks at REG_NORETURN note,
14136 so ensure the note is placed there first. */
14137 if (CALL_P (i3))
14139 rtx *np;
14140 for (np = &next_note; *np; np = &XEXP (*np, 1))
14141 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14143 rtx n = *np;
14144 *np = XEXP (n, 1);
14145 XEXP (n, 1) = REG_NOTES (i3);
14146 REG_NOTES (i3) = n;
14147 break;
14150 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14151 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14152 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14153 gcc_assert (old_size != args_size
14154 || (CALL_P (i3)
14155 && !ACCUMULATE_OUTGOING_ARGS
14156 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14158 break;
14160 case REG_NORETURN:
14161 case REG_SETJMP:
14162 case REG_TM:
14163 case REG_CALL_DECL:
14164 /* These notes must remain with the call. It should not be
14165 possible for both I2 and I3 to be a call. */
14166 if (CALL_P (i3))
14167 place = i3;
14168 else
14170 gcc_assert (i2 && CALL_P (i2));
14171 place = i2;
14173 break;
14175 case REG_UNUSED:
14176 /* Any clobbers for i3 may still exist, and so we must process
14177 REG_UNUSED notes from that insn.
14179 Any clobbers from i2 or i1 can only exist if they were added by
14180 recog_for_combine. In that case, recog_for_combine created the
14181 necessary REG_UNUSED notes. Trying to keep any original
14182 REG_UNUSED notes from these insns can cause incorrect output
14183 if it is for the same register as the original i3 dest.
14184 In that case, we will notice that the register is set in i3,
14185 and then add a REG_UNUSED note for the destination of i3, which
14186 is wrong. However, it is possible to have REG_UNUSED notes from
14187 i2 or i1 for register which were both used and clobbered, so
14188 we keep notes from i2 or i1 if they will turn into REG_DEAD
14189 notes. */
14191 /* If this register is set or clobbered in I3, put the note there
14192 unless there is one already. */
14193 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14195 if (from_insn != i3)
14196 break;
14198 if (! (REG_P (XEXP (note, 0))
14199 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14200 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14201 place = i3;
14203 /* Otherwise, if this register is used by I3, then this register
14204 now dies here, so we must put a REG_DEAD note here unless there
14205 is one already. */
14206 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14207 && ! (REG_P (XEXP (note, 0))
14208 ? find_regno_note (i3, REG_DEAD,
14209 REGNO (XEXP (note, 0)))
14210 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14212 PUT_REG_NOTE_KIND (note, REG_DEAD);
14213 place = i3;
14215 break;
14217 case REG_EQUAL:
14218 case REG_EQUIV:
14219 case REG_NOALIAS:
14220 /* These notes say something about results of an insn. We can
14221 only support them if they used to be on I3 in which case they
14222 remain on I3. Otherwise they are ignored.
14224 If the note refers to an expression that is not a constant, we
14225 must also ignore the note since we cannot tell whether the
14226 equivalence is still true. It might be possible to do
14227 slightly better than this (we only have a problem if I2DEST
14228 or I1DEST is present in the expression), but it doesn't
14229 seem worth the trouble. */
14231 if (from_insn == i3
14232 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14233 place = i3;
14234 break;
14236 case REG_INC:
14237 /* These notes say something about how a register is used. They must
14238 be present on any use of the register in I2 or I3. */
14239 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14240 place = i3;
14242 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14244 if (place)
14245 place2 = i2;
14246 else
14247 place = i2;
14249 break;
14251 case REG_LABEL_TARGET:
14252 case REG_LABEL_OPERAND:
14253 /* This can show up in several ways -- either directly in the
14254 pattern, or hidden off in the constant pool with (or without?)
14255 a REG_EQUAL note. */
14256 /* ??? Ignore the without-reg_equal-note problem for now. */
14257 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14258 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14259 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14260 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14261 place = i3;
14263 if (i2
14264 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14265 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14266 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14267 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14269 if (place)
14270 place2 = i2;
14271 else
14272 place = i2;
14275 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14276 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14277 there. */
14278 if (place && JUMP_P (place)
14279 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14280 && (JUMP_LABEL (place) == NULL
14281 || JUMP_LABEL (place) == XEXP (note, 0)))
14283 rtx label = JUMP_LABEL (place);
14285 if (!label)
14286 JUMP_LABEL (place) = XEXP (note, 0);
14287 else if (LABEL_P (label))
14288 LABEL_NUSES (label)--;
14291 if (place2 && JUMP_P (place2)
14292 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14293 && (JUMP_LABEL (place2) == NULL
14294 || JUMP_LABEL (place2) == XEXP (note, 0)))
14296 rtx label = JUMP_LABEL (place2);
14298 if (!label)
14299 JUMP_LABEL (place2) = XEXP (note, 0);
14300 else if (LABEL_P (label))
14301 LABEL_NUSES (label)--;
14302 place2 = 0;
14304 break;
14306 case REG_NONNEG:
14307 /* This note says something about the value of a register prior
14308 to the execution of an insn. It is too much trouble to see
14309 if the note is still correct in all situations. It is better
14310 to simply delete it. */
14311 break;
14313 case REG_DEAD:
14314 /* If we replaced the right hand side of FROM_INSN with a
14315 REG_EQUAL note, the original use of the dying register
14316 will not have been combined into I3 and I2. In such cases,
14317 FROM_INSN is guaranteed to be the first of the combined
14318 instructions, so we simply need to search back before
14319 FROM_INSN for the previous use or set of this register,
14320 then alter the notes there appropriately.
14322 If the register is used as an input in I3, it dies there.
14323 Similarly for I2, if it is nonzero and adjacent to I3.
14325 If the register is not used as an input in either I3 or I2
14326 and it is not one of the registers we were supposed to eliminate,
14327 there are two possibilities. We might have a non-adjacent I2
14328 or we might have somehow eliminated an additional register
14329 from a computation. For example, we might have had A & B where
14330 we discover that B will always be zero. In this case we will
14331 eliminate the reference to A.
14333 In both cases, we must search to see if we can find a previous
14334 use of A and put the death note there. */
14336 if (from_insn
14337 && from_insn == i2mod
14338 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14339 tem_insn = from_insn;
14340 else
14342 if (from_insn
14343 && CALL_P (from_insn)
14344 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14345 place = from_insn;
14346 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14347 place = i3;
14348 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14349 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14350 place = i2;
14351 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14352 && !(i2mod
14353 && reg_overlap_mentioned_p (XEXP (note, 0),
14354 i2mod_old_rhs)))
14355 || rtx_equal_p (XEXP (note, 0), elim_i1)
14356 || rtx_equal_p (XEXP (note, 0), elim_i0))
14357 break;
14358 tem_insn = i3;
14359 /* If the new I2 sets the same register that is marked dead
14360 in the note, we do not know where to put the note.
14361 Give up. */
14362 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14363 break;
14366 if (place == 0)
14368 basic_block bb = this_basic_block;
14370 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14372 if (!NONDEBUG_INSN_P (tem_insn))
14374 if (tem_insn == BB_HEAD (bb))
14375 break;
14376 continue;
14379 /* If the register is being set at TEM_INSN, see if that is all
14380 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14381 into a REG_UNUSED note instead. Don't delete sets to
14382 global register vars. */
14383 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14384 || !global_regs[REGNO (XEXP (note, 0))])
14385 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14387 rtx set = single_set (tem_insn);
14388 rtx inner_dest = 0;
14389 rtx_insn *cc0_setter = NULL;
14391 if (set != 0)
14392 for (inner_dest = SET_DEST (set);
14393 (GET_CODE (inner_dest) == STRICT_LOW_PART
14394 || GET_CODE (inner_dest) == SUBREG
14395 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14396 inner_dest = XEXP (inner_dest, 0))
14399 /* Verify that it was the set, and not a clobber that
14400 modified the register.
14402 CC0 targets must be careful to maintain setter/user
14403 pairs. If we cannot delete the setter due to side
14404 effects, mark the user with an UNUSED note instead
14405 of deleting it. */
14407 if (set != 0 && ! side_effects_p (SET_SRC (set))
14408 && rtx_equal_p (XEXP (note, 0), inner_dest)
14409 && (!HAVE_cc0
14410 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14411 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14412 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14414 /* Move the notes and links of TEM_INSN elsewhere.
14415 This might delete other dead insns recursively.
14416 First set the pattern to something that won't use
14417 any register. */
14418 rtx old_notes = REG_NOTES (tem_insn);
14420 PATTERN (tem_insn) = pc_rtx;
14421 REG_NOTES (tem_insn) = NULL;
14423 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14424 NULL_RTX, NULL_RTX, NULL_RTX);
14425 distribute_links (LOG_LINKS (tem_insn));
14427 unsigned int regno = REGNO (XEXP (note, 0));
14428 reg_stat_type *rsp = &reg_stat[regno];
14429 if (rsp->last_set == tem_insn)
14430 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14432 SET_INSN_DELETED (tem_insn);
14433 if (tem_insn == i2)
14434 i2 = NULL;
14436 /* Delete the setter too. */
14437 if (cc0_setter)
14439 PATTERN (cc0_setter) = pc_rtx;
14440 old_notes = REG_NOTES (cc0_setter);
14441 REG_NOTES (cc0_setter) = NULL;
14443 distribute_notes (old_notes, cc0_setter,
14444 cc0_setter, NULL,
14445 NULL_RTX, NULL_RTX, NULL_RTX);
14446 distribute_links (LOG_LINKS (cc0_setter));
14448 SET_INSN_DELETED (cc0_setter);
14449 if (cc0_setter == i2)
14450 i2 = NULL;
14453 else
14455 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14457 /* If there isn't already a REG_UNUSED note, put one
14458 here. Do not place a REG_DEAD note, even if
14459 the register is also used here; that would not
14460 match the algorithm used in lifetime analysis
14461 and can cause the consistency check in the
14462 scheduler to fail. */
14463 if (! find_regno_note (tem_insn, REG_UNUSED,
14464 REGNO (XEXP (note, 0))))
14465 place = tem_insn;
14466 break;
14469 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14470 || (CALL_P (tem_insn)
14471 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14473 place = tem_insn;
14475 /* If we are doing a 3->2 combination, and we have a
14476 register which formerly died in i3 and was not used
14477 by i2, which now no longer dies in i3 and is used in
14478 i2 but does not die in i2, and place is between i2
14479 and i3, then we may need to move a link from place to
14480 i2. */
14481 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14482 && from_insn
14483 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14484 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14486 struct insn_link *links = LOG_LINKS (place);
14487 LOG_LINKS (place) = NULL;
14488 distribute_links (links);
14490 break;
14493 if (tem_insn == BB_HEAD (bb))
14494 break;
14499 /* If the register is set or already dead at PLACE, we needn't do
14500 anything with this note if it is still a REG_DEAD note.
14501 We check here if it is set at all, not if is it totally replaced,
14502 which is what `dead_or_set_p' checks, so also check for it being
14503 set partially. */
14505 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14507 unsigned int regno = REGNO (XEXP (note, 0));
14508 reg_stat_type *rsp = &reg_stat[regno];
14510 if (dead_or_set_p (place, XEXP (note, 0))
14511 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14513 /* Unless the register previously died in PLACE, clear
14514 last_death. [I no longer understand why this is
14515 being done.] */
14516 if (rsp->last_death != place)
14517 rsp->last_death = 0;
14518 place = 0;
14520 else
14521 rsp->last_death = place;
14523 /* If this is a death note for a hard reg that is occupying
14524 multiple registers, ensure that we are still using all
14525 parts of the object. If we find a piece of the object
14526 that is unused, we must arrange for an appropriate REG_DEAD
14527 note to be added for it. However, we can't just emit a USE
14528 and tag the note to it, since the register might actually
14529 be dead; so we recourse, and the recursive call then finds
14530 the previous insn that used this register. */
14532 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14534 unsigned int endregno = END_REGNO (XEXP (note, 0));
14535 bool all_used = true;
14536 unsigned int i;
14538 for (i = regno; i < endregno; i++)
14539 if ((! refers_to_regno_p (i, PATTERN (place))
14540 && ! find_regno_fusage (place, USE, i))
14541 || dead_or_set_regno_p (place, i))
14543 all_used = false;
14544 break;
14547 if (! all_used)
14549 /* Put only REG_DEAD notes for pieces that are
14550 not already dead or set. */
14552 for (i = regno; i < endregno;
14553 i += hard_regno_nregs (i, reg_raw_mode[i]))
14555 rtx piece = regno_reg_rtx[i];
14556 basic_block bb = this_basic_block;
14558 if (! dead_or_set_p (place, piece)
14559 && ! reg_bitfield_target_p (piece,
14560 PATTERN (place)))
14562 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14563 NULL_RTX);
14565 distribute_notes (new_note, place, place,
14566 NULL, NULL_RTX, NULL_RTX,
14567 NULL_RTX);
14569 else if (! refers_to_regno_p (i, PATTERN (place))
14570 && ! find_regno_fusage (place, USE, i))
14571 for (tem_insn = PREV_INSN (place); ;
14572 tem_insn = PREV_INSN (tem_insn))
14574 if (!NONDEBUG_INSN_P (tem_insn))
14576 if (tem_insn == BB_HEAD (bb))
14577 break;
14578 continue;
14580 if (dead_or_set_p (tem_insn, piece)
14581 || reg_bitfield_target_p (piece,
14582 PATTERN (tem_insn)))
14584 add_reg_note (tem_insn, REG_UNUSED, piece);
14585 break;
14590 place = 0;
14594 break;
14596 default:
14597 /* Any other notes should not be present at this point in the
14598 compilation. */
14599 gcc_unreachable ();
14602 if (place)
14604 XEXP (note, 1) = REG_NOTES (place);
14605 REG_NOTES (place) = note;
14608 if (place2)
14609 add_shallow_copy_of_reg_note (place2, note);
14613 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14614 I3, I2, and I1 to new locations. This is also called to add a link
14615 pointing at I3 when I3's destination is changed. */
14617 static void
14618 distribute_links (struct insn_link *links)
14620 struct insn_link *link, *next_link;
14622 for (link = links; link; link = next_link)
14624 rtx_insn *place = 0;
14625 rtx_insn *insn;
14626 rtx set, reg;
14628 next_link = link->next;
14630 /* If the insn that this link points to is a NOTE, ignore it. */
14631 if (NOTE_P (link->insn))
14632 continue;
14634 set = 0;
14635 rtx pat = PATTERN (link->insn);
14636 if (GET_CODE (pat) == SET)
14637 set = pat;
14638 else if (GET_CODE (pat) == PARALLEL)
14640 int i;
14641 for (i = 0; i < XVECLEN (pat, 0); i++)
14643 set = XVECEXP (pat, 0, i);
14644 if (GET_CODE (set) != SET)
14645 continue;
14647 reg = SET_DEST (set);
14648 while (GET_CODE (reg) == ZERO_EXTRACT
14649 || GET_CODE (reg) == STRICT_LOW_PART
14650 || GET_CODE (reg) == SUBREG)
14651 reg = XEXP (reg, 0);
14653 if (!REG_P (reg))
14654 continue;
14656 if (REGNO (reg) == link->regno)
14657 break;
14659 if (i == XVECLEN (pat, 0))
14660 continue;
14662 else
14663 continue;
14665 reg = SET_DEST (set);
14667 while (GET_CODE (reg) == ZERO_EXTRACT
14668 || GET_CODE (reg) == STRICT_LOW_PART
14669 || GET_CODE (reg) == SUBREG)
14670 reg = XEXP (reg, 0);
14672 /* A LOG_LINK is defined as being placed on the first insn that uses
14673 a register and points to the insn that sets the register. Start
14674 searching at the next insn after the target of the link and stop
14675 when we reach a set of the register or the end of the basic block.
14677 Note that this correctly handles the link that used to point from
14678 I3 to I2. Also note that not much searching is typically done here
14679 since most links don't point very far away. */
14681 for (insn = NEXT_INSN (link->insn);
14682 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14683 || BB_HEAD (this_basic_block->next_bb) != insn));
14684 insn = NEXT_INSN (insn))
14685 if (DEBUG_INSN_P (insn))
14686 continue;
14687 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14689 if (reg_referenced_p (reg, PATTERN (insn)))
14690 place = insn;
14691 break;
14693 else if (CALL_P (insn)
14694 && find_reg_fusage (insn, USE, reg))
14696 place = insn;
14697 break;
14699 else if (INSN_P (insn) && reg_set_p (reg, insn))
14700 break;
14702 /* If we found a place to put the link, place it there unless there
14703 is already a link to the same insn as LINK at that point. */
14705 if (place)
14707 struct insn_link *link2;
14709 FOR_EACH_LOG_LINK (link2, place)
14710 if (link2->insn == link->insn && link2->regno == link->regno)
14711 break;
14713 if (link2 == NULL)
14715 link->next = LOG_LINKS (place);
14716 LOG_LINKS (place) = link;
14718 /* Set added_links_insn to the earliest insn we added a
14719 link to. */
14720 if (added_links_insn == 0
14721 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14722 added_links_insn = place;
14728 /* Check for any register or memory mentioned in EQUIV that is not
14729 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14730 of EXPR where some registers may have been replaced by constants. */
14732 static bool
14733 unmentioned_reg_p (rtx equiv, rtx expr)
14735 subrtx_iterator::array_type array;
14736 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14738 const_rtx x = *iter;
14739 if ((REG_P (x) || MEM_P (x))
14740 && !reg_mentioned_p (x, expr))
14741 return true;
14743 return false;
14746 DEBUG_FUNCTION void
14747 dump_combine_stats (FILE *file)
14749 fprintf
14750 (file,
14751 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14752 combine_attempts, combine_merges, combine_extras, combine_successes);
14755 void
14756 dump_combine_total_stats (FILE *file)
14758 fprintf
14759 (file,
14760 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14761 total_attempts, total_merges, total_extras, total_successes);
14764 /* Try combining insns through substitution. */
14765 static unsigned int
14766 rest_of_handle_combine (void)
14768 int rebuild_jump_labels_after_combine;
14770 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14771 df_note_add_problem ();
14772 df_analyze ();
14774 regstat_init_n_sets_and_refs ();
14775 reg_n_sets_max = max_reg_num ();
14777 rebuild_jump_labels_after_combine
14778 = combine_instructions (get_insns (), max_reg_num ());
14780 /* Combining insns may have turned an indirect jump into a
14781 direct jump. Rebuild the JUMP_LABEL fields of jumping
14782 instructions. */
14783 if (rebuild_jump_labels_after_combine)
14785 if (dom_info_available_p (CDI_DOMINATORS))
14786 free_dominance_info (CDI_DOMINATORS);
14787 timevar_push (TV_JUMP);
14788 rebuild_jump_labels (get_insns ());
14789 cleanup_cfg (0);
14790 timevar_pop (TV_JUMP);
14793 regstat_free_n_sets_and_refs ();
14794 return 0;
14797 namespace {
14799 const pass_data pass_data_combine =
14801 RTL_PASS, /* type */
14802 "combine", /* name */
14803 OPTGROUP_NONE, /* optinfo_flags */
14804 TV_COMBINE, /* tv_id */
14805 PROP_cfglayout, /* properties_required */
14806 0, /* properties_provided */
14807 0, /* properties_destroyed */
14808 0, /* todo_flags_start */
14809 TODO_df_finish, /* todo_flags_finish */
14812 class pass_combine : public rtl_opt_pass
14814 public:
14815 pass_combine (gcc::context *ctxt)
14816 : rtl_opt_pass (pass_data_combine, ctxt)
14819 /* opt_pass methods: */
14820 virtual bool gate (function *) { return (optimize > 0); }
14821 virtual unsigned int execute (function *)
14823 return rest_of_handle_combine ();
14826 }; // class pass_combine
14828 } // anon namespace
14830 rtl_opt_pass *
14831 make_pass_combine (gcc::context *ctxt)
14833 return new pass_combine (ctxt);