gcc/combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
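/* As an illustrative sketch (hypothetical RTL, not taken from a particular
   target): if I2 is
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
   and I3 is
     (set (reg:SI 102) (mem:SI (reg:SI 100))),
   substituting I2's source into I3 gives
     (set (reg:SI 102) (mem:SI (plus:SI (reg:SI 101) (const_int 4)))).
   If the target recognizes that as a valid load, the new pattern replaces
   I3 and I2 can be deleted once (reg:SI 100) has no other uses.  */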
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
108 /* Number of attempts to combine instructions in this function. */
110 static int combine_attempts;
112 /* Number of attempts that got as far as substitution in this function. */
114 static int combine_merges;
116 /* Number of instructions combined with added SETs in this function. */
118 static int combine_extras;
120 /* Number of instructions combined in this function. */
122 static int combine_successes;
124 /* Totals over entire compilation. */
126 static int total_attempts, total_merges, total_extras, total_successes;
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
135 static rtx_insn *i2mod;
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139 static rtx i2mod_old_rhs;
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143 static rtx i2mod_new_rhs;
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
165 Therefore, we maintain the following fields:
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
181 (The next two parameters are out of date).
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
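/* As a hypothetical example of the redundancy test described above: if
   register 100 was last set by a byte load that zero extends into SImode,
   its recorded nonzero_bits is 0xff, so a later
   (and:SI (reg:SI 100) (const_int 255)) changes no bits and can be
   simplified to just (reg:SI 100).  */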
195 /* Record last value assigned to (hard or pseudo) register n. */
197 rtx last_set_value;
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
202 int last_set_table_tick;
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
207 int last_set_label;
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
 212 sign bit copies it was known to have when it was last set. */
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
222 char last_set_invalid;
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
234 If an entry is zero, it means that we don't know anything special. */
236 unsigned char sign_bit_copies;
238 unsigned HOST_WIDE_INT nonzero_bits;
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
244 int truncation_label;
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that value already contains a truncated
249 value. */
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
255 static vec<reg_stat_type> reg_stat;
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
265 static unsigned int reg_n_sets_max;
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
270 static int mem_last_set;
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
275 static int last_call_luid;
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
283 static rtx_insn *subst_insn;
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
292 static int subst_low_luid;
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
297 static HARD_REG_SET newpat_used_regs;
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
 300 insn is earlier than I2 or I3, combine should rescan starting at
301 that location. */
303 static rtx_insn *added_links_insn;
305 /* Basic block in which we are performing combines. */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
310 /* Length of the currently allocated uid_insn_cost array. */
312 static int max_uid_known;
314 /* The following array records the insn_rtx_cost for every insn
315 in the instruction stream. */
317 static int *uid_insn_cost;
319 /* The following array records the LOG_LINKS for every insn in the
320 instruction stream as struct insn_link pointers. */
322 struct insn_link {
323 rtx_insn *insn;
324 unsigned int regno;
325 struct insn_link *next;
328 static struct insn_link **uid_log_links;
330 static inline int
331 insn_uid_check (const_rtx insn)
333 int uid = INSN_UID (insn);
334 gcc_checking_assert (uid <= max_uid_known);
335 return uid;
338 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
339 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
341 #define FOR_EACH_LOG_LINK(L, INSN) \
342 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
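/* A minimal usage sketch (use_insn, def_insn and regno are placeholder
   names, mirroring the loops later in this file): walk the links of
   USE_INSN looking for the one that records DEF_INSN as the setter of a
   given register:

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->insn == def_insn && link->regno == regno)
	 break;
*/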
344 /* Links for LOG_LINKS are allocated from this obstack. */
346 static struct obstack insn_link_obstack;
348 /* Allocate a link. */
350 static inline struct insn_link *
351 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
353 struct insn_link *l
354 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
355 sizeof (struct insn_link));
356 l->insn = insn;
357 l->regno = regno;
358 l->next = next;
359 return l;
362 /* Incremented for each basic block. */
364 static int label_tick;
366 /* Reset to label_tick for each extended basic block in scanning order. */
368 static int label_tick_ebb_start;
370 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
371 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
373 static scalar_int_mode nonzero_bits_mode;
375 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
376 be safely used. It is zero while computing them and after combine has
377 completed. This former test prevents propagating values based on
378 previously set values, which can be incorrect if a variable is modified
379 in a loop. */
381 static int nonzero_sign_valid;
384 /* Record one modification to rtl structure
385 to be undone by storing old_contents into *where. */
387 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
389 struct undo
391 struct undo *next;
392 enum undo_kind kind;
393 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
394 union { rtx *r; int *i; struct insn_link **l; } where;
397 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
398 num_undo says how many are currently recorded.
400 other_insn is nonzero if we have modified some other insn in the process
401 of working on subst_insn. It must be verified too. */
403 struct undobuf
405 struct undo *undos;
406 struct undo *frees;
407 rtx_insn *other_insn;
410 static struct undobuf undobuf;
412 /* Number of times the pseudo being substituted for
413 was found and replaced. */
415 static int n_occurrences;
417 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
418 scalar_int_mode,
419 unsigned HOST_WIDE_INT *);
420 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
421 scalar_int_mode,
422 unsigned int *);
423 static void do_SUBST (rtx *, rtx);
424 static void do_SUBST_INT (int *, int);
425 static void init_reg_last (void);
426 static void setup_incoming_promotions (rtx_insn *);
427 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
428 static int cant_combine_insn_p (rtx_insn *);
429 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
430 rtx_insn *, rtx_insn *, rtx *, rtx *);
431 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
432 static int contains_muldiv (rtx);
433 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 int *, rtx_insn *);
435 static void undo_all (void);
436 static void undo_commit (void);
437 static rtx *find_split_point (rtx *, rtx_insn *, bool);
438 static rtx subst (rtx, rtx, rtx, int, int, int);
439 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
440 static rtx simplify_if_then_else (rtx);
441 static rtx simplify_set (rtx);
442 static rtx simplify_logical (rtx);
443 static rtx expand_compound_operation (rtx);
444 static const_rtx expand_field_assignment (const_rtx);
445 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
446 rtx, unsigned HOST_WIDE_INT, int, int, int);
447 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
448 unsigned HOST_WIDE_INT *);
449 static rtx canon_reg_for_combine (rtx, rtx);
450 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
451 scalar_int_mode, unsigned HOST_WIDE_INT, int);
452 static rtx force_to_mode (rtx, machine_mode,
453 unsigned HOST_WIDE_INT, int);
454 static rtx if_then_else_cond (rtx, rtx *, rtx *);
455 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
456 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
457 static rtx make_field_assignment (rtx);
458 static rtx apply_distributive_law (rtx);
459 static rtx distribute_and_simplify_rtx (rtx, int);
460 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
461 unsigned HOST_WIDE_INT);
462 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
463 unsigned HOST_WIDE_INT);
464 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
465 HOST_WIDE_INT, machine_mode, int *);
466 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
467 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
468 int);
469 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
470 static rtx gen_lowpart_for_combine (machine_mode, rtx);
471 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
472 rtx, rtx *);
473 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
474 static void update_table_tick (rtx);
475 static void record_value_for_reg (rtx, rtx_insn *, rtx);
476 static void check_promoted_subreg (rtx_insn *, rtx);
477 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
478 static void record_dead_and_set_regs (rtx_insn *);
479 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
480 static rtx get_last_value (const_rtx);
481 static int use_crosses_set_p (const_rtx, int);
482 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
483 static int reg_dead_at_p (rtx, rtx_insn *);
484 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
485 static int reg_bitfield_target_p (rtx, rtx);
486 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
487 static void distribute_links (struct insn_link *);
488 static void mark_used_regs_combine (rtx);
489 static void record_promoted_value (rtx_insn *, rtx);
490 static bool unmentioned_reg_p (rtx, rtx);
491 static void record_truncated_values (rtx *, void *);
492 static bool reg_truncated_to_mode (machine_mode, const_rtx);
493 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
496 /* It is not safe to use ordinary gen_lowpart in combine.
497 See comments in gen_lowpart_for_combine. */
498 #undef RTL_HOOKS_GEN_LOWPART
499 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
501 /* Our implementation of gen_lowpart never emits a new pseudo. */
502 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
503 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
505 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
506 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
508 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
509 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
511 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
512 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
514 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
517 /* Convenience wrapper for the canonicalize_comparison target hook.
518 Target hooks cannot use enum rtx_code. */
519 static inline void
520 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
521 bool op0_preserve_value)
523 int code_int = (int)*code;
524 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
525 *code = (enum rtx_code)code_int;
528 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
 529 PATTERN cannot be split. Otherwise, it returns an insn sequence.
530 This is a wrapper around split_insns which ensures that the
531 reg_stat vector is made larger if the splitter creates a new
532 register. */
534 static rtx_insn *
535 combine_split_insns (rtx pattern, rtx_insn *insn)
537 rtx_insn *ret;
538 unsigned int nregs;
540 ret = split_insns (pattern, insn);
541 nregs = max_reg_num ();
542 if (nregs > reg_stat.length ())
543 reg_stat.safe_grow_cleared (nregs);
544 return ret;
547 /* This is used by find_single_use to locate an rtx in LOC that
548 contains exactly one use of DEST, which is typically either a REG
549 or CC0. It returns a pointer to the innermost rtx expression
550 containing DEST. Appearances of DEST that are being used to
551 totally replace it are not counted. */
553 static rtx *
554 find_single_use_1 (rtx dest, rtx *loc)
556 rtx x = *loc;
557 enum rtx_code code = GET_CODE (x);
558 rtx *result = NULL;
559 rtx *this_result;
560 int i;
561 const char *fmt;
563 switch (code)
565 case CONST:
566 case LABEL_REF:
567 case SYMBOL_REF:
568 CASE_CONST_ANY:
569 case CLOBBER:
570 return 0;
572 case SET:
573 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
574 of a REG that occupies all of the REG, the insn uses DEST if
575 it is mentioned in the destination or the source. Otherwise, we
 576 need only check the source. */
577 if (GET_CODE (SET_DEST (x)) != CC0
578 && GET_CODE (SET_DEST (x)) != PC
579 && !REG_P (SET_DEST (x))
580 && ! (GET_CODE (SET_DEST (x)) == SUBREG
581 && REG_P (SUBREG_REG (SET_DEST (x)))
582 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
583 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
584 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
585 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
586 break;
588 return find_single_use_1 (dest, &SET_SRC (x));
590 case MEM:
591 case SUBREG:
592 return find_single_use_1 (dest, &XEXP (x, 0));
594 default:
595 break;
598 /* If it wasn't one of the common cases above, check each expression and
599 vector of this code. Look for a unique usage of DEST. */
601 fmt = GET_RTX_FORMAT (code);
602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
604 if (fmt[i] == 'e')
606 if (dest == XEXP (x, i)
607 || (REG_P (dest) && REG_P (XEXP (x, i))
608 && REGNO (dest) == REGNO (XEXP (x, i))))
609 this_result = loc;
610 else
611 this_result = find_single_use_1 (dest, &XEXP (x, i));
613 if (result == NULL)
614 result = this_result;
615 else if (this_result)
616 /* Duplicate usage. */
617 return NULL;
619 else if (fmt[i] == 'E')
621 int j;
623 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
625 if (XVECEXP (x, i, j) == dest
626 || (REG_P (dest)
627 && REG_P (XVECEXP (x, i, j))
628 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
629 this_result = loc;
630 else
631 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
633 if (result == NULL)
634 result = this_result;
635 else if (this_result)
636 return NULL;
641 return result;
645 /* See if DEST, produced in INSN, is used only a single time in the
646 sequel. If so, return a pointer to the innermost rtx expression in which
647 it is used.
649 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
651 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
652 care about REG_DEAD notes or LOG_LINKS.
654 Otherwise, we find the single use by finding an insn that has a
655 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
656 only referenced once in that insn, we know that it must be the first
657 and last insn referencing DEST. */
659 static rtx *
660 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
662 basic_block bb;
663 rtx_insn *next;
664 rtx *result;
665 struct insn_link *link;
667 if (dest == cc0_rtx)
669 next = NEXT_INSN (insn);
670 if (next == 0
671 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
672 return 0;
674 result = find_single_use_1 (dest, &PATTERN (next));
675 if (result && ploc)
676 *ploc = next;
677 return result;
680 if (!REG_P (dest))
681 return 0;
683 bb = BLOCK_FOR_INSN (insn);
684 for (next = NEXT_INSN (insn);
685 next && BLOCK_FOR_INSN (next) == bb;
686 next = NEXT_INSN (next))
687 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
689 FOR_EACH_LOG_LINK (link, next)
690 if (link->insn == insn && link->regno == REGNO (dest))
691 break;
693 if (link)
695 result = find_single_use_1 (dest, &PATTERN (next));
696 if (ploc)
697 *ploc = next;
698 return result;
702 return 0;
705 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
706 insn. The substitution can be undone by undo_all. If INTO is already
707 set to NEWVAL, do not record this change. Because computing NEWVAL might
708 also call SUBST, we have to compute it before we put anything into
709 the undo table. */
711 static void
712 do_SUBST (rtx *into, rtx newval)
714 struct undo *buf;
715 rtx oldval = *into;
717 if (oldval == newval)
718 return;
720 /* We'd like to catch as many invalid transformations here as
721 possible. Unfortunately, there are way too many mode changes
722 that are perfectly valid, so we'd waste too much effort for
723 little gain doing the checks here. Focus on catching invalid
724 transformations involving integer constants. */
725 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
726 && CONST_INT_P (newval))
728 /* Sanity check that we're replacing oldval with a CONST_INT
729 that is a valid sign-extension for the original mode. */
730 gcc_assert (INTVAL (newval)
731 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
733 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
734 CONST_INT is not valid, because after the replacement, the
735 original mode would be gone. Unfortunately, we can't tell
736 when do_SUBST is called to replace the operand thereof, so we
737 perform this test on oldval instead, checking whether an
738 invalid replacement took place before we got here. */
739 gcc_assert (!(GET_CODE (oldval) == SUBREG
740 && CONST_INT_P (SUBREG_REG (oldval))));
741 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
742 && CONST_INT_P (XEXP (oldval, 0))));
745 if (undobuf.frees)
746 buf = undobuf.frees, undobuf.frees = buf->next;
747 else
748 buf = XNEW (struct undo);
750 buf->kind = UNDO_RTX;
751 buf->where.r = into;
752 buf->old_contents.r = oldval;
753 *into = newval;
755 buf->next = undobuf.undos, undobuf.undos = buf;
758 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
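/* A minimal usage sketch (placeholder names, not code from this file): to
   tentatively replace the source of a SET while keeping the change
   revertible, one writes

     SUBST (SET_SRC (pat), new_src);

   and a later undo_all () restores the old source if the combination is
   ultimately rejected.  */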
760 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
761 for the value of a HOST_WIDE_INT value (including CONST_INT) is
762 not safe. */
764 static void
765 do_SUBST_INT (int *into, int newval)
767 struct undo *buf;
768 int oldval = *into;
770 if (oldval == newval)
771 return;
773 if (undobuf.frees)
774 buf = undobuf.frees, undobuf.frees = buf->next;
775 else
776 buf = XNEW (struct undo);
778 buf->kind = UNDO_INT;
779 buf->where.i = into;
780 buf->old_contents.i = oldval;
781 *into = newval;
783 buf->next = undobuf.undos, undobuf.undos = buf;
786 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
788 /* Similar to SUBST, but just substitute the mode. This is used when
789 changing the mode of a pseudo-register, so that any other
790 references to the entry in the regno_reg_rtx array will change as
791 well. */
793 static void
794 do_SUBST_MODE (rtx *into, machine_mode newval)
796 struct undo *buf;
797 machine_mode oldval = GET_MODE (*into);
799 if (oldval == newval)
800 return;
802 if (undobuf.frees)
803 buf = undobuf.frees, undobuf.frees = buf->next;
804 else
805 buf = XNEW (struct undo);
807 buf->kind = UNDO_MODE;
808 buf->where.r = into;
809 buf->old_contents.m = oldval;
810 adjust_reg_mode (*into, newval);
812 buf->next = undobuf.undos, undobuf.undos = buf;
815 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
817 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
819 static void
820 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
822 struct undo *buf;
823 struct insn_link * oldval = *into;
825 if (oldval == newval)
826 return;
828 if (undobuf.frees)
829 buf = undobuf.frees, undobuf.frees = buf->next;
830 else
831 buf = XNEW (struct undo);
833 buf->kind = UNDO_LINKS;
834 buf->where.l = into;
835 buf->old_contents.l = oldval;
836 *into = newval;
838 buf->next = undobuf.undos, undobuf.undos = buf;
841 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
843 /* Subroutine of try_combine. Determine whether the replacement patterns
844 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
845 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
846 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
847 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
848 of all the instructions can be estimated and the replacements are more
849 expensive than the original sequence. */
851 static bool
852 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
853 rtx newpat, rtx newi2pat, rtx newotherpat)
855 int i0_cost, i1_cost, i2_cost, i3_cost;
856 int new_i2_cost, new_i3_cost;
857 int old_cost, new_cost;
859 /* Lookup the original insn_rtx_costs. */
860 i2_cost = INSN_COST (i2);
861 i3_cost = INSN_COST (i3);
863 if (i1)
865 i1_cost = INSN_COST (i1);
866 if (i0)
868 i0_cost = INSN_COST (i0);
869 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
870 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
872 else
874 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
875 ? i1_cost + i2_cost + i3_cost : 0);
876 i0_cost = 0;
879 else
881 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
882 i1_cost = i0_cost = 0;
885 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
886 correct that. */
887 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
888 old_cost -= i1_cost;
891 /* Calculate the replacement insn_rtx_costs. */
892 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
893 if (newi2pat)
895 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
896 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
897 ? new_i2_cost + new_i3_cost : 0;
899 else
901 new_cost = new_i3_cost;
902 new_i2_cost = 0;
905 if (undobuf.other_insn)
907 int old_other_cost, new_other_cost;
909 old_other_cost = INSN_COST (undobuf.other_insn);
910 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
911 if (old_other_cost > 0 && new_other_cost > 0)
913 old_cost += old_other_cost;
914 new_cost += new_other_cost;
916 else
917 old_cost = 0;
920 /* Disallow this combination if both new_cost and old_cost are greater than
921 zero, and new_cost is greater than old cost. */
922 int reject = old_cost > 0 && new_cost > old_cost;
924 if (dump_file)
926 fprintf (dump_file, "%s combination of insns ",
927 reject ? "rejecting" : "allowing");
928 if (i0)
929 fprintf (dump_file, "%d, ", INSN_UID (i0));
930 if (i1 && INSN_UID (i1) != INSN_UID (i2))
931 fprintf (dump_file, "%d, ", INSN_UID (i1));
932 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
934 fprintf (dump_file, "original costs ");
935 if (i0)
936 fprintf (dump_file, "%d + ", i0_cost);
937 if (i1 && INSN_UID (i1) != INSN_UID (i2))
938 fprintf (dump_file, "%d + ", i1_cost);
939 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
941 if (newi2pat)
942 fprintf (dump_file, "replacement costs %d + %d = %d\n",
943 new_i2_cost, new_i3_cost, new_cost);
944 else
945 fprintf (dump_file, "replacement cost %d\n", new_cost);
948 if (reject)
949 return false;
951 /* Update the uid_insn_cost array with the replacement costs. */
952 INSN_COST (i2) = new_i2_cost;
953 INSN_COST (i3) = new_i3_cost;
954 if (i1)
956 INSN_COST (i1) = 0;
957 if (i0)
958 INSN_COST (i0) = 0;
961 return true;
965 /* Delete any insns that copy a register to itself. */
967 static void
968 delete_noop_moves (void)
970 rtx_insn *insn, *next;
971 basic_block bb;
973 FOR_EACH_BB_FN (bb, cfun)
975 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
977 next = NEXT_INSN (insn);
978 if (INSN_P (insn) && noop_move_p (insn))
980 if (dump_file)
981 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
983 delete_insn_and_edges (insn);
990 /* Return false if we do not want to (or cannot) combine DEF. */
991 static bool
992 can_combine_def_p (df_ref def)
 994 /* Do not consider the def if it is a pre/post modification in a MEM. */
995 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
996 return false;
998 unsigned int regno = DF_REF_REGNO (def);
1000 /* Do not combine frame pointer adjustments. */
1001 if ((regno == FRAME_POINTER_REGNUM
1002 && (!reload_completed || frame_pointer_needed))
1003 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1004 && regno == HARD_FRAME_POINTER_REGNUM
1005 && (!reload_completed || frame_pointer_needed))
1006 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1007 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1008 return false;
1010 return true;
1013 /* Return false if we do not want to (or cannot) combine USE. */
1014 static bool
1015 can_combine_use_p (df_ref use)
 1017 /* Do not consider the usage of the stack pointer by a function call. */
1018 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1019 return false;
1021 return true;
1024 /* Fill in log links field for all insns. */
1026 static void
1027 create_log_links (void)
1029 basic_block bb;
1030 rtx_insn **next_use;
1031 rtx_insn *insn;
1032 df_ref def, use;
1034 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1036 /* Pass through each block from the end, recording the uses of each
1037 register and establishing log links when def is encountered.
1038 Note that we do not clear next_use array in order to save time,
1039 so we have to test whether the use is in the same basic block as def.
1041 There are a few cases below when we do not consider the definition or
 1042 usage -- these follow what the original flow.c did. Don't ask me why it is
1043 done this way; I don't know and if it works, I don't want to know. */
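/* As a hypothetical illustration of the backward scan: given, in one block,
     i1: (set (reg 100) ...)
     i2: (set ... (reg 100) ...)
   i2 is visited first and records next_use[100] = i2; when the scan then
   reaches i1's definition of register 100, a log link for i1 is added to
   LOG_LINKS (i2) and next_use[100] is cleared.  */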
1045 FOR_EACH_BB_FN (bb, cfun)
1047 FOR_BB_INSNS_REVERSE (bb, insn)
1049 if (!NONDEBUG_INSN_P (insn))
1050 continue;
1052 /* Log links are created only once. */
1053 gcc_assert (!LOG_LINKS (insn));
1055 FOR_EACH_INSN_DEF (def, insn)
1057 unsigned int regno = DF_REF_REGNO (def);
1058 rtx_insn *use_insn;
1060 if (!next_use[regno])
1061 continue;
1063 if (!can_combine_def_p (def))
1064 continue;
1066 use_insn = next_use[regno];
1067 next_use[regno] = NULL;
1069 if (BLOCK_FOR_INSN (use_insn) != bb)
1070 continue;
1072 /* flow.c claimed:
1074 We don't build a LOG_LINK for hard registers contained
1075 in ASM_OPERANDs. If these registers get replaced,
1076 we might wind up changing the semantics of the insn,
1077 even if reload can make what appear to be valid
1078 assignments later. */
1079 if (regno < FIRST_PSEUDO_REGISTER
1080 && asm_noperands (PATTERN (use_insn)) >= 0)
1081 continue;
1083 /* Don't add duplicate links between instructions. */
1084 struct insn_link *links;
1085 FOR_EACH_LOG_LINK (links, use_insn)
1086 if (insn == links->insn && regno == links->regno)
1087 break;
1089 if (!links)
1090 LOG_LINKS (use_insn)
1091 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1094 FOR_EACH_INSN_USE (use, insn)
1095 if (can_combine_use_p (use))
1096 next_use[DF_REF_REGNO (use)] = insn;
1100 free (next_use);
1103 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1104 true if we found a LOG_LINK that proves that A feeds B. This only works
1105 if there are no instructions between A and B which could have a link
1106 depending on A, since in that case we would not record a link for B.
1107 We also check the implicit dependency created by a cc0 setter/user
1108 pair. */
1110 static bool
1111 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1113 struct insn_link *links;
1114 FOR_EACH_LOG_LINK (links, b)
1115 if (links->insn == a)
1116 return true;
1117 if (HAVE_cc0 && sets_cc0_p (a))
1118 return true;
1119 return false;
1122 /* Main entry point for combiner. F is the first insn of the function.
1123 NREGS is the first unused pseudo-reg number.
1125 Return nonzero if the combiner has turned an indirect jump
1126 instruction into a direct jump. */
1127 static int
1128 combine_instructions (rtx_insn *f, unsigned int nregs)
1130 rtx_insn *insn, *next;
1131 rtx_insn *prev;
1132 struct insn_link *links, *nextlinks;
1133 rtx_insn *first;
1134 basic_block last_bb;
1136 int new_direct_jump_p = 0;
1138 for (first = f; first && !NONDEBUG_INSN_P (first); )
1139 first = NEXT_INSN (first);
1140 if (!first)
1141 return 0;
1143 combine_attempts = 0;
1144 combine_merges = 0;
1145 combine_extras = 0;
1146 combine_successes = 0;
1148 rtl_hooks = combine_rtl_hooks;
1150 reg_stat.safe_grow_cleared (nregs);
1152 init_recog_no_volatile ();
1154 /* Allocate array for insn info. */
1155 max_uid_known = get_max_uid ();
1156 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1157 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1158 gcc_obstack_init (&insn_link_obstack);
1160 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1162 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1163 problems when, for example, we have j <<= 1 in a loop. */
1165 nonzero_sign_valid = 0;
1166 label_tick = label_tick_ebb_start = 1;
1168 /* Scan all SETs and see if we can deduce anything about what
1169 bits are known to be zero for some registers and how many copies
1170 of the sign bit are known to exist for those registers.
1172 Also set any known values so that we can use it while searching
1173 for what bits are known to be set. */
1175 setup_incoming_promotions (first);
1176 /* Allow the entry block and the first block to fall into the same EBB.
1177 Conceptually the incoming promotions are assigned to the entry block. */
1178 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1180 create_log_links ();
1181 FOR_EACH_BB_FN (this_basic_block, cfun)
1183 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1184 last_call_luid = 0;
1185 mem_last_set = -1;
1187 label_tick++;
1188 if (!single_pred_p (this_basic_block)
1189 || single_pred (this_basic_block) != last_bb)
1190 label_tick_ebb_start = label_tick;
1191 last_bb = this_basic_block;
1193 FOR_BB_INSNS (this_basic_block, insn)
1194 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1196 rtx links;
1198 subst_low_luid = DF_INSN_LUID (insn);
1199 subst_insn = insn;
1201 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1202 insn);
1203 record_dead_and_set_regs (insn);
1205 if (AUTO_INC_DEC)
1206 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1207 if (REG_NOTE_KIND (links) == REG_INC)
1208 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1209 insn);
1211 /* Record the current insn_rtx_cost of this instruction. */
1212 if (NONJUMP_INSN_P (insn))
1213 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1214 optimize_this_for_speed_p);
1215 if (dump_file)
1217 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1218 dump_insn_slim (dump_file, insn);
1223 nonzero_sign_valid = 1;
1225 /* Now scan all the insns in forward order. */
1226 label_tick = label_tick_ebb_start = 1;
1227 init_reg_last ();
1228 setup_incoming_promotions (first);
1229 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1230 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1232 FOR_EACH_BB_FN (this_basic_block, cfun)
1234 rtx_insn *last_combined_insn = NULL;
1235 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1236 last_call_luid = 0;
1237 mem_last_set = -1;
1239 label_tick++;
1240 if (!single_pred_p (this_basic_block)
1241 || single_pred (this_basic_block) != last_bb)
1242 label_tick_ebb_start = label_tick;
1243 last_bb = this_basic_block;
1245 rtl_profile_for_bb (this_basic_block);
1246 for (insn = BB_HEAD (this_basic_block);
1247 insn != NEXT_INSN (BB_END (this_basic_block));
1248 insn = next ? next : NEXT_INSN (insn))
1250 next = 0;
1251 if (!NONDEBUG_INSN_P (insn))
1252 continue;
1254 while (last_combined_insn
1255 && (!NONDEBUG_INSN_P (last_combined_insn)
1256 || last_combined_insn->deleted ()))
1257 last_combined_insn = PREV_INSN (last_combined_insn);
1258 if (last_combined_insn == NULL_RTX
1259 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1260 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1261 last_combined_insn = insn;
1263 /* See if we know about function return values before this
1264 insn based upon SUBREG flags. */
1265 check_promoted_subreg (insn, PATTERN (insn));
1267 /* See if we can find hardregs and subreg of pseudos in
1268 narrower modes. This could help turning TRUNCATEs
1269 into SUBREGs. */
1270 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1272 /* Try this insn with each insn it links back to. */
1274 FOR_EACH_LOG_LINK (links, insn)
1275 if ((next = try_combine (insn, links->insn, NULL,
1276 NULL, &new_direct_jump_p,
1277 last_combined_insn)) != 0)
1279 statistics_counter_event (cfun, "two-insn combine", 1);
1280 goto retry;
1283 /* Try each sequence of three linked insns ending with this one. */
1285 if (max_combine >= 3)
1286 FOR_EACH_LOG_LINK (links, insn)
1288 rtx_insn *link = links->insn;
1290 /* If the linked insn has been replaced by a note, then there
1291 is no point in pursuing this chain any further. */
1292 if (NOTE_P (link))
1293 continue;
1295 FOR_EACH_LOG_LINK (nextlinks, link)
1296 if ((next = try_combine (insn, link, nextlinks->insn,
1297 NULL, &new_direct_jump_p,
1298 last_combined_insn)) != 0)
1300 statistics_counter_event (cfun, "three-insn combine", 1);
1301 goto retry;
1305 /* Try to combine a jump insn that uses CC0
1306 with a preceding insn that sets CC0, and maybe with its
1307 logical predecessor as well.
1308 This is how we make decrement-and-branch insns.
1309 We need this special code because data flow connections
1310 via CC0 do not get entered in LOG_LINKS. */
1312 if (HAVE_cc0
1313 && JUMP_P (insn)
1314 && (prev = prev_nonnote_insn (insn)) != 0
1315 && NONJUMP_INSN_P (prev)
1316 && sets_cc0_p (PATTERN (prev)))
1318 if ((next = try_combine (insn, prev, NULL, NULL,
1319 &new_direct_jump_p,
1320 last_combined_insn)) != 0)
1321 goto retry;
1323 FOR_EACH_LOG_LINK (nextlinks, prev)
1324 if ((next = try_combine (insn, prev, nextlinks->insn,
1325 NULL, &new_direct_jump_p,
1326 last_combined_insn)) != 0)
1327 goto retry;
1330 /* Do the same for an insn that explicitly references CC0. */
1331 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1332 && (prev = prev_nonnote_insn (insn)) != 0
1333 && NONJUMP_INSN_P (prev)
1334 && sets_cc0_p (PATTERN (prev))
1335 && GET_CODE (PATTERN (insn)) == SET
1336 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1338 if ((next = try_combine (insn, prev, NULL, NULL,
1339 &new_direct_jump_p,
1340 last_combined_insn)) != 0)
1341 goto retry;
1343 FOR_EACH_LOG_LINK (nextlinks, prev)
1344 if ((next = try_combine (insn, prev, nextlinks->insn,
1345 NULL, &new_direct_jump_p,
1346 last_combined_insn)) != 0)
1347 goto retry;
1350 /* Finally, see if any of the insns that this insn links to
1351 explicitly references CC0. If so, try this insn, that insn,
1352 and its predecessor if it sets CC0. */
1353 if (HAVE_cc0)
1355 FOR_EACH_LOG_LINK (links, insn)
1356 if (NONJUMP_INSN_P (links->insn)
1357 && GET_CODE (PATTERN (links->insn)) == SET
1358 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1359 && (prev = prev_nonnote_insn (links->insn)) != 0
1360 && NONJUMP_INSN_P (prev)
1361 && sets_cc0_p (PATTERN (prev))
1362 && (next = try_combine (insn, links->insn,
1363 prev, NULL, &new_direct_jump_p,
1364 last_combined_insn)) != 0)
1365 goto retry;
1368 /* Try combining an insn with two different insns whose results it
1369 uses. */
1370 if (max_combine >= 3)
1371 FOR_EACH_LOG_LINK (links, insn)
1372 for (nextlinks = links->next; nextlinks;
1373 nextlinks = nextlinks->next)
1374 if ((next = try_combine (insn, links->insn,
1375 nextlinks->insn, NULL,
1376 &new_direct_jump_p,
1377 last_combined_insn)) != 0)
1380 statistics_counter_event (cfun, "three-insn combine", 1);
1381 goto retry;
1384 /* Try four-instruction combinations. */
1385 if (max_combine >= 4)
1386 FOR_EACH_LOG_LINK (links, insn)
1388 struct insn_link *next1;
1389 rtx_insn *link = links->insn;
1391 /* If the linked insn has been replaced by a note, then there
1392 is no point in pursuing this chain any further. */
1393 if (NOTE_P (link))
1394 continue;
1396 FOR_EACH_LOG_LINK (next1, link)
1398 rtx_insn *link1 = next1->insn;
1399 if (NOTE_P (link1))
1400 continue;
1401 /* I0 -> I1 -> I2 -> I3. */
1402 FOR_EACH_LOG_LINK (nextlinks, link1)
1403 if ((next = try_combine (insn, link, link1,
1404 nextlinks->insn,
1405 &new_direct_jump_p,
1406 last_combined_insn)) != 0)
1408 statistics_counter_event (cfun, "four-insn combine", 1);
1409 goto retry;
1411 /* I0, I1 -> I2, I2 -> I3. */
1412 for (nextlinks = next1->next; nextlinks;
1413 nextlinks = nextlinks->next)
1414 if ((next = try_combine (insn, link, link1,
1415 nextlinks->insn,
1416 &new_direct_jump_p,
1417 last_combined_insn)) != 0)
1419 statistics_counter_event (cfun, "four-insn combine", 1);
1420 goto retry;
1424 for (next1 = links->next; next1; next1 = next1->next)
1426 rtx_insn *link1 = next1->insn;
1427 if (NOTE_P (link1))
1428 continue;
1429 /* I0 -> I2; I1, I2 -> I3. */
1430 FOR_EACH_LOG_LINK (nextlinks, link)
1431 if ((next = try_combine (insn, link, link1,
1432 nextlinks->insn,
1433 &new_direct_jump_p,
1434 last_combined_insn)) != 0)
1436 statistics_counter_event (cfun, "four-insn combine", 1);
1437 goto retry;
1439 /* I0 -> I1; I1, I2 -> I3. */
1440 FOR_EACH_LOG_LINK (nextlinks, link1)
1441 if ((next = try_combine (insn, link, link1,
1442 nextlinks->insn,
1443 &new_direct_jump_p,
1444 last_combined_insn)) != 0)
1446 statistics_counter_event (cfun, "four-insn combine", 1);
1447 goto retry;
1452 /* Try this insn with each REG_EQUAL note it links back to. */
1453 FOR_EACH_LOG_LINK (links, insn)
1455 rtx set, note;
1456 rtx_insn *temp = links->insn;
1457 if ((set = single_set (temp)) != 0
1458 && (note = find_reg_equal_equiv_note (temp)) != 0
1459 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
 1460 /* Avoid using a register that may already have been marked
1461 dead by an earlier instruction. */
1462 && ! unmentioned_reg_p (note, SET_SRC (set))
1463 && (GET_MODE (note) == VOIDmode
1464 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1465 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1466 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1467 || (GET_MODE (XEXP (SET_DEST (set), 0))
1468 == GET_MODE (note))))))
1470 /* Temporarily replace the set's source with the
1471 contents of the REG_EQUAL note. The insn will
1472 be deleted or recognized by try_combine. */
1473 rtx orig_src = SET_SRC (set);
1474 rtx orig_dest = SET_DEST (set);
1475 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1476 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1477 SET_SRC (set) = note;
1478 i2mod = temp;
1479 i2mod_old_rhs = copy_rtx (orig_src);
1480 i2mod_new_rhs = copy_rtx (note);
1481 next = try_combine (insn, i2mod, NULL, NULL,
1482 &new_direct_jump_p,
1483 last_combined_insn);
1484 i2mod = NULL;
1485 if (next)
1487 statistics_counter_event (cfun, "insn-with-note combine", 1);
1488 goto retry;
1490 SET_SRC (set) = orig_src;
1491 SET_DEST (set) = orig_dest;
1495 if (!NOTE_P (insn))
1496 record_dead_and_set_regs (insn);
1498 retry:
1503 default_rtl_profile ();
1504 clear_bb_flags ();
1505 new_direct_jump_p |= purge_all_dead_edges ();
1506 delete_noop_moves ();
1508 /* Clean up. */
1509 obstack_free (&insn_link_obstack, NULL);
1510 free (uid_log_links);
1511 free (uid_insn_cost);
1512 reg_stat.release ();
1515 struct undo *undo, *next;
1516 for (undo = undobuf.frees; undo; undo = next)
1518 next = undo->next;
1519 free (undo);
1521 undobuf.frees = 0;
1524 total_attempts += combine_attempts;
1525 total_merges += combine_merges;
1526 total_extras += combine_extras;
1527 total_successes += combine_successes;
1529 nonzero_sign_valid = 0;
1530 rtl_hooks = general_rtl_hooks;
1532 /* Make recognizer allow volatile MEMs again. */
1533 init_recog ();
1535 return new_direct_jump_p;
1538 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1540 static void
1541 init_reg_last (void)
1543 unsigned int i;
1544 reg_stat_type *p;
1546 FOR_EACH_VEC_ELT (reg_stat, i, p)
1547 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1550 /* Set up any promoted values for incoming argument registers. */
1552 static void
1553 setup_incoming_promotions (rtx_insn *first)
1555 tree arg;
1556 bool strictly_local = false;
1558 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1559 arg = DECL_CHAIN (arg))
1561 rtx x, reg = DECL_INCOMING_RTL (arg);
1562 int uns1, uns3;
1563 machine_mode mode1, mode2, mode3, mode4;
1565 /* Only continue if the incoming argument is in a register. */
1566 if (!REG_P (reg))
1567 continue;
1569 /* Determine, if possible, whether all call sites of the current
1570 function lie within the current compilation unit. (This does
1571 take into account the exporting of a function via taking its
1572 address, and so forth.) */
1573 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1575 /* The mode and signedness of the argument before any promotions happen
1576 (equal to the mode of the pseudo holding it at that stage). */
1577 mode1 = TYPE_MODE (TREE_TYPE (arg));
1578 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1580 /* The mode and signedness of the argument after any source language and
1581 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1582 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1583 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1585 /* The mode and signedness of the argument as it is actually passed,
1586 see assign_parm_setup_reg in function.c. */
1587 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1588 TREE_TYPE (cfun->decl), 0);
1590 /* The mode of the register in which the argument is being passed. */
1591 mode4 = GET_MODE (reg);
1593 /* Eliminate sign extensions in the callee when:
1594 (a) A mode promotion has occurred; */
1595 if (mode1 == mode3)
1596 continue;
1597 /* (b) The mode of the register is the same as the mode of
1598 the argument as it is passed; */
1599 if (mode3 != mode4)
1600 continue;
1601 /* (c) There's no language level extension; */
1602 if (mode1 == mode2)
1604 /* (c.1) All callers are from the current compilation unit. If that's
1605 the case we don't have to rely on an ABI, we only have to know
1606 what we're generating right now, and we know that we will do the
1607 mode1 to mode2 promotion with the given sign. */
1608 else if (!strictly_local)
1609 continue;
1610 /* (c.2) The combination of the two promotions is useful. This is
1611 true when the signs match, or if the first promotion is unsigned.
 1612 In the latter case, (sign_extend (zero_extend x)) is the same as
1613 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1614 else if (uns1)
1615 uns3 = true;
1616 else if (uns3)
1617 continue;
1619 /* Record that the value was promoted from mode1 to mode3,
1620 so that any sign extension at the head of the current
1621 function may be eliminated. */
1622 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1623 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1624 record_value_for_reg (reg, first, x);
1628 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1629 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1630 because some machines (maybe most) will actually do the sign-extension and
1631 this is the conservative approach.
1633 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1634 kludge. */
1636 static rtx
1637 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1639 scalar_int_mode int_mode;
1640 if (CONST_INT_P (src)
1641 && is_a <scalar_int_mode> (mode, &int_mode)
1642 && GET_MODE_PRECISION (int_mode) < prec
1643 && INTVAL (src) > 0
1644 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1645 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1647 return src;
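/* For example (an illustration, not from this file): with PREC equal to
   BITS_PER_WORD, a QImode SRC of (const_int 128) has its sign bit set, so
   it is rewritten as (const_int -128); nonzero_bits then sees the
   sign-extended form that such machines actually produce.  */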
1650 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1651 and SET. */
1653 static void
1654 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1655 rtx x)
1657 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1658 unsigned HOST_WIDE_INT bits = 0;
1659 rtx reg_equal = NULL, src = SET_SRC (set);
1660 unsigned int num = 0;
1662 if (reg_equal_note)
1663 reg_equal = XEXP (reg_equal_note, 0);
1665 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1667 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1668 if (reg_equal)
1669 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1672 /* Don't call nonzero_bits if it cannot change anything. */
1673 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1675 bits = nonzero_bits (src, nonzero_bits_mode);
1676 if (reg_equal && bits)
1677 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1678 rsp->nonzero_bits |= bits;
1681 /* Don't call num_sign_bit_copies if it cannot change anything. */
1682 if (rsp->sign_bit_copies != 1)
1684 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1685 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1687 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1688 if (num == 0 || numeq > num)
1689 num = numeq;
1691 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1692 rsp->sign_bit_copies = num;
1696 /* Called via note_stores. If X is a pseudo that is narrower than
1697 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1699 If we are setting only a portion of X and we can't figure out what
1700 portion, assume all bits will be used since we don't know what will
1701 be happening.
1703 Similarly, set how many bits of X are known to be copies of the sign bit
1704 at all locations in the function. This is the smallest number implied
1705 by any set of X. */
1707 static void
1708 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1710 rtx_insn *insn = (rtx_insn *) data;
1711 scalar_int_mode mode;
1713 if (REG_P (x)
1714 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1715 /* If this register is undefined at the start of the file, we can't
1716 say what its contents were. */
1717 && ! REGNO_REG_SET_P
1718 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1719 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1720 && HWI_COMPUTABLE_MODE_P (mode))
1722 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1724 if (set == 0 || GET_CODE (set) == CLOBBER)
1726 rsp->nonzero_bits = GET_MODE_MASK (mode);
1727 rsp->sign_bit_copies = 1;
1728 return;
1731 /* If this register is being initialized using itself, and the
1732 register is uninitialized in this basic block, and there are
1733 no LOG_LINKS which set the register, then part of the
1734 register is uninitialized. In that case we can't assume
1735 anything about the number of nonzero bits.
1737 ??? We could do better if we checked this in
1738 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1739 could avoid making assumptions about the insn which initially
1740 sets the register, while still using the information in other
1741 insns. We would have to be careful to check every insn
1742 involved in the combination. */
1744 if (insn
1745 && reg_referenced_p (x, PATTERN (insn))
1746 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1747 REGNO (x)))
1749 struct insn_link *link;
1751 FOR_EACH_LOG_LINK (link, insn)
1752 if (dead_or_set_p (link->insn, x))
1753 break;
1754 if (!link)
1756 rsp->nonzero_bits = GET_MODE_MASK (mode);
1757 rsp->sign_bit_copies = 1;
1758 return;
1762 /* If this is a complex assignment, see if we can convert it into a
1763 simple assignment. */
1764 set = expand_field_assignment (set);
1766 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1767 set what we know about X. */
1769 if (SET_DEST (set) == x
1770 || (paradoxical_subreg_p (SET_DEST (set))
1771 && SUBREG_REG (SET_DEST (set)) == x))
1772 update_rsp_from_reg_equal (rsp, insn, set, x);
1773 else
1775 rsp->nonzero_bits = GET_MODE_MASK (mode);
1776 rsp->sign_bit_copies = 1;
1781 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1782 optional insns that were previously combined into I3 or that will be
1783 combined into the merger of INSN and I3. The order is PRED, PRED2,
1784 INSN, SUCC, SUCC2, I3.
1786 Return 0 if the combination is not allowed for any reason.
1788 If the combination is allowed, *PDEST will be set to the single
1789 destination of INSN and *PSRC to the single source, and this function
1790 will return 1. */
1792 static int
1793 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1794 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1795 rtx *pdest, rtx *psrc)
1797 int i;
1798 const_rtx set = 0;
1799 rtx src, dest;
1800 rtx_insn *p;
1801 rtx link;
1802 bool all_adjacent = true;
1803 int (*is_volatile_p) (const_rtx);
1805 if (succ)
1807 if (succ2)
1809 if (next_active_insn (succ2) != i3)
1810 all_adjacent = false;
1811 if (next_active_insn (succ) != succ2)
1812 all_adjacent = false;
1814 else if (next_active_insn (succ) != i3)
1815 all_adjacent = false;
1816 if (next_active_insn (insn) != succ)
1817 all_adjacent = false;
1819 else if (next_active_insn (insn) != i3)
1820 all_adjacent = false;
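/* Illustrative note (added commentary): with SUCC and SUCC2 present, the
   expected stream order is INSN, SUCC, SUCC2, I3; all_adjacent stays true
   only if next_active_insn links each directly to the next, with nothing
   but notes in between.  Any other active insn in between clears
   all_adjacent and enables the stricter use/set-crossing checks below.  */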
1822 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1823 or a PARALLEL consisting of such a SET and CLOBBERs.
1825 If INSN has CLOBBER parallel parts, ignore them for our processing.
1826 By definition, these happen during the execution of the insn. When it
1827 is merged with another insn, all bets are off. If they are, in fact,
1828 needed and aren't also supplied in I3, they may be added by
1829 recog_for_combine. Otherwise, it won't match.
1831 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1832 note.
1834 Get the source and destination of INSN. If more than one, can't
1835 combine. */
1837 if (GET_CODE (PATTERN (insn)) == SET)
1838 set = PATTERN (insn);
1839 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1840 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1842 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1844 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1846 switch (GET_CODE (elt))
1848 /* This is important to combine floating point insns
1849 for the SH4 port. */
1850 case USE:
1851 /* Combining an isolated USE doesn't make sense.
1852 We depend here on combinable_i3pat to reject them. */
1853 /* The code below this loop only verifies that the inputs of
1854 the SET in INSN do not change. We call reg_set_between_p
1855 to verify that the REG in the USE does not change between
1856 I3 and INSN.
1857 If the USE in INSN was for a pseudo register, the matching
1858 insn pattern will likely match any register; combining this
1859 with any other USE would only be safe if we knew that the
1860 used registers have identical values, or if there was
1861 something to tell them apart, e.g. different modes. For
1862 now, we forgo such complicated tests and simply disallow
1863 combining of USES of pseudo registers with any other USE. */
1864 if (REG_P (XEXP (elt, 0))
1865 && GET_CODE (PATTERN (i3)) == PARALLEL)
1867 rtx i3pat = PATTERN (i3);
1868 int i = XVECLEN (i3pat, 0) - 1;
1869 unsigned int regno = REGNO (XEXP (elt, 0));
1871 do
1873 rtx i3elt = XVECEXP (i3pat, 0, i);
1875 if (GET_CODE (i3elt) == USE
1876 && REG_P (XEXP (i3elt, 0))
1877 && (REGNO (XEXP (i3elt, 0)) == regno
1878 ? reg_set_between_p (XEXP (elt, 0),
1879 PREV_INSN (insn), i3)
1880 : regno >= FIRST_PSEUDO_REGISTER))
1881 return 0;
1883 while (--i >= 0);
1885 break;
1887 /* We can ignore CLOBBERs. */
1888 case CLOBBER:
1889 break;
1891 case SET:
1892 /* Ignore SETs whose result isn't used but not those that
1893 have side-effects. */
1894 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1895 && insn_nothrow_p (insn)
1896 && !side_effects_p (elt))
1897 break;
1899 /* If we have already found a SET, this is a second one and
1900 so we cannot combine with this insn. */
1901 if (set)
1902 return 0;
1904 set = elt;
1905 break;
1907 default:
1908 /* Anything else means we can't combine. */
1909 return 0;
1913 if (set == 0
1914 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1915 so don't do anything with it. */
1916 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1917 return 0;
1919 else
1920 return 0;
1922 if (set == 0)
1923 return 0;
1925 /* The simplification in expand_field_assignment may call back to
1926 get_last_value, so set safe guard here. */
1927 subst_low_luid = DF_INSN_LUID (insn);
1929 set = expand_field_assignment (set);
1930 src = SET_SRC (set), dest = SET_DEST (set);
1932 /* Do not eliminate a user-specified register if it is in an
1933 asm input, because we may break the register asm usage defined
1934 in the GCC manual if we allow doing so.
1935 Be aware that this may cover more cases than we expect, but this
1936 should be harmless. */
1937 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1938 && extract_asm_operands (PATTERN (i3)))
1939 return 0;
1941 /* Don't eliminate a store in the stack pointer. */
1942 if (dest == stack_pointer_rtx
1943 /* Don't combine with an insn that sets a register to itself if it has
1944 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1945 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1946 /* Can't merge an ASM_OPERANDS. */
1947 || GET_CODE (src) == ASM_OPERANDS
1948 /* Can't merge a function call. */
1949 || GET_CODE (src) == CALL
1950 /* Don't eliminate a function call argument. */
1951 || (CALL_P (i3)
1952 && (find_reg_fusage (i3, USE, dest)
1953 || (REG_P (dest)
1954 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1955 && global_regs[REGNO (dest)])))
1956 /* Don't substitute into an incremented register. */
1957 || FIND_REG_INC_NOTE (i3, dest)
1958 || (succ && FIND_REG_INC_NOTE (succ, dest))
1959 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1960 /* Don't substitute into a non-local goto, this confuses CFG. */
1961 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1962 /* Make sure that DEST is not used after INSN but before SUCC, or
1963 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1964 || (!all_adjacent
1965 && ((succ2
1966 && (reg_used_between_p (dest, succ2, i3)
1967 || reg_used_between_p (dest, succ, succ2)))
1968 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1969 || (succ
1970 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1971 that case SUCC is not in the insn stream, so use SUCC2
1972 instead for this test. */
1973 && reg_used_between_p (dest, insn,
1974 succ2
1975 && INSN_UID (succ) == INSN_UID (succ2)
1976 ? succ2 : succ))))
1977 /* Make sure that the value that is to be substituted for the register
1978 does not use any registers whose values alter in between. However,
1979 if the insns are adjacent, a use can't cross a set even though we
1980 think it might (this can happen for a sequence of insns each setting
1981 the same destination; last_set of that register might point to
1982 a NOTE). If INSN has a REG_EQUIV note, the register is always
1983 equivalent to the memory so the substitution is valid even if there
1984 are intervening stores. Also, don't move a volatile asm or
1985 UNSPEC_VOLATILE across any other insns. */
1986 || (! all_adjacent
1987 && (((!MEM_P (src)
1988 || ! find_reg_note (insn, REG_EQUIV, src))
1989 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1990 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1991 || GET_CODE (src) == UNSPEC_VOLATILE))
1992 /* Don't combine across a CALL_INSN, because that would possibly
1993 change whether the life span of some REGs crosses calls or not,
1994 and it is a pain to update that information.
1995 Exception: if source is a constant, moving it later can't hurt.
1996 Accept that as a special case. */
1997 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1998 return 0;
2000 /* DEST must either be a REG or CC0. */
2001 if (REG_P (dest))
2003 /* If register alignment is being enforced for multi-word items in all
2004 cases except for parameters, it is possible to have a register copy
2005 insn referencing a hard register that is not allowed to contain the
2006 mode being copied and which would not be valid as an operand of most
2007 insns. Eliminate this problem by not combining with such an insn.
2009 Also, on some machines we don't want to extend the life of a hard
2010 register. */
2012 if (REG_P (src)
2013 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2014 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2015 /* Don't extend the life of a hard register unless it is
2016 user variable (if we have few registers) or it can't
2017 fit into the desired register (meaning something special
2018 is going on).
2019 Also avoid substituting a return register into I3, because
2020 reload can't handle a conflict with constraints of other
2021 inputs. */
2022 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2023 && !targetm.hard_regno_mode_ok (REGNO (src),
2024 GET_MODE (src)))))
2025 return 0;
2027 else if (GET_CODE (dest) != CC0)
2028 return 0;
2031 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2032 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2033 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2035 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2037 /* If the clobber represents an earlyclobber operand, we must not
2038 substitute an expression containing the clobbered register.
2039 As we do not analyze the constraint strings here, we have to
2040 make the conservative assumption. However, if the register is
2041 a fixed hard reg, the clobber cannot represent any operand;
2042 we leave it up to the machine description to either accept or
2043 reject use-and-clobber patterns. */
2044 if (!REG_P (reg)
2045 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2046 || !fixed_regs[REGNO (reg)])
2047 if (reg_overlap_mentioned_p (reg, src))
2048 return 0;
2051 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2052 or not), reject, unless nothing volatile comes between it and I3. */
2054 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2056 /* Make sure neither succ nor succ2 contains a volatile reference. */
2057 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2058 return 0;
2059 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2060 return 0;
2061 /* We'll check insns between INSN and I3 below. */
2064 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2065 to be an explicit register variable, and was chosen for a reason. */
2067 if (GET_CODE (src) == ASM_OPERANDS
2068 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2069 return 0;
2071 /* If INSN contains volatile references (specifically volatile MEMs),
2072 we cannot combine across any other volatile references.
2073 Even if INSN doesn't contain volatile references, any intervening
2074 volatile insn might affect machine state. */
2076 is_volatile_p = volatile_refs_p (PATTERN (insn))
2077 ? volatile_refs_p
2078 : volatile_insn_p;
2080 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2081 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2082 return 0;
2084 /* If INSN contains an autoincrement or autodecrement, make sure that
2085 register is not used between there and I3, and not already used in
2086 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2087 Also insist that I3 not be a jump; if it were one
2088 and the incremented register were spilled, we would lose. */
2090 if (AUTO_INC_DEC)
2091 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2092 if (REG_NOTE_KIND (link) == REG_INC
2093 && (JUMP_P (i3)
2094 || reg_used_between_p (XEXP (link, 0), insn, i3)
2095 || (pred != NULL_RTX
2096 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2097 || (pred2 != NULL_RTX
2098 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2099 || (succ != NULL_RTX
2100 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2101 || (succ2 != NULL_RTX
2102 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2103 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2104 return 0;
2106 /* Don't combine an insn that follows a CC0-setting insn.
2107 An insn that uses CC0 must not be separated from the one that sets it.
2108 We do, however, allow I2 to follow a CC0-setting insn if that insn
2109 is passed as I1; in that case it will be deleted also.
2110 We also allow combining in this case if all the insns are adjacent
2111 because that would leave the two CC0 insns adjacent as well.
2112 It would be more logical to test whether CC0 occurs inside I1 or I2,
2113 but that would be much slower, and this ought to be equivalent. */
2115 if (HAVE_cc0)
2117 p = prev_nonnote_insn (insn);
2118 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2119 && ! all_adjacent)
2120 return 0;
2123 /* If we get here, we have passed all the tests and the combination is
2124 to be allowed. */
2126 *pdest = dest;
2127 *psrc = src;
2129 return 1;
2132 /* LOC is the location within I3 that contains its pattern or the component
2133 of a PARALLEL of the pattern. We validate that it is valid for combining.
2135 One problem is if I3 modifies its output, as opposed to replacing it
2136 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2137 doing so would produce an insn that is not equivalent to the original insns.
2139 Consider:
2141 (set (reg:DI 101) (reg:DI 100))
2142 (set (subreg:SI (reg:DI 101) 0) <foo>)
2144 This is NOT equivalent to:
2146 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2147 (set (reg:DI 101) (reg:DI 100))])
2149 Not only does this modify 100 (in which case it might still be valid
2150 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2152 We can also run into a problem if I2 sets a register that I1
2153 uses and I1 gets directly substituted into I3 (not via I2). In that
2154 case, we would be getting the wrong value of I2DEST into I3, so we
2155 must reject the combination. This case occurs when I2 and I1 both
2156 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2157 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2158 of a SET must prevent combination from occurring. The same situation
2159 can occur for I0, in which case I0_NOT_IN_SRC is set.
2161 Before doing the above check, we first try to expand a field assignment
2162 into a set of logical operations.
2164 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2165 we place a register that is both set and used within I3. If more than one
2166 such register is detected, we fail.
2168 Return 1 if the combination is valid, zero otherwise. */
2170 static int
2171 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2172 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2174 rtx x = *loc;
2176 if (GET_CODE (x) == SET)
2178 rtx set = x;
2179 rtx dest = SET_DEST (set);
2180 rtx src = SET_SRC (set);
2181 rtx inner_dest = dest;
2182 rtx subdest;
2184 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2185 || GET_CODE (inner_dest) == SUBREG
2186 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2187 inner_dest = XEXP (inner_dest, 0);
2189 /* Check for the case where I3 modifies its output, as discussed
2190 above. We don't want to prevent pseudos from being combined
2191 into the address of a MEM, so only prevent the combination if
2192 i1 or i2 set the same MEM. */
2193 if ((inner_dest != dest &&
2194 (!MEM_P (inner_dest)
2195 || rtx_equal_p (i2dest, inner_dest)
2196 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2197 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2198 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2199 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2200 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2202 /* This is the same test done in can_combine_p except we can't test
2203 all_adjacent; we don't have to, since this instruction will stay
2204 in place, thus we are not considering increasing the lifetime of
2205 INNER_DEST.
2207 Also, if this insn sets a function argument, combining it with
2208 something that might need a spill could clobber a previous
2209 function argument; the all_adjacent test in can_combine_p also
2210 checks this; here, we do a more specific test for this case. */
2212 || (REG_P (inner_dest)
2213 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2214 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2215 GET_MODE (inner_dest)))
2216 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2217 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2218 return 0;
2220 /* If DEST is used in I3, it is being killed in this insn, so
2221 record that for later. We have to consider paradoxical
2222 subregs here, since they kill the whole register, but we
2223 ignore partial subregs, STRICT_LOW_PART, etc.
2224 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2225 STACK_POINTER_REGNUM, since these are always considered to be
2226 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2227 subdest = dest;
2228 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2229 subdest = SUBREG_REG (subdest);
2230 if (pi3dest_killed
2231 && REG_P (subdest)
2232 && reg_referenced_p (subdest, PATTERN (i3))
2233 && REGNO (subdest) != FRAME_POINTER_REGNUM
2234 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2235 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2236 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2237 || (REGNO (subdest) != ARG_POINTER_REGNUM
2238 || ! fixed_regs [REGNO (subdest)]))
2239 && REGNO (subdest) != STACK_POINTER_REGNUM)
2241 if (*pi3dest_killed)
2242 return 0;
2244 *pi3dest_killed = subdest;
2248 else if (GET_CODE (x) == PARALLEL)
2250 int i;
2252 for (i = 0; i < XVECLEN (x, 0); i++)
2253 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2254 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2255 return 0;
2258 return 1;
2261 /* Return 1 if X is an arithmetic expression that contains a multiplication
2262 or division. We don't count multiplications by powers of two here. */
2264 static int
2265 contains_muldiv (rtx x)
2267 switch (GET_CODE (x))
2269 case MOD: case DIV: case UMOD: case UDIV:
2270 return 1;
2272 case MULT:
2273 return ! (CONST_INT_P (XEXP (x, 1))
2274 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2275 default:
2276 if (BINARY_P (x))
2277 return contains_muldiv (XEXP (x, 0))
2278 || contains_muldiv (XEXP (x, 1));
2280 if (UNARY_P (x))
2281 return contains_muldiv (XEXP (x, 0));
2283 return 0;
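/* Illustrative examples (added commentary):
   contains_muldiv ((plus:SI (mult:SI (reg:SI 100) (const_int 5))
                             (reg:SI 101)))                 == 1
   contains_muldiv ((mult:SI (reg:SI 100) (const_int 8)))   == 0
   since a multiplication by a power of two is normally just a shift.  */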
2287 /* Determine whether INSN can be used in a combination. Return nonzero if
2288 not. This is used in try_combine to detect early some cases where we
2289 can't perform combinations. */
2291 static int
2292 cant_combine_insn_p (rtx_insn *insn)
2294 rtx set;
2295 rtx src, dest;
2297 /* If this isn't really an insn, we can't do anything.
2298 This can occur when flow deletes an insn that it has merged into an
2299 auto-increment address. */
2300 if (!NONDEBUG_INSN_P (insn))
2301 return 1;
2303 /* Never combine loads and stores involving hard regs that are likely
2304 to be spilled. The register allocator can usually handle such
2305 reg-reg moves by tying. If we allow the combiner to make
2306 substitutions of likely-spilled regs, reload might die.
2307 As an exception, we allow combinations involving fixed regs; these are
2308 not available to the register allocator so there's no risk involved. */
2310 set = single_set (insn);
2311 if (! set)
2312 return 0;
2313 src = SET_SRC (set);
2314 dest = SET_DEST (set);
2315 if (GET_CODE (src) == SUBREG)
2316 src = SUBREG_REG (src);
2317 if (GET_CODE (dest) == SUBREG)
2318 dest = SUBREG_REG (dest);
2319 if (REG_P (src) && REG_P (dest)
2320 && ((HARD_REGISTER_P (src)
2321 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2322 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2323 || (HARD_REGISTER_P (dest)
2324 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2325 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2326 return 1;
2328 return 0;
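/* Illustrative example (added commentary, hypothetical target): a plain copy
   such as (set (reg:SI 100) (reg:SI 0)) returns 1 here when hard register 0
   belongs to a class the target reports as likely_spilled, so combine leaves
   the move for the register allocator to handle by tying; copies involving
   fixed registers such as the stack pointer remain combinable.  */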
2331 struct likely_spilled_retval_info
2333 unsigned regno, nregs;
2334 unsigned mask;
2337 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2338 hard registers that are known to be written to / clobbered in full. */
2339 static void
2340 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2342 struct likely_spilled_retval_info *const info =
2343 (struct likely_spilled_retval_info *) data;
2344 unsigned regno, nregs;
2345 unsigned new_mask;
2347 if (!REG_P (XEXP (set, 0)))
2348 return;
2349 regno = REGNO (x);
2350 if (regno >= info->regno + info->nregs)
2351 return;
2352 nregs = REG_NREGS (x);
2353 if (regno + nregs <= info->regno)
2354 return;
2355 new_mask = (2U << (nregs - 1)) - 1;
2356 if (regno < info->regno)
2357 new_mask >>= info->regno - regno;
2358 else
2359 new_mask <<= regno - info->regno;
2360 info->mask &= ~new_mask;
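/* Worked example (added commentary, hypothetical register numbers): suppose
   the return value occupies hard regs 24..27, so info->regno = 24,
   info->nregs = 4 and info->mask starts as 0b1111.  A full store to a
   2-register value rooted at reg 26 gives nregs = 2,
   new_mask = (2U << 1) - 1 = 3, shifted left by 26 - 24 = 2 to 0b1100;
   clearing those bits records that regs 26 and 27 are written in full
   later and need not be considered live here.  */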
2363 /* Return nonzero iff part of the return value is live during INSN, and
2364 it is likely spilled. This can happen when more than one insn is needed
2365 to copy the return value, e.g. when we consider combining into the
2366 second copy insn for a complex value. */
2368 static int
2369 likely_spilled_retval_p (rtx_insn *insn)
2371 rtx_insn *use = BB_END (this_basic_block);
2372 rtx reg;
2373 rtx_insn *p;
2374 unsigned regno, nregs;
2375 /* We assume here that no machine mode needs more than
2376 32 hard registers when the value overlaps with a register
2377 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2378 unsigned mask;
2379 struct likely_spilled_retval_info info;
2381 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2382 return 0;
2383 reg = XEXP (PATTERN (use), 0);
2384 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2385 return 0;
2386 regno = REGNO (reg);
2387 nregs = REG_NREGS (reg);
2388 if (nregs == 1)
2389 return 0;
2390 mask = (2U << (nregs - 1)) - 1;
2392 /* Disregard parts of the return value that are set later. */
2393 info.regno = regno;
2394 info.nregs = nregs;
2395 info.mask = mask;
2396 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2397 if (INSN_P (p))
2398 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2399 mask = info.mask;
2401 /* Check if any of the (probably) live return value registers is
2402 likely spilled. */
2403 nregs --;
2404 do
2406 if ((mask & 1 << nregs)
2407 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2408 return 1;
2409 } while (nregs--);
2410 return 0;
2413 /* Adjust INSN after we made a change to its destination.
2415 Changing the destination can invalidate notes that say something about
2416 the results of the insn and a LOG_LINK pointing to the insn. */
2418 static void
2419 adjust_for_new_dest (rtx_insn *insn)
2421 /* For notes, be conservative and simply remove them. */
2422 remove_reg_equal_equiv_notes (insn);
2424 /* The new insn will have a destination that was previously the destination
2425 of an insn just above it. Call distribute_links to make a LOG_LINK from
2426 the next use of that destination. */
2428 rtx set = single_set (insn);
2429 gcc_assert (set);
2431 rtx reg = SET_DEST (set);
2433 while (GET_CODE (reg) == ZERO_EXTRACT
2434 || GET_CODE (reg) == STRICT_LOW_PART
2435 || GET_CODE (reg) == SUBREG)
2436 reg = XEXP (reg, 0);
2437 gcc_assert (REG_P (reg));
2439 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2441 df_insn_rescan (insn);
2444 /* Return TRUE if combine can reuse reg X in mode MODE.
2445 ADDED_SETS is nonzero if the original set is still required. */
2446 static bool
2447 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2449 unsigned int regno;
2451 if (!REG_P (x))
2452 return false;
2454 regno = REGNO (x);
2455 /* Allow hard registers if the new mode is legal, and occupies no more
2456 registers than the old mode. */
2457 if (regno < FIRST_PSEUDO_REGISTER)
2458 return (targetm.hard_regno_mode_ok (regno, mode)
2459 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2461 /* Or a pseudo that is only used once. */
2462 return (regno < reg_n_sets_max
2463 && REG_N_SETS (regno) == 1
2464 && !added_sets
2465 && !REG_USERVAR_P (x));
2469 /* Check whether X, the destination of a set, refers to part of
2470 the register specified by REG. */
2472 static bool
2473 reg_subword_p (rtx x, rtx reg)
2475 /* Check that reg is an integer mode register. */
2476 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2477 return false;
2479 if (GET_CODE (x) == STRICT_LOW_PART
2480 || GET_CODE (x) == ZERO_EXTRACT)
2481 x = XEXP (x, 0);
2483 return GET_CODE (x) == SUBREG
2484 && SUBREG_REG (x) == reg
2485 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
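/* Illustrative examples (added commentary): with REG = (reg:SI 100),
   reg_subword_p returns true for
     (subreg:HI (reg:SI 100) 0)
     (strict_low_part (subreg:HI (reg:SI 100) 0))
   but false for (reg:SI 100) itself or for a subreg of any other
   register.  */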
2488 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2489 Note that the INSN should be deleted *after* removing dead edges, so
2490 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2491 but not for a (set (pc) (label_ref FOO)). */
2493 static void
2494 update_cfg_for_uncondjump (rtx_insn *insn)
2496 basic_block bb = BLOCK_FOR_INSN (insn);
2497 gcc_assert (BB_END (bb) == insn);
2499 purge_dead_edges (bb);
2501 delete_insn (insn);
2502 if (EDGE_COUNT (bb->succs) == 1)
2504 rtx_insn *insn;
2506 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2508 /* Remove barriers from the footer if there are any. */
2509 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2510 if (BARRIER_P (insn))
2512 if (PREV_INSN (insn))
2513 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2514 else
2515 BB_FOOTER (bb) = NEXT_INSN (insn);
2516 if (NEXT_INSN (insn))
2517 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2519 else if (LABEL_P (insn))
2520 break;
2524 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2525 by an arbitrary number of CLOBBERs. */
2526 static bool
2527 is_parallel_of_n_reg_sets (rtx pat, int n)
2529 if (GET_CODE (pat) != PARALLEL)
2530 return false;
2532 int len = XVECLEN (pat, 0);
2533 if (len < n)
2534 return false;
2536 int i;
2537 for (i = 0; i < n; i++)
2538 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2539 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2540 return false;
2541 for ( ; i < len; i++)
2542 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2543 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2544 return false;
2546 return true;
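/* Illustrative example (added commentary, hypothetical register numbers):
   for N = 2,
     (parallel [(set (reg:SI 100) (reg:SI 102))
                (set (reg:SI 101) (reg:SI 103))
                (clobber (reg:CC 17))])
   qualifies, whereas a PARALLEL whose first two elements are not both SETs
   of registers, or one containing a (clobber (const_int 0)) placeholder,
   does not.  */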
2549 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2550 CLOBBERs), can be split into individual SETs in that order, without
2551 changing semantics. */
2552 static bool
2553 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2555 if (!insn_nothrow_p (insn))
2556 return false;
2558 rtx pat = PATTERN (insn);
2560 int i, j;
2561 for (i = 0; i < n; i++)
2563 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2564 return false;
2566 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2568 for (j = i + 1; j < n; j++)
2569 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2570 return false;
2573 return true;
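/* Illustrative example (added commentary): a PARALLEL of two independent
   register copies can be split into two consecutive SETs.  If instead the
   second element were (set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 1)))
   with reg 100 set by the first element, splitting would be wrong: inside
   the PARALLEL both SETs read the pre-insn value of reg 100, but after
   splitting the second SET would see the value just written, so this
   function returns false.  */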
2576 /* Try to combine the insns I0, I1 and I2 into I3.
2577 Here I0, I1 and I2 appear earlier than I3.
2578 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2579 I3.
2581 If we are combining more than two insns and the resulting insn is not
2582 recognized, try splitting it into two insns. If that happens, I2 and I3
2583 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2584 Otherwise, I0, I1 and I2 are pseudo-deleted.
2586 Return 0 if the combination does not work. Then nothing is changed.
2587 If we did the combination, return the insn at which combine should
2588 resume scanning.
2590 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2591 new direct jump instruction.
2593 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2594 been I3 passed to an earlier try_combine within the same basic
2595 block. */
2597 static rtx_insn *
2598 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2599 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2601 /* New patterns for I3 and I2, respectively. */
2602 rtx newpat, newi2pat = 0;
2603 rtvec newpat_vec_with_clobbers = 0;
2604 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2605 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2606 dead. */
2607 int added_sets_0, added_sets_1, added_sets_2;
2608 /* Total number of SETs to put into I3. */
2609 int total_sets;
2610 /* Nonzero if I2's or I1's body now appears in I3. */
2611 int i2_is_used = 0, i1_is_used = 0;
2612 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2613 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2614 /* Contains I3 if the destination of I3 is used in its source, which means
2615 that the old life of I3 is being killed. If that usage is placed into
2616 I2 and not in I3, a REG_DEAD note must be made. */
2617 rtx i3dest_killed = 0;
2618 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2619 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2620 /* Copy of SET_SRC of I1 and I0, if needed. */
2621 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2622 /* Set if I2DEST was reused as a scratch register. */
2623 bool i2scratch = false;
2624 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2625 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2626 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2627 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2628 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2629 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2630 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2631 /* Notes that must be added to REG_NOTES in I3 and I2. */
2632 rtx new_i3_notes, new_i2_notes;
2633 /* Notes that we substituted I3 into I2 instead of the normal case. */
2634 int i3_subst_into_i2 = 0;
2635 /* Notes that I1, I2 or I3 is a MULT operation. */
2636 int have_mult = 0;
2637 int swap_i2i3 = 0;
2638 int changed_i3_dest = 0;
2640 int maxreg;
2641 rtx_insn *temp_insn;
2642 rtx temp_expr;
2643 struct insn_link *link;
2644 rtx other_pat = 0;
2645 rtx new_other_notes;
2646 int i;
2647 scalar_int_mode dest_mode, temp_mode;
2649 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2650 never be). */
2651 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2652 return 0;
2654 /* Only try four-insn combinations when there's high likelihood of
2655 success. Look for simple insns, such as loads of constants or
2656 binary operations involving a constant. */
2657 if (i0)
2659 int i;
2660 int ngood = 0;
2661 int nshift = 0;
2662 rtx set0, set3;
2664 if (!flag_expensive_optimizations)
2665 return 0;
2667 for (i = 0; i < 4; i++)
2669 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2670 rtx set = single_set (insn);
2671 rtx src;
2672 if (!set)
2673 continue;
2674 src = SET_SRC (set);
2675 if (CONSTANT_P (src))
2677 ngood += 2;
2678 break;
2680 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2681 ngood++;
2682 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2683 || GET_CODE (src) == LSHIFTRT)
2684 nshift++;
2687 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2688 are likely manipulating its value. Ideally we'll be able to combine
2689 all four insns into a bitfield insertion of some kind.
2691 Note the source in I0 might be inside a sign/zero extension and the
2692 memory modes in I0 and I3 might be different. So extract the address
2693 from the destination of I3 and search for it in the source of I0.
2695 In the event that there's a match but the source/dest do not actually
2696 refer to the same memory, the worst that happens is we try some
2697 combinations that we wouldn't have otherwise. */
2698 if ((set0 = single_set (i0))
2699 /* Ensure the source of SET0 is a MEM, possibly buried inside
2700 an extension. */
2701 && (GET_CODE (SET_SRC (set0)) == MEM
2702 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2703 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2704 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2705 && (set3 = single_set (i3))
2706 /* Ensure the destination of SET3 is a MEM. */
2707 && GET_CODE (SET_DEST (set3)) == MEM
2708 /* Would it be better to extract the base address for the MEM
2709 in SET3 and look for that? I don't have cases where it matters
2710 but I could envision such cases. */
2711 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2712 ngood += 2;
2714 if (ngood < 2 && nshift < 2)
2715 return 0;
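/* Illustrative example (added commentary): a sequence this heuristic is
   meant to accept looks like a bitfield insertion split across four insns,
   e.g. I0 loads a field from memory (possibly through a zero/sign extend),
   I1 masks it with a constant, I2 shifts it, and I3 stores the result back
   to the same address; the constant operand counts towards ngood, the shift
   towards nshift, and the matching MEM source/destination adds 2 more to
   ngood, so the threshold is reached.  */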
2718 /* Exit early if one of the insns involved can't be used for
2719 combinations. */
2720 if (CALL_P (i2)
2721 || (i1 && CALL_P (i1))
2722 || (i0 && CALL_P (i0))
2723 || cant_combine_insn_p (i3)
2724 || cant_combine_insn_p (i2)
2725 || (i1 && cant_combine_insn_p (i1))
2726 || (i0 && cant_combine_insn_p (i0))
2727 || likely_spilled_retval_p (i3))
2728 return 0;
2730 combine_attempts++;
2731 undobuf.other_insn = 0;
2733 /* Reset the hard register usage information. */
2734 CLEAR_HARD_REG_SET (newpat_used_regs);
2736 if (dump_file && (dump_flags & TDF_DETAILS))
2738 if (i0)
2739 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2740 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2741 else if (i1)
2742 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2743 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2744 else
2745 fprintf (dump_file, "\nTrying %d -> %d:\n",
2746 INSN_UID (i2), INSN_UID (i3));
2749 /* If multiple insns feed into one of I2 or I3, they can be in any
2750 order. To simplify the code below, reorder them in sequence. */
2751 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2752 std::swap (i0, i2);
2753 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2754 std::swap (i0, i1);
2755 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2756 std::swap (i1, i2);
2758 added_links_insn = 0;
2760 /* First check for one important special case that the code below will
2761 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2762 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2763 we may be able to replace that destination with the destination of I3.
2764 This occurs in the common code where we compute both a quotient and
2765 remainder into a structure, in which case we want to do the computation
2766 directly into the structure to avoid register-register copies.
2768 Note that this case handles both multiple sets in I2 and also cases
2769 where I2 has a number of CLOBBERs inside the PARALLEL.
2771 We make very conservative checks below and only try to handle the
2772 most common cases of this. For example, we only handle the case
2773 where I2 and I3 are adjacent to avoid making difficult register
2774 usage tests. */
2776 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2777 && REG_P (SET_SRC (PATTERN (i3)))
2778 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2779 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2780 && GET_CODE (PATTERN (i2)) == PARALLEL
2781 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2782 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2783 below would need to check what is inside (and reg_overlap_mentioned_p
2784 doesn't support those codes anyway). Don't allow those destinations;
2785 the resulting insn isn't likely to be recognized anyway. */
2786 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2787 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2788 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2789 SET_DEST (PATTERN (i3)))
2790 && next_active_insn (i2) == i3)
2792 rtx p2 = PATTERN (i2);
2794 /* Make sure that the destination of I3,
2795 which we are going to substitute into one output of I2,
2796 is not used within another output of I2. We must avoid making this:
2797 (parallel [(set (mem (reg 69)) ...)
2798 (set (reg 69) ...)])
2799 which is not well-defined as to order of actions.
2800 (Besides, reload can't handle output reloads for this.)
2802 The problem can also happen if the dest of I3 is a memory ref,
2803 if another dest in I2 is an indirect memory ref.
2805 Neither can this PARALLEL be an asm. We do not allow combining
2806 that usually (see can_combine_p), so do not here either. */
2807 bool ok = true;
2808 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2810 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2811 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2812 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2813 SET_DEST (XVECEXP (p2, 0, i))))
2814 ok = false;
2815 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2816 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2817 ok = false;
2820 if (ok)
2821 for (i = 0; i < XVECLEN (p2, 0); i++)
2822 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2823 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2825 combine_merges++;
2827 subst_insn = i3;
2828 subst_low_luid = DF_INSN_LUID (i2);
2830 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2831 i2src = SET_SRC (XVECEXP (p2, 0, i));
2832 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2833 i2dest_killed = dead_or_set_p (i2, i2dest);
2835 /* Replace the dest in I2 with our dest and make the resulting
2836 insn the new pattern for I3. Then skip to where we validate
2837 the pattern. Everything was set up above. */
2838 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2839 newpat = p2;
2840 i3_subst_into_i2 = 1;
2841 goto validate_replacement;
2845 /* If I2 is setting a pseudo to a constant and I3 is setting some
2846 sub-part of it to another constant, merge them by making a new
2847 constant. */
2848 if (i1 == 0
2849 && (temp_expr = single_set (i2)) != 0
2850 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2851 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2852 && GET_CODE (PATTERN (i3)) == SET
2853 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2854 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2856 rtx dest = SET_DEST (PATTERN (i3));
2857 rtx temp_dest = SET_DEST (temp_expr);
2858 int offset = -1;
2859 int width = 0;
2861 if (GET_CODE (dest) == ZERO_EXTRACT)
2863 if (CONST_INT_P (XEXP (dest, 1))
2864 && CONST_INT_P (XEXP (dest, 2))
2865 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2866 &dest_mode))
2868 width = INTVAL (XEXP (dest, 1));
2869 offset = INTVAL (XEXP (dest, 2));
2870 dest = XEXP (dest, 0);
2871 if (BITS_BIG_ENDIAN)
2872 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2875 else
2877 if (GET_CODE (dest) == STRICT_LOW_PART)
2878 dest = XEXP (dest, 0);
2879 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2881 width = GET_MODE_PRECISION (dest_mode);
2882 offset = 0;
2886 if (offset >= 0)
2888 /* If this is the low part, we're done. */
2889 if (subreg_lowpart_p (dest))
2890 ;
2891 /* Handle the case where inner is twice the size of outer. */
2892 else if (GET_MODE_PRECISION (temp_mode)
2893 == 2 * GET_MODE_PRECISION (dest_mode))
2894 offset += GET_MODE_PRECISION (dest_mode);
2895 /* Otherwise give up for now. */
2896 else
2897 offset = -1;
2900 if (offset >= 0)
2902 rtx inner = SET_SRC (PATTERN (i3));
2903 rtx outer = SET_SRC (temp_expr);
2905 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2906 rtx_mode_t (inner, dest_mode),
2907 offset, width);
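/* Worked example (added commentary, hypothetical values): if I2 is
   (set (reg:DI 100) (const_int 0x1122334455667788)) and I3 is
   (set (subreg:SI (reg:DI 100) 0) (const_int 0x9abcdef0)), then
   dest_mode is SImode, offset is 0, width is 32, and wi::insert
   yields 0x112233449abcdef0, which becomes the single new constant
   assigned by the rewritten I2.  */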
2909 combine_merges++;
2910 subst_insn = i3;
2911 subst_low_luid = DF_INSN_LUID (i2);
2912 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2913 i2dest = temp_dest;
2914 i2dest_killed = dead_or_set_p (i2, i2dest);
2916 /* Replace the source in I2 with the new constant and make the
2917 resulting insn the new pattern for I3. Then skip to where we
2918 validate the pattern. Everything was set up above. */
2919 SUBST (SET_SRC (temp_expr),
2920 immed_wide_int_const (o, temp_mode));
2922 newpat = PATTERN (i2);
2924 /* The dest of I3 has been replaced with the dest of I2. */
2925 changed_i3_dest = 1;
2926 goto validate_replacement;
2930 /* If we have no I1 and I2 looks like:
2931 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2932 (set Y OP)])
2933 make up a dummy I1 that is
2934 (set Y OP)
2935 and change I2 to be
2936 (set (reg:CC X) (compare:CC Y (const_int 0)))
2938 (We can ignore any trailing CLOBBERs.)
2940 This undoes a previous combination and allows us to match a branch-and-
2941 decrement insn. */
2943 if (!HAVE_cc0 && i1 == 0
2944 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2945 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2946 == MODE_CC)
2947 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2948 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2949 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2950 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2951 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2952 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2954 /* We make I1 with the same INSN_UID as I2. This gives it
2955 the same DF_INSN_LUID for value tracking. Our fake I1 will
2956 never appear in the insn stream so giving it the same INSN_UID
2957 as I2 will not cause a problem. */
2959 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2960 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2961 -1, NULL_RTX);
2962 INSN_UID (i1) = INSN_UID (i2);
2964 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2965 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2966 SET_DEST (PATTERN (i1)));
2967 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2968 SUBST_LINK (LOG_LINKS (i2),
2969 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2972 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2973 make those two SETs separate I1 and I2 insns, and make an I0 that is
2974 the original I1. */
2975 if (!HAVE_cc0 && i0 == 0
2976 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2977 && can_split_parallel_of_n_reg_sets (i2, 2)
2978 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2979 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2981 /* If there is no I1, there is no I0 either. */
2982 i0 = i1;
2984 /* We make I1 with the same INSN_UID as I2. This gives it
2985 the same DF_INSN_LUID for value tracking. Our fake I1 will
2986 never appear in the insn stream so giving it the same INSN_UID
2987 as I2 will not cause a problem. */
2989 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2990 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2991 -1, NULL_RTX);
2992 INSN_UID (i1) = INSN_UID (i2);
2994 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2997 /* Verify that I2 and I1 are valid for combining. */
2998 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2999 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3000 &i1dest, &i1src))
3001 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3002 &i0dest, &i0src)))
3004 undo_all ();
3005 return 0;
3008 /* Record whether I2DEST is used in I2SRC and similarly for the other
3009 cases. Knowing this will help in register status updating below. */
3010 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3011 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3012 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3013 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3014 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3015 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3016 i2dest_killed = dead_or_set_p (i2, i2dest);
3017 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3018 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3020 /* For the earlier insns, determine which of the subsequent ones they
3021 feed. */
3022 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3023 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3024 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3025 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3026 && reg_overlap_mentioned_p (i0dest, i2src))));
3028 /* Ensure that I3's pattern can be the destination of combines. */
3029 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3030 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3031 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3032 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3033 &i3dest_killed))
3035 undo_all ();
3036 return 0;
3039 /* See if any of the insns is a MULT operation. Unless one is, we will
3040 reject a combination that is, since it must be slower. Be conservative
3041 here. */
3042 if (GET_CODE (i2src) == MULT
3043 || (i1 != 0 && GET_CODE (i1src) == MULT)
3044 || (i0 != 0 && GET_CODE (i0src) == MULT)
3045 || (GET_CODE (PATTERN (i3)) == SET
3046 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3047 have_mult = 1;
3049 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3050 We used to do this EXCEPT in one case: I3 has a post-inc in an
3051 output operand. However, that exception can give rise to insns like
3052 mov r3,(r3)+
3053 which is a famous insn on the PDP-11 where the value of r3 used as the
3054 source was model-dependent. Avoid this sort of thing. */
3056 #if 0
3057 if (!(GET_CODE (PATTERN (i3)) == SET
3058 && REG_P (SET_SRC (PATTERN (i3)))
3059 && MEM_P (SET_DEST (PATTERN (i3)))
3060 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3061 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3062 /* It's not the exception. */
3063 #endif
3064 if (AUTO_INC_DEC)
3066 rtx link;
3067 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3068 if (REG_NOTE_KIND (link) == REG_INC
3069 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3070 || (i1 != 0
3071 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3073 undo_all ();
3074 return 0;
3078 /* See if the SETs in I1 or I2 need to be kept around in the merged
3079 instruction: whenever the value set there is still needed past I3.
3080 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3082 For the SET in I1, we have two cases: if I1 and I2 independently feed
3083 into I3, the set in I1 needs to be kept around unless I1DEST dies
3084 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3085 in I1 needs to be kept around unless I1DEST dies or is set in either
3086 I2 or I3. The same considerations apply to I0. */
3088 added_sets_2 = !dead_or_set_p (i3, i2dest);
3090 if (i1)
3091 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3092 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3093 else
3094 added_sets_1 = 0;
3096 if (i0)
3097 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3098 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3099 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3100 && dead_or_set_p (i2, i0dest)));
3101 else
3102 added_sets_0 = 0;
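/* Illustrative example (added commentary): in a chain I1 -> I2 -> I3 where
   I1DEST is also read by an insn after I3, I1DEST neither dies nor is set
   in I2 or I3, so added_sets_1 becomes nonzero and the original SET from I1
   must reappear as an extra element of the PARALLEL built for I3 further
   down.  */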
3104 /* We are about to copy insns for the case where they need to be kept
3105 around. Check that they can be copied in the merged instruction. */
3107 if (targetm.cannot_copy_insn_p
3108 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3109 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3110 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3112 undo_all ();
3113 return 0;
3116 /* If the set in I2 needs to be kept around, we must make a copy of
3117 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3118 PATTERN (I2), we are only substituting for the original I1DEST, not into
3119 an already-substituted copy. This also prevents making self-referential
3120 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3121 I2DEST. */
3123 if (added_sets_2)
3125 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3126 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3127 else
3128 i2pat = copy_rtx (PATTERN (i2));
3131 if (added_sets_1)
3133 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3134 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3135 else
3136 i1pat = copy_rtx (PATTERN (i1));
3139 if (added_sets_0)
3141 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3142 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3143 else
3144 i0pat = copy_rtx (PATTERN (i0));
3147 combine_merges++;
3149 /* Substitute in the latest insn for the regs set by the earlier ones. */
3151 maxreg = max_reg_num ();
3153 subst_insn = i3;
3155 /* Many machines that don't use CC0 have insns that can both perform an
3156 arithmetic operation and set the condition code. These operations will
3157 be represented as a PARALLEL with the first element of the vector
3158 being a COMPARE of an arithmetic operation with the constant zero.
3159 The second element of the vector will set some pseudo to the result
3160 of the same arithmetic operation. If we simplify the COMPARE, we won't
3161 match such a pattern and so will generate an extra insn. Here we test
3162 for this case, where both the comparison and the operation result are
3163 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3164 I2SRC. Later we will make the PARALLEL that contains I2. */
3166 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3167 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3168 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3169 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3171 rtx newpat_dest;
3172 rtx *cc_use_loc = NULL;
3173 rtx_insn *cc_use_insn = NULL;
3174 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3175 machine_mode compare_mode, orig_compare_mode;
3176 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3177 scalar_int_mode mode;
3179 newpat = PATTERN (i3);
3180 newpat_dest = SET_DEST (newpat);
3181 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3183 if (undobuf.other_insn == 0
3184 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3185 &cc_use_insn)))
3187 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3188 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3189 compare_code = simplify_compare_const (compare_code, mode,
3190 op0, &op1);
3191 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3194 /* Do the rest only if op1 is const0_rtx, which may be the
3195 result of simplification. */
3196 if (op1 == const0_rtx)
3198 /* If a single use of the CC is found, prepare to modify it
3199 when SELECT_CC_MODE returns a new CC-class mode, or when
3200 the above simplify_compare_const() returned a new comparison
3201 operator. undobuf.other_insn is assigned the CC use insn
3202 when modifying it. */
3203 if (cc_use_loc)
3205 #ifdef SELECT_CC_MODE
3206 machine_mode new_mode
3207 = SELECT_CC_MODE (compare_code, op0, op1);
3208 if (new_mode != orig_compare_mode
3209 && can_change_dest_mode (SET_DEST (newpat),
3210 added_sets_2, new_mode))
3212 unsigned int regno = REGNO (newpat_dest);
3213 compare_mode = new_mode;
3214 if (regno < FIRST_PSEUDO_REGISTER)
3215 newpat_dest = gen_rtx_REG (compare_mode, regno);
3216 else
3218 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3219 newpat_dest = regno_reg_rtx[regno];
3222 #endif
3223 /* Cases for modifying the CC-using comparison. */
3224 if (compare_code != orig_compare_code
3225 /* ??? Do we need to verify the zero rtx? */
3226 && XEXP (*cc_use_loc, 1) == const0_rtx)
3228 /* Replace cc_use_loc with entire new RTX. */
3229 SUBST (*cc_use_loc,
3230 gen_rtx_fmt_ee (compare_code, compare_mode,
3231 newpat_dest, const0_rtx));
3232 undobuf.other_insn = cc_use_insn;
3234 else if (compare_mode != orig_compare_mode)
3236 /* Just replace the CC reg with a new mode. */
3237 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3238 undobuf.other_insn = cc_use_insn;
3242 /* Now we modify the current newpat:
3243 First, SET_DEST(newpat) is updated if the CC mode has been
3244 altered. For targets without SELECT_CC_MODE, this should be
3245 optimized away. */
3246 if (compare_mode != orig_compare_mode)
3247 SUBST (SET_DEST (newpat), newpat_dest);
3248 /* This is always done to propagate i2src into newpat. */
3249 SUBST (SET_SRC (newpat),
3250 gen_rtx_COMPARE (compare_mode, op0, op1));
3251 /* Create new version of i2pat if needed; the below PARALLEL
3252 creation needs this to work correctly. */
3253 if (! rtx_equal_p (i2src, op0))
3254 i2pat = gen_rtx_SET (i2dest, op0);
3255 i2_is_used = 1;
3259 if (i2_is_used == 0)
3261 /* It is possible that the source of I2 or I1 may be performing
3262 an unneeded operation, such as a ZERO_EXTEND of something
3263 that is known to have the high part zero. Handle that case
3264 by letting subst look at the inner insns.
3266 Another way to do this would be to have a function that tries
3267 to simplify a single insn instead of merging two or more
3268 insns. We don't do this because of the potential of infinite
3269 loops and because of the potential extra memory required.
3270 However, doing it the way we are is a bit of a kludge and
3271 doesn't catch all cases.
3273 But only do this if -fexpensive-optimizations since it slows
3274 things down and doesn't usually win.
3276 This is not done in the COMPARE case above because the
3277 unmodified I2PAT is used in the PARALLEL and so a pattern
3278 with a modified I2SRC would not match. */
3280 if (flag_expensive_optimizations)
3282 /* Pass pc_rtx so no substitutions are done, just
3283 simplifications. */
3284 if (i1)
3286 subst_low_luid = DF_INSN_LUID (i1);
3287 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3290 subst_low_luid = DF_INSN_LUID (i2);
3291 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3294 n_occurrences = 0; /* `subst' counts here */
3295 subst_low_luid = DF_INSN_LUID (i2);
3297 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3298 copy of I2SRC each time we substitute it, in order to avoid creating
3299 self-referential RTL when we will be substituting I1SRC for I1DEST
3300 later. Likewise if I0 feeds into I2, either directly or indirectly
3301 through I1, and I0DEST is in I0SRC. */
3302 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3303 (i1_feeds_i2_n && i1dest_in_i1src)
3304 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3305 && i0dest_in_i0src));
3306 substed_i2 = 1;
3308 /* Record whether I2's body now appears within I3's body. */
3309 i2_is_used = n_occurrences;
3312 /* If we already got a failure, don't try to do more. Otherwise, try to
3313 substitute I1 if we have it. */
3315 if (i1 && GET_CODE (newpat) != CLOBBER)
3317 /* Check that an autoincrement side-effect on I1 has not been lost.
3318 This happens if I1DEST is mentioned in I2 and dies there, and
3319 has disappeared from the new pattern. */
3320 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3321 && i1_feeds_i2_n
3322 && dead_or_set_p (i2, i1dest)
3323 && !reg_overlap_mentioned_p (i1dest, newpat))
3324 /* Before we can do this substitution, we must redo the test done
3325 above (see detailed comments there) that ensures I1DEST isn't
3326 mentioned in any SETs in NEWPAT that are field assignments. */
3327 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3328 0, 0, 0))
3330 undo_all ();
3331 return 0;
3334 n_occurrences = 0;
3335 subst_low_luid = DF_INSN_LUID (i1);
3337 /* If the following substitution will modify I1SRC, make a copy of it
3338 for the case where it is substituted for I1DEST in I2PAT later. */
3339 if (added_sets_2 && i1_feeds_i2_n)
3340 i1src_copy = copy_rtx (i1src);
3342 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3343 copy of I1SRC each time we substitute it, in order to avoid creating
3344 self-referential RTL when we will be substituting I0SRC for I0DEST
3345 later. */
3346 newpat = subst (newpat, i1dest, i1src, 0, 0,
3347 i0_feeds_i1_n && i0dest_in_i0src);
3348 substed_i1 = 1;
3350 /* Record whether I1's body now appears within I3's body. */
3351 i1_is_used = n_occurrences;
3354 /* Likewise for I0 if we have it. */
3356 if (i0 && GET_CODE (newpat) != CLOBBER)
3358 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3359 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3360 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3361 && !reg_overlap_mentioned_p (i0dest, newpat))
3362 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3363 0, 0, 0))
3365 undo_all ();
3366 return 0;
3369 /* If the following substitution will modify I0SRC, make a copy of it
3370 for the case where it is substituted for I0DEST in I1PAT later. */
3371 if (added_sets_1 && i0_feeds_i1_n)
3372 i0src_copy = copy_rtx (i0src);
3373 /* And a copy for I0DEST in I2PAT substitution. */
3374 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3375 || (i0_feeds_i2_n)))
3376 i0src_copy2 = copy_rtx (i0src);
3378 n_occurrences = 0;
3379 subst_low_luid = DF_INSN_LUID (i0);
3380 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3381 substed_i0 = 1;
3384 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3385 to count all the ways that I2SRC and I1SRC can be used. */
3386 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3387 && i2_is_used + added_sets_2 > 1)
3388 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3389 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3390 > 1))
3391 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3392 && (n_occurrences + added_sets_0
3393 + (added_sets_1 && i0_feeds_i1_n)
3394 + (added_sets_2 && i0_feeds_i2_n)
3395 > 1))
3396 /* Fail if we tried to make a new register. */
3397 || max_reg_num () != maxreg
3398 /* Fail if we couldn't do something and have a CLOBBER. */
3399 || GET_CODE (newpat) == CLOBBER
3400 /* Fail if this new pattern is a MULT and we didn't have one before
3401 at the outer level. */
3402 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3403 && ! have_mult))
3405 undo_all ();
3406 return 0;
3409 /* If the actions of the earlier insns must be kept
3410 in addition to substituting them into the latest one,
3411 we must make a new PARALLEL for the latest insn
3412 to hold the additional SETs.
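     /* For example, if I2's assignment must be preserved, the result is
	roughly (parallel [<NEWPAT so far> (set I2DEST I2SRC)]).  */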
3414 if (added_sets_0 || added_sets_1 || added_sets_2)
3416 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3417 combine_extras++;
3419 if (GET_CODE (newpat) == PARALLEL)
3421 rtvec old = XVEC (newpat, 0);
3422 total_sets = XVECLEN (newpat, 0) + extra_sets;
3423 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3424 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3425 sizeof (old->elem[0]) * old->num_elem);
3427 else
3429 rtx old = newpat;
3430 total_sets = 1 + extra_sets;
3431 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3432 XVECEXP (newpat, 0, 0) = old;
3435 if (added_sets_0)
3436 XVECEXP (newpat, 0, --total_sets) = i0pat;
3438 if (added_sets_1)
3440 rtx t = i1pat;
3441 if (i0_feeds_i1_n)
3442 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3444 XVECEXP (newpat, 0, --total_sets) = t;
3446 if (added_sets_2)
3448 rtx t = i2pat;
3449 if (i1_feeds_i2_n)
3450 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3451 i0_feeds_i1_n && i0dest_in_i0src);
3452 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3453 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3455 XVECEXP (newpat, 0, --total_sets) = t;
3459 validate_replacement:
3461 /* Note which hard regs this insn has as inputs. */
3462 mark_used_regs_combine (newpat);
3464 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3465 consider splitting this pattern, we might need these clobbers. */
3466 if (i1 && GET_CODE (newpat) == PARALLEL
3467 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3469 int len = XVECLEN (newpat, 0);
3471 newpat_vec_with_clobbers = rtvec_alloc (len);
3472 for (i = 0; i < len; i++)
3473 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3476 /* We have recognized nothing yet. */
3477 insn_code_number = -1;
3479 /* See if this is a PARALLEL of two SETs where one SET's destination is
3480 a register that is unused and this isn't marked as an instruction that
3481 might trap in an EH region. In that case, we just need the other SET.
3482 We prefer this over the PARALLEL.
3484 This can occur when simplifying a divmod insn. We *must* test for this
3485 case here because the code below that splits two independent SETs doesn't
3486 handle this case correctly when it updates the register status.
3488 It's pointless doing this if we originally had two sets, one from
3489 i3, and one from i2. Combining then splitting the parallel results
3490 in the original i2 again plus an invalid insn (which we delete).
3491 The net effect is only to move instructions around, which makes
3492 debug info less accurate.
3494 If the remaining SET came from I2 its destination should not be used
3495 between I2 and I3. See PR82024. */
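   /* For instance, a divmod that computes both quotient and remainder,
      (parallel [(set Q (div A B)) (set R (mod A B))]), where R is marked
      REG_UNUSED in I3, can be reduced to just (set Q (div A B)).  */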
3497 if (!(added_sets_2 && i1 == 0)
3498 && is_parallel_of_n_reg_sets (newpat, 2)
3499 && asm_noperands (newpat) < 0)
3501 rtx set0 = XVECEXP (newpat, 0, 0);
3502 rtx set1 = XVECEXP (newpat, 0, 1);
3503 rtx oldpat = newpat;
3505 if (((REG_P (SET_DEST (set1))
3506 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3507 || (GET_CODE (SET_DEST (set1)) == SUBREG
3508 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3509 && insn_nothrow_p (i3)
3510 && !side_effects_p (SET_SRC (set1)))
3512 newpat = set0;
3513 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3516 else if (((REG_P (SET_DEST (set0))
3517 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3518 || (GET_CODE (SET_DEST (set0)) == SUBREG
3519 && find_reg_note (i3, REG_UNUSED,
3520 SUBREG_REG (SET_DEST (set0)))))
3521 && insn_nothrow_p (i3)
3522 && !side_effects_p (SET_SRC (set0)))
3524 rtx dest = SET_DEST (set1);
3525 if (GET_CODE (dest) == SUBREG)
3526 dest = SUBREG_REG (dest);
3527 if (!reg_used_between_p (dest, i2, i3))
3529 newpat = set1;
3530 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3532 if (insn_code_number >= 0)
3533 changed_i3_dest = 1;
3537 if (insn_code_number < 0)
3538 newpat = oldpat;
3541 /* Is the result of combination a valid instruction? */
3542 if (insn_code_number < 0)
3543 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3545 /* If we were combining three insns and the result is a simple SET
3546 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3547 insns. There are two ways to do this. It can be split using a
3548 machine-specific method (like when you have an addition of a large
3549 constant) or by combine in the function find_split_point. */
3551 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3552 && asm_noperands (newpat) < 0)
3554 rtx parallel, *split;
3555 rtx_insn *m_split_insn;
3557 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3558 use I2DEST as a scratch register will help. In the latter case,
3559 convert I2DEST to the mode of the source of NEWPAT if we can. */
3561 m_split_insn = combine_split_insns (newpat, i3);
3563 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3564 inputs of NEWPAT. */
3566 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3567 possible to try that as a scratch reg. This would require adding
3568 more code to make it work though. */
3570 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3572 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3574 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3575 (temporarily, until we are committed to this instruction
3576 combination) does not work: for example, any call to nonzero_bits
3577 on the register (from a splitter in the MD file, for example)
3578 will get the old information, which is invalid.
3580 Since nowadays we can create registers during combine just fine,
3581 we should just create a new one here, not reuse i2dest. */
3583 /* First try to split using the original register as a
3584 scratch register. */
3585 parallel = gen_rtx_PARALLEL (VOIDmode,
3586 gen_rtvec (2, newpat,
3587 gen_rtx_CLOBBER (VOIDmode,
3588 i2dest)));
3589 m_split_insn = combine_split_insns (parallel, i3);
3591 /* If that didn't work, try changing the mode of I2DEST if
3592 we can. */
3593 if (m_split_insn == 0
3594 && new_mode != GET_MODE (i2dest)
3595 && new_mode != VOIDmode
3596 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3598 machine_mode old_mode = GET_MODE (i2dest);
3599 rtx ni2dest;
3601 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3602 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3603 else
3605 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3606 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3609 parallel = (gen_rtx_PARALLEL
3610 (VOIDmode,
3611 gen_rtvec (2, newpat,
3612 gen_rtx_CLOBBER (VOIDmode,
3613 ni2dest))));
3614 m_split_insn = combine_split_insns (parallel, i3);
3616 if (m_split_insn == 0
3617 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3619 struct undo *buf;
3621 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3622 buf = undobuf.undos;
3623 undobuf.undos = buf->next;
3624 buf->next = undobuf.frees;
3625 undobuf.frees = buf;
3629 i2scratch = m_split_insn != 0;
3632 /* If recog_for_combine has discarded clobbers, try to use them
3633 again for the split. */
3634 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3636 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3637 m_split_insn = combine_split_insns (parallel, i3);
3640 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3642 rtx m_split_pat = PATTERN (m_split_insn);
3643 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3644 if (insn_code_number >= 0)
3645 newpat = m_split_pat;
3647 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3648 && (next_nonnote_nondebug_insn (i2) == i3
3649 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3651 rtx i2set, i3set;
3652 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3653 newi2pat = PATTERN (m_split_insn);
3655 i3set = single_set (NEXT_INSN (m_split_insn));
3656 i2set = single_set (m_split_insn);
3658 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3660 /* If I2 or I3 has multiple SETs, we won't know how to track
3661 register status, so don't use these insns. If I2's destination
3662 is used between I2 and I3, we also can't use these insns. */
3664 if (i2_code_number >= 0 && i2set && i3set
3665 && (next_nonnote_nondebug_insn (i2) == i3
3666 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3667 insn_code_number = recog_for_combine (&newi3pat, i3,
3668 &new_i3_notes);
3669 if (insn_code_number >= 0)
3670 newpat = newi3pat;
3672 /* It is possible that both insns now set the destination of I3.
3673 If so, we must show an extra use of it. */
3675 if (insn_code_number >= 0)
3677 rtx new_i3_dest = SET_DEST (i3set);
3678 rtx new_i2_dest = SET_DEST (i2set);
3680 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3681 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3682 || GET_CODE (new_i3_dest) == SUBREG)
3683 new_i3_dest = XEXP (new_i3_dest, 0);
3685 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3686 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3687 || GET_CODE (new_i2_dest) == SUBREG)
3688 new_i2_dest = XEXP (new_i2_dest, 0);
3690 if (REG_P (new_i3_dest)
3691 && REG_P (new_i2_dest)
3692 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3693 && REGNO (new_i2_dest) < reg_n_sets_max)
3694 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3698 /* If we can split it and use I2DEST, go ahead and see if that
3699 helps things be recognized. Verify that none of the registers
3700 are set between I2 and I3. */
3701 if (insn_code_number < 0
3702 && (split = find_split_point (&newpat, i3, false)) != 0
3703 && (!HAVE_cc0 || REG_P (i2dest))
3704 /* We need I2DEST in the proper mode. If it is a hard register
3705 or the only use of a pseudo, we can change its mode.
3706 Make sure we don't change a hard register to have a mode that
3707 isn't valid for it, or change the number of registers. */
3708 && (GET_MODE (*split) == GET_MODE (i2dest)
3709 || GET_MODE (*split) == VOIDmode
3710 || can_change_dest_mode (i2dest, added_sets_2,
3711 GET_MODE (*split)))
3712 && (next_nonnote_nondebug_insn (i2) == i3
3713 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3714 /* We can't overwrite I2DEST if its value is still used by
3715 NEWPAT. */
3716 && ! reg_referenced_p (i2dest, newpat))
3718 rtx newdest = i2dest;
3719 enum rtx_code split_code = GET_CODE (*split);
3720 machine_mode split_mode = GET_MODE (*split);
3721 bool subst_done = false;
3722 newi2pat = NULL_RTX;
3724 i2scratch = true;
3726 /* *SPLIT may be part of I2SRC, so make sure we have the
3727 original expression around for later debug processing.
3728 We should not need I2SRC any more in other cases. */
3729 if (MAY_HAVE_DEBUG_INSNS)
3730 i2src = copy_rtx (i2src);
3731 else
3732 i2src = NULL;
3734 /* Get NEWDEST as a register in the proper mode. We have already
3735 validated that we can do this. */
3736 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3738 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3739 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3740 else
3742 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3743 newdest = regno_reg_rtx[REGNO (i2dest)];
3747 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3748 an ASHIFT. This can occur if it was inside a PLUS and hence
3749 appeared to be a memory address. This is a kludge. */
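	  /* For example, (mult FOO (const_int 8)) is rewritten here as
	     (ashift FOO (const_int 3)).  */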
3750 if (split_code == MULT
3751 && CONST_INT_P (XEXP (*split, 1))
3752 && INTVAL (XEXP (*split, 1)) > 0
3753 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3755 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3756 XEXP (*split, 0), GEN_INT (i)));
3757 /* Update split_code because we may not have a multiply
3758 anymore. */
3759 split_code = GET_CODE (*split);
3762 /* Similarly for (plus (mult FOO (const_int pow2))). */
3763 if (split_code == PLUS
3764 && GET_CODE (XEXP (*split, 0)) == MULT
3765 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3766 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3767 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3769 rtx nsplit = XEXP (*split, 0);
3770 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3771 XEXP (nsplit, 0), GEN_INT (i)));
3772 /* Update split_code because we may not have a multiply
3773 anymore. */
3774 split_code = GET_CODE (*split);
3777 #ifdef INSN_SCHEDULING
3778 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3779 be written as a ZERO_EXTEND. */
3780 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3782 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3783 what it really is. */
3784 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3785 == SIGN_EXTEND)
3786 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3787 SUBREG_REG (*split)));
3788 else
3789 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3790 SUBREG_REG (*split)));
3792 #endif
3794 /* Attempt to split binary operators using arithmetic identities. */
3795 if (BINARY_P (SET_SRC (newpat))
3796 && split_mode == GET_MODE (SET_SRC (newpat))
3797 && ! side_effects_p (SET_SRC (newpat)))
3799 rtx setsrc = SET_SRC (newpat);
3800 machine_mode mode = GET_MODE (setsrc);
3801 enum rtx_code code = GET_CODE (setsrc);
3802 rtx src_op0 = XEXP (setsrc, 0);
3803 rtx src_op1 = XEXP (setsrc, 1);
3805 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3806 if (rtx_equal_p (src_op0, src_op1))
3808 newi2pat = gen_rtx_SET (newdest, src_op0);
3809 SUBST (XEXP (setsrc, 0), newdest);
3810 SUBST (XEXP (setsrc, 1), newdest);
3811 subst_done = true;
3813 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3814 else if ((code == PLUS || code == MULT)
3815 && GET_CODE (src_op0) == code
3816 && GET_CODE (XEXP (src_op0, 0)) == code
3817 && (INTEGRAL_MODE_P (mode)
3818 || (FLOAT_MODE_P (mode)
3819 && flag_unsafe_math_optimizations)))
3821 rtx p = XEXP (XEXP (src_op0, 0), 0);
3822 rtx q = XEXP (XEXP (src_op0, 0), 1);
3823 rtx r = XEXP (src_op0, 1);
3824 rtx s = src_op1;
3826 /* Split both "((X op Y) op X) op Y" and
3827 "((X op Y) op Y) op X" as "T op T" where T is
3828 "X op Y". */
3829 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3830 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3832 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3833 SUBST (XEXP (setsrc, 0), newdest);
3834 SUBST (XEXP (setsrc, 1), newdest);
3835 subst_done = true;
3837 /* Split "((X op X) op Y) op Y)" as "T op T" where
3838 T is "X op Y". */
3839 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3841 rtx tmp = simplify_gen_binary (code, mode, p, r);
3842 newi2pat = gen_rtx_SET (newdest, tmp);
3843 SUBST (XEXP (setsrc, 0), newdest);
3844 SUBST (XEXP (setsrc, 1), newdest);
3845 subst_done = true;
3850 if (!subst_done)
3852 newi2pat = gen_rtx_SET (newdest, *split);
3853 SUBST (*split, newdest);
3856 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3858 /* recog_for_combine might have added CLOBBERs to newi2pat.
3859 Make sure NEWPAT does not depend on the clobbered regs. */
3860 if (GET_CODE (newi2pat) == PARALLEL)
3861 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3862 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3864 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3865 if (reg_overlap_mentioned_p (reg, newpat))
3867 undo_all ();
3868 return 0;
3872 /* If the split point was a MULT and we didn't have one before,
3873 don't use one now. */
3874 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3875 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3879 /* Check for a case where we loaded from memory in a narrow mode and
3880 then sign extended it, but we need both registers. In that case,
3881 we have a PARALLEL with both loads from the same memory location.
3882 We can split this into a load from memory followed by a register-register
3883 copy. This saves at least one insn, more if register allocation can
3884 eliminate the copy.
3886 We cannot do this if the destination of the first assignment is a
3887 condition code register or cc0. We eliminate this case by making sure
3888 the SET_DEST and SET_SRC have the same mode.
3890 We cannot do this if the destination of the second assignment is
3891 a register that we have already assumed is zero-extended. Similarly
3892 for a SUBREG of such a register. */
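  /* Schematically, (parallel [(set A (sign_extend (mem M)))
			       (set B (mem M))])
     becomes (set A (sign_extend (mem M))) as the new I2 and
     (set B <lowpart of A>) as the new I3, so the second load turns into
     a register-register copy.  */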
3894 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3895 && GET_CODE (newpat) == PARALLEL
3896 && XVECLEN (newpat, 0) == 2
3897 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3898 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3899 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3900 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3901 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3902 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3903 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3904 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3905 DF_INSN_LUID (i2))
3906 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3907 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3908 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3909 (REG_P (temp_expr)
3910 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3911 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3912 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3913 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3914 != GET_MODE_MASK (word_mode))))
3915 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3916 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3917 (REG_P (temp_expr)
3918 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3919 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3920 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3921 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3922 != GET_MODE_MASK (word_mode)))))
3923 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3924 SET_SRC (XVECEXP (newpat, 0, 1)))
3925 && ! find_reg_note (i3, REG_UNUSED,
3926 SET_DEST (XVECEXP (newpat, 0, 0))))
3928 rtx ni2dest;
3930 newi2pat = XVECEXP (newpat, 0, 0);
3931 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3932 newpat = XVECEXP (newpat, 0, 1);
3933 SUBST (SET_SRC (newpat),
3934 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3935 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3937 if (i2_code_number >= 0)
3938 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3940 if (insn_code_number >= 0)
3941 swap_i2i3 = 1;
3944 /* Similarly, check for a case where we have a PARALLEL of two independent
3945 SETs but we started with three insns. In this case, we can do the sets
3946 as two separate insns. This case occurs when some SET allows two
3947 other insns to combine, but the destination of that SET is still live.
3949 Also do this if we started with two insns and (at least) one of the
3950 resulting sets is a noop; this noop will be deleted later. */
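  /* That is, (parallel [(set A X) (set B Y)]) is emitted as one SET in
     the new I2 and the other in I3; the checks below decide which of
     the two can safely be done first.  */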
3952 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3953 && GET_CODE (newpat) == PARALLEL
3954 && XVECLEN (newpat, 0) == 2
3955 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3956 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3957 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3958 || set_noop_p (XVECEXP (newpat, 0, 1)))
3959 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3960 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3961 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3962 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3963 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3964 XVECEXP (newpat, 0, 0))
3965 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3966 XVECEXP (newpat, 0, 1))
3967 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3968 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3970 rtx set0 = XVECEXP (newpat, 0, 0);
3971 rtx set1 = XVECEXP (newpat, 0, 1);
3973 /* Normally, it doesn't matter which of the two is done first,
3974 but the one that references cc0 can't be the second, and
3975 one which uses any regs/memory set in between i2 and i3 can't
3976 be first. The PARALLEL might also have been pre-existing in i3,
3977 so we need to make sure that we won't wrongly hoist a SET to i2
3978 that would conflict with a death note present in there. */
3979 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3980 && !(REG_P (SET_DEST (set1))
3981 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3982 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3983 && find_reg_note (i2, REG_DEAD,
3984 SUBREG_REG (SET_DEST (set1))))
3985 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3986 /* If I3 is a jump, ensure that set0 is a jump so that
3987 we do not create invalid RTL. */
3988 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3991 newi2pat = set1;
3992 newpat = set0;
3994 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3995 && !(REG_P (SET_DEST (set0))
3996 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3997 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3998 && find_reg_note (i2, REG_DEAD,
3999 SUBREG_REG (SET_DEST (set0))))
4000 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4001 /* If I3 is a jump, ensure that set1 is a jump so that
4002 we do not create invalid RTL. */
4003 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4006 newi2pat = set0;
4007 newpat = set1;
4009 else
4011 undo_all ();
4012 return 0;
4015 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4017 if (i2_code_number >= 0)
4019 /* recog_for_combine might have added CLOBBERs to newi2pat.
4020 Make sure NEWPAT does not depend on the clobbered regs. */
4021 if (GET_CODE (newi2pat) == PARALLEL)
4023 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4024 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4026 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4027 if (reg_overlap_mentioned_p (reg, newpat))
4029 undo_all ();
4030 return 0;
4035 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4039 /* If it still isn't recognized, fail and change things back the way they
4040 were. */
4041 if ((insn_code_number < 0
4042 /* Is the result a reasonable ASM_OPERANDS? */
4043 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4045 undo_all ();
4046 return 0;
4049 /* If we had to change another insn, make sure it is valid also. */
4050 if (undobuf.other_insn)
4052 CLEAR_HARD_REG_SET (newpat_used_regs);
4054 other_pat = PATTERN (undobuf.other_insn);
4055 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4056 &new_other_notes);
4058 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4060 undo_all ();
4061 return 0;
4065 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4066 they are adjacent to each other or not. */
4067 if (HAVE_cc0)
4069 rtx_insn *p = prev_nonnote_insn (i3);
4070 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4071 && sets_cc0_p (newi2pat))
4073 undo_all ();
4074 return 0;
4078 /* Only allow this combination if insn_rtx_costs reports that the
4079 replacement instructions are cheaper than the originals. */
4080 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4082 undo_all ();
4083 return 0;
4086 if (MAY_HAVE_DEBUG_INSNS)
4088 struct undo *undo;
4090 for (undo = undobuf.undos; undo; undo = undo->next)
4091 if (undo->kind == UNDO_MODE)
4093 rtx reg = *undo->where.r;
4094 machine_mode new_mode = GET_MODE (reg);
4095 machine_mode old_mode = undo->old_contents.m;
4097 /* Temporarily revert mode back. */
4098 adjust_reg_mode (reg, old_mode);
4100 if (reg == i2dest && i2scratch)
4102 /* If we used i2dest as a scratch register with a
4103 different mode, substitute it for the original
4104 i2src while its original mode is temporarily
4105 restored, and then clear i2scratch so that we don't
4106 do it again later. */
4107 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4108 this_basic_block);
4109 i2scratch = false;
4110 /* Put back the new mode. */
4111 adjust_reg_mode (reg, new_mode);
4113 else
4115 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4116 rtx_insn *first, *last;
4118 if (reg == i2dest)
4120 first = i2;
4121 last = last_combined_insn;
4123 else
4125 first = i3;
4126 last = undobuf.other_insn;
4127 gcc_assert (last);
4128 if (DF_INSN_LUID (last)
4129 < DF_INSN_LUID (last_combined_insn))
4130 last = last_combined_insn;
4133 /* We're dealing with a reg that changed mode but not
4134 meaning, so we want to turn it into a subreg for
4135 the new mode. However, because of REG sharing and
4136 because its mode had already changed, we have to do
4137 it in two steps. First, replace any debug uses of
4138 reg, with its original mode temporarily restored,
4139 with this copy we have created; then, replace the
4140 copy with the SUBREG of the original shared reg,
4141 once again changed to the new mode. */
4142 propagate_for_debug (first, last, reg, tempreg,
4143 this_basic_block);
4144 adjust_reg_mode (reg, new_mode);
4145 propagate_for_debug (first, last, tempreg,
4146 lowpart_subreg (old_mode, reg, new_mode),
4147 this_basic_block);
4152 /* If we will be able to accept this, we have made a
4153 change to the destination of I3. This requires us to
4154 do a few adjustments. */
4156 if (changed_i3_dest)
4158 PATTERN (i3) = newpat;
4159 adjust_for_new_dest (i3);
4162 /* We now know that we can do this combination. Merge the insns and
4163 update the status of registers and LOG_LINKS. */
4165 if (undobuf.other_insn)
4167 rtx note, next;
4169 PATTERN (undobuf.other_insn) = other_pat;
4171 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4172 ensure that they are still valid. Then add any non-duplicate
4173 notes added by recog_for_combine. */
4174 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4176 next = XEXP (note, 1);
4178 if ((REG_NOTE_KIND (note) == REG_DEAD
4179 && !reg_referenced_p (XEXP (note, 0),
4180 PATTERN (undobuf.other_insn)))
4181 || (REG_NOTE_KIND (note) == REG_UNUSED
4182 && !reg_set_p (XEXP (note, 0),
4183 PATTERN (undobuf.other_insn)))
4184 /* Simply drop the equal note since it may no longer be valid
4185 for other_insn. It may be possible to record that the CC
4186 register is changed and only discard those notes, but
4187 in practice it's an unnecessary complication and doesn't
4188 give any meaningful improvement.
4190 See PR78559. */
4191 || REG_NOTE_KIND (note) == REG_EQUAL
4192 || REG_NOTE_KIND (note) == REG_EQUIV)
4193 remove_note (undobuf.other_insn, note);
4196 distribute_notes (new_other_notes, undobuf.other_insn,
4197 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4198 NULL_RTX);
4201 if (swap_i2i3)
4203 rtx_insn *insn;
4204 struct insn_link *link;
4205 rtx ni2dest;
4207 /* I3 now uses what used to be its destination and which is now
4208 I2's destination. This requires us to do a few adjustments. */
4209 PATTERN (i3) = newpat;
4210 adjust_for_new_dest (i3);
4212 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4213 so we still will.
4215 However, some later insn might be using I2's dest and have
4216 a LOG_LINK pointing at I3. We must remove this link.
4217 The simplest way to remove the link is to point it at I1,
4218 which we know will be a NOTE. */
4220 /* newi2pat is usually a SET here; however, recog_for_combine might
4221 have added some clobbers. */
4222 if (GET_CODE (newi2pat) == PARALLEL)
4223 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4224 else
4225 ni2dest = SET_DEST (newi2pat);
4227 for (insn = NEXT_INSN (i3);
4228 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4229 || insn != BB_HEAD (this_basic_block->next_bb));
4230 insn = NEXT_INSN (insn))
4232 if (NONDEBUG_INSN_P (insn)
4233 && reg_referenced_p (ni2dest, PATTERN (insn)))
4235 FOR_EACH_LOG_LINK (link, insn)
4236 if (link->insn == i3)
4237 link->insn = i1;
4239 break;
4245 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4246 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4247 rtx midnotes = 0;
4248 int from_luid;
4249 /* Compute which registers we expect to eliminate. newi2pat may be setting
4250 either i3dest or i2dest, so we must check it. */
4251 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4252 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4253 || !i2dest_killed
4254 ? 0 : i2dest);
4255 /* For i1, we need to compute both local elimination and global
4256 elimination information with respect to newi2pat because i1dest
4257 may be the same as i3dest, in which case newi2pat may be setting
4258 i1dest. Global information is used when distributing REG_DEAD
4259 note for i2 and i3, in which case it does matter if newi2pat sets
4260 i1dest or not.
4262 Local information is used when distributing REG_DEAD note for i1,
4263 in which case it doesn't matter if newi2pat sets i1dest or not.
4264 See PR62151, if we have four insns combination:
4265 i0: r0 <- i0src
4266 i1: r1 <- i1src (using r0)
4267 REG_DEAD (r0)
4268 i2: r0 <- i2src (using r1)
4269 i3: r3 <- i3src (using r0)
4270 ix: using r0
4271 From i1's point of view, r0 is eliminated, no matter if it is set
4272 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4273 should be discarded.
4275 Note local information only affects cases in forms like "I1->I2->I3",
4276 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4277 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4278 i0dest anyway. */
4279 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4280 || !i1dest_killed
4281 ? 0 : i1dest);
4282 rtx elim_i1 = (local_elim_i1 == 0
4283 || (newi2pat && reg_set_p (i1dest, newi2pat))
4284 ? 0 : i1dest);
4285 /* Same case as i1. */
4286 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4287 ? 0 : i0dest);
4288 rtx elim_i0 = (local_elim_i0 == 0
4289 || (newi2pat && reg_set_p (i0dest, newi2pat))
4290 ? 0 : i0dest);
4292 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4293 clear them. */
4294 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4295 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4296 if (i1)
4297 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4298 if (i0)
4299 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4301 /* Ensure that we do not have something that should not be shared but
4302 occurs multiple times in the new insns. Check this by first
4303 resetting all the `used' flags and then copying anything that is shared. */
4305 reset_used_flags (i3notes);
4306 reset_used_flags (i2notes);
4307 reset_used_flags (i1notes);
4308 reset_used_flags (i0notes);
4309 reset_used_flags (newpat);
4310 reset_used_flags (newi2pat);
4311 if (undobuf.other_insn)
4312 reset_used_flags (PATTERN (undobuf.other_insn));
4314 i3notes = copy_rtx_if_shared (i3notes);
4315 i2notes = copy_rtx_if_shared (i2notes);
4316 i1notes = copy_rtx_if_shared (i1notes);
4317 i0notes = copy_rtx_if_shared (i0notes);
4318 newpat = copy_rtx_if_shared (newpat);
4319 newi2pat = copy_rtx_if_shared (newi2pat);
4320 if (undobuf.other_insn)
4321 reset_used_flags (PATTERN (undobuf.other_insn));
4323 INSN_CODE (i3) = insn_code_number;
4324 PATTERN (i3) = newpat;
4326 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4328 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4329 link = XEXP (link, 1))
4331 if (substed_i2)
4333 /* I2SRC must still be meaningful at this point. Some
4334 splitting operations can invalidate I2SRC, but those
4335 operations do not apply to calls. */
4336 gcc_assert (i2src);
4337 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4338 i2dest, i2src);
4340 if (substed_i1)
4341 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4342 i1dest, i1src);
4343 if (substed_i0)
4344 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4345 i0dest, i0src);
4349 if (undobuf.other_insn)
4350 INSN_CODE (undobuf.other_insn) = other_code_number;
4352 /* We had one special case above where I2 had more than one set and
4353 we replaced a destination of one of those sets with the destination
4354 of I3. In that case, we have to update LOG_LINKS of insns later
4355 in this basic block. Note that this (expensive) case is rare.
4357 Also, in this case, we must pretend that all REG_NOTEs for I2
4358 actually came from I3, so that REG_UNUSED notes from I2 will be
4359 properly handled. */
4361 if (i3_subst_into_i2)
4363 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4364 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4365 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4366 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4367 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4368 && ! find_reg_note (i2, REG_UNUSED,
4369 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4370 for (temp_insn = NEXT_INSN (i2);
4371 temp_insn
4372 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4373 || BB_HEAD (this_basic_block) != temp_insn);
4374 temp_insn = NEXT_INSN (temp_insn))
4375 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4376 FOR_EACH_LOG_LINK (link, temp_insn)
4377 if (link->insn == i2)
4378 link->insn = i3;
4380 if (i3notes)
4382 rtx link = i3notes;
4383 while (XEXP (link, 1))
4384 link = XEXP (link, 1);
4385 XEXP (link, 1) = i2notes;
4387 else
4388 i3notes = i2notes;
4389 i2notes = 0;
4392 LOG_LINKS (i3) = NULL;
4393 REG_NOTES (i3) = 0;
4394 LOG_LINKS (i2) = NULL;
4395 REG_NOTES (i2) = 0;
4397 if (newi2pat)
4399 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4400 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4401 this_basic_block);
4402 INSN_CODE (i2) = i2_code_number;
4403 PATTERN (i2) = newi2pat;
4405 else
4407 if (MAY_HAVE_DEBUG_INSNS && i2src)
4408 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4409 this_basic_block);
4410 SET_INSN_DELETED (i2);
4413 if (i1)
4415 LOG_LINKS (i1) = NULL;
4416 REG_NOTES (i1) = 0;
4417 if (MAY_HAVE_DEBUG_INSNS)
4418 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4419 this_basic_block);
4420 SET_INSN_DELETED (i1);
4423 if (i0)
4425 LOG_LINKS (i0) = NULL;
4426 REG_NOTES (i0) = 0;
4427 if (MAY_HAVE_DEBUG_INSNS)
4428 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4429 this_basic_block);
4430 SET_INSN_DELETED (i0);
4433 /* Get death notes for everything that is now used in either I3 or
4434 I2 and used to die in a previous insn. If we built two new
4435 patterns, move from I1 to I2 then I2 to I3 so that we get the
4436 proper movement on registers that I2 modifies. */
4438 if (i0)
4439 from_luid = DF_INSN_LUID (i0);
4440 else if (i1)
4441 from_luid = DF_INSN_LUID (i1);
4442 else
4443 from_luid = DF_INSN_LUID (i2);
4444 if (newi2pat)
4445 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4446 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4448 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4449 if (i3notes)
4450 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4451 elim_i2, elim_i1, elim_i0);
4452 if (i2notes)
4453 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4454 elim_i2, elim_i1, elim_i0);
4455 if (i1notes)
4456 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4457 elim_i2, local_elim_i1, local_elim_i0);
4458 if (i0notes)
4459 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4460 elim_i2, elim_i1, local_elim_i0);
4461 if (midnotes)
4462 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4463 elim_i2, elim_i1, elim_i0);
4465 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4466 know these are REG_UNUSED and want them to go to the desired insn,
4467 so we always pass it as i3. */
4469 if (newi2pat && new_i2_notes)
4470 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4471 NULL_RTX);
4473 if (new_i3_notes)
4474 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4475 NULL_RTX);
4477 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4478 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4479 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4480 in that case, it might delete I2. Similarly for I2 and I1.
4481 Show an additional death due to the REG_DEAD note we make here. If
4482 we discard it in distribute_notes, we will decrement it again. */
4484 if (i3dest_killed)
4486 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4487 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4488 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4489 elim_i1, elim_i0);
4490 else
4491 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4492 elim_i2, elim_i1, elim_i0);
4495 if (i2dest_in_i2src)
4497 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4498 if (newi2pat && reg_set_p (i2dest, newi2pat))
4499 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4500 NULL_RTX, NULL_RTX);
4501 else
4502 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4503 NULL_RTX, NULL_RTX, NULL_RTX);
4506 if (i1dest_in_i1src)
4508 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4509 if (newi2pat && reg_set_p (i1dest, newi2pat))
4510 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4511 NULL_RTX, NULL_RTX);
4512 else
4513 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4514 NULL_RTX, NULL_RTX, NULL_RTX);
4517 if (i0dest_in_i0src)
4519 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4520 if (newi2pat && reg_set_p (i0dest, newi2pat))
4521 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4522 NULL_RTX, NULL_RTX);
4523 else
4524 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4525 NULL_RTX, NULL_RTX, NULL_RTX);
4528 distribute_links (i3links);
4529 distribute_links (i2links);
4530 distribute_links (i1links);
4531 distribute_links (i0links);
4533 if (REG_P (i2dest))
4535 struct insn_link *link;
4536 rtx_insn *i2_insn = 0;
4537 rtx i2_val = 0, set;
4539 /* The insn that used to set this register doesn't exist, and
4540 this life of the register may not exist either. See if one of
4541 I3's links points to an insn that sets I2DEST. If it does,
4542 that is now the last known value for I2DEST. If we don't update
4543 this and I2 set the register to a value that depended on its old
4544 contents, we will get confused. If this insn is used, things
4545 will be set correctly in combine_instructions. */
4546 FOR_EACH_LOG_LINK (link, i3)
4547 if ((set = single_set (link->insn)) != 0
4548 && rtx_equal_p (i2dest, SET_DEST (set)))
4549 i2_insn = link->insn, i2_val = SET_SRC (set);
4551 record_value_for_reg (i2dest, i2_insn, i2_val);
4553 /* If the reg formerly set in I2 died only once and that was in I3,
4554 zero its use count so it won't make `reload' do any work. */
4555 if (! added_sets_2
4556 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4557 && ! i2dest_in_i2src
4558 && REGNO (i2dest) < reg_n_sets_max)
4559 INC_REG_N_SETS (REGNO (i2dest), -1);
4562 if (i1 && REG_P (i1dest))
4564 struct insn_link *link;
4565 rtx_insn *i1_insn = 0;
4566 rtx i1_val = 0, set;
4568 FOR_EACH_LOG_LINK (link, i3)
4569 if ((set = single_set (link->insn)) != 0
4570 && rtx_equal_p (i1dest, SET_DEST (set)))
4571 i1_insn = link->insn, i1_val = SET_SRC (set);
4573 record_value_for_reg (i1dest, i1_insn, i1_val);
4575 if (! added_sets_1
4576 && ! i1dest_in_i1src
4577 && REGNO (i1dest) < reg_n_sets_max)
4578 INC_REG_N_SETS (REGNO (i1dest), -1);
4581 if (i0 && REG_P (i0dest))
4583 struct insn_link *link;
4584 rtx_insn *i0_insn = 0;
4585 rtx i0_val = 0, set;
4587 FOR_EACH_LOG_LINK (link, i3)
4588 if ((set = single_set (link->insn)) != 0
4589 && rtx_equal_p (i0dest, SET_DEST (set)))
4590 i0_insn = link->insn, i0_val = SET_SRC (set);
4592 record_value_for_reg (i0dest, i0_insn, i0_val);
4594 if (! added_sets_0
4595 && ! i0dest_in_i0src
4596 && REGNO (i0dest) < reg_n_sets_max)
4597 INC_REG_N_SETS (REGNO (i0dest), -1);
4600 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4601 been made to this insn. The order is important, because newi2pat
4602 can affect nonzero_bits of newpat. */
4603 if (newi2pat)
4604 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4605 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4608 if (undobuf.other_insn != NULL_RTX)
4610 if (dump_file)
4612 fprintf (dump_file, "modifying other_insn ");
4613 dump_insn_slim (dump_file, undobuf.other_insn);
4615 df_insn_rescan (undobuf.other_insn);
4618 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4620 if (dump_file)
4622 fprintf (dump_file, "modifying insn i0 ");
4623 dump_insn_slim (dump_file, i0);
4625 df_insn_rescan (i0);
4628 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4630 if (dump_file)
4632 fprintf (dump_file, "modifying insn i1 ");
4633 dump_insn_slim (dump_file, i1);
4635 df_insn_rescan (i1);
4638 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4640 if (dump_file)
4642 fprintf (dump_file, "modifying insn i2 ");
4643 dump_insn_slim (dump_file, i2);
4645 df_insn_rescan (i2);
4648 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4650 if (dump_file)
4652 fprintf (dump_file, "modifying insn i3 ");
4653 dump_insn_slim (dump_file, i3);
4655 df_insn_rescan (i3);
4658 /* Set new_direct_jump_p if a new return or simple jump instruction
4659 has been created. Adjust the CFG accordingly. */
4660 if (returnjump_p (i3) || any_uncondjump_p (i3))
4662 *new_direct_jump_p = 1;
4663 mark_jump_label (PATTERN (i3), i3, 0);
4664 update_cfg_for_uncondjump (i3);
4667 if (undobuf.other_insn != NULL_RTX
4668 && (returnjump_p (undobuf.other_insn)
4669 || any_uncondjump_p (undobuf.other_insn)))
4671 *new_direct_jump_p = 1;
4672 update_cfg_for_uncondjump (undobuf.other_insn);
4675 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4676 && XEXP (PATTERN (i3), 0) == const1_rtx)
4678 basic_block bb = BLOCK_FOR_INSN (i3);
4679 gcc_assert (bb);
4680 remove_edge (split_block (bb, i3));
4681 emit_barrier_after_bb (bb);
4682 *new_direct_jump_p = 1;
4685 if (undobuf.other_insn
4686 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4687 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4689 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4690 gcc_assert (bb);
4691 remove_edge (split_block (bb, undobuf.other_insn));
4692 emit_barrier_after_bb (bb);
4693 *new_direct_jump_p = 1;
4696 /* A noop might also need cleaning up of the CFG, if it comes from the
4697 simplification of a jump. */
4698 if (JUMP_P (i3)
4699 && GET_CODE (newpat) == SET
4700 && SET_SRC (newpat) == pc_rtx
4701 && SET_DEST (newpat) == pc_rtx)
4703 *new_direct_jump_p = 1;
4704 update_cfg_for_uncondjump (i3);
4707 if (undobuf.other_insn != NULL_RTX
4708 && JUMP_P (undobuf.other_insn)
4709 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4710 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4711 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4713 *new_direct_jump_p = 1;
4714 update_cfg_for_uncondjump (undobuf.other_insn);
4717 combine_successes++;
4718 undo_commit ();
4720 if (added_links_insn
4721 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4722 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4723 return added_links_insn;
4724 else
4725 return newi2pat ? i2 : i3;
4728 /* Get a marker for undoing to the current state. */
4730 static void *
4731 get_undo_marker (void)
4733 return undobuf.undos;
4736 /* Undo the modifications up to the marker. */
4738 static void
4739 undo_to_marker (void *marker)
4741 struct undo *undo, *next;
4743 for (undo = undobuf.undos; undo != marker; undo = next)
4745 gcc_assert (undo);
4747 next = undo->next;
4748 switch (undo->kind)
4750 case UNDO_RTX:
4751 *undo->where.r = undo->old_contents.r;
4752 break;
4753 case UNDO_INT:
4754 *undo->where.i = undo->old_contents.i;
4755 break;
4756 case UNDO_MODE:
4757 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4758 break;
4759 case UNDO_LINKS:
4760 *undo->where.l = undo->old_contents.l;
4761 break;
4762 default:
4763 gcc_unreachable ();
4766 undo->next = undobuf.frees;
4767 undobuf.frees = undo;
4770 undobuf.undos = (struct undo *) marker;
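/* A typical caller records a marker, makes speculative SUBSTs, and
   unwinds them if they turn out not to help -- roughly:

	void *marker = get_undo_marker ();
	... SUBST (...) ...
	if (<not an improvement>)
	  undo_to_marker (marker);  */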
4773 /* Undo all the modifications recorded in undobuf. */
4775 static void
4776 undo_all (void)
4778 undo_to_marker (0);
4781 /* We've committed to accepting the changes we made. Move all
4782 of the undos to the free list. */
4784 static void
4785 undo_commit (void)
4787 struct undo *undo, *next;
4789 for (undo = undobuf.undos; undo; undo = next)
4791 next = undo->next;
4792 undo->next = undobuf.frees;
4793 undobuf.frees = undo;
4795 undobuf.undos = 0;
4798 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4799 where we have an arithmetic expression and return that point. LOC will
4800 be inside INSN.
4802 try_combine will call this function to see if an insn can be split into
4803 two insns. */
4805 static rtx *
4806 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4808 rtx x = *loc;
4809 enum rtx_code code = GET_CODE (x);
4810 rtx *split;
4811 unsigned HOST_WIDE_INT len = 0;
4812 HOST_WIDE_INT pos = 0;
4813 int unsignedp = 0;
4814 rtx inner = NULL_RTX;
4815 scalar_int_mode mode, inner_mode;
4817 /* First special-case some codes. */
4818 switch (code)
4820 case SUBREG:
4821 #ifdef INSN_SCHEDULING
4822 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4823 point. */
4824 if (MEM_P (SUBREG_REG (x)))
4825 return loc;
4826 #endif
4827 return find_split_point (&SUBREG_REG (x), insn, false);
4829 case MEM:
4830 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4831 using LO_SUM and HIGH. */
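      /* I.e. (mem (symbol_ref X)) becomes
	 (mem (lo_sum (high (symbol_ref X)) (symbol_ref X))),
	 and the HIGH subexpression is returned as the split point.  */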
4832 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4833 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4835 machine_mode address_mode = get_address_mode (x);
4837 SUBST (XEXP (x, 0),
4838 gen_rtx_LO_SUM (address_mode,
4839 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4840 XEXP (x, 0)));
4841 return &XEXP (XEXP (x, 0), 0);
4844 /* If we have a PLUS whose second operand is a constant and the
4845 address is not valid, perhaps we can split it up using
4846 the machine-specific way to split large constants. We use
4847 the first pseudo-reg (one of the virtual regs) as a placeholder;
4848 it will not remain in the result. */
4849 if (GET_CODE (XEXP (x, 0)) == PLUS
4850 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4851 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4852 MEM_ADDR_SPACE (x)))
4854 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4855 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4856 subst_insn);
4858 /* This should have produced two insns, each of which sets our
4859 placeholder. If the source of the second is a valid address,
4860 we can put both sources together and make a split point
4861 in the middle. */
4863 if (seq
4864 && NEXT_INSN (seq) != NULL_RTX
4865 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4866 && NONJUMP_INSN_P (seq)
4867 && GET_CODE (PATTERN (seq)) == SET
4868 && SET_DEST (PATTERN (seq)) == reg
4869 && ! reg_mentioned_p (reg,
4870 SET_SRC (PATTERN (seq)))
4871 && NONJUMP_INSN_P (NEXT_INSN (seq))
4872 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4873 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4874 && memory_address_addr_space_p
4875 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4876 MEM_ADDR_SPACE (x)))
4878 rtx src1 = SET_SRC (PATTERN (seq));
4879 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4881 /* Replace the placeholder in SRC2 with SRC1. If we can
4882 find where in SRC2 it was placed, that can become our
4883 split point and we can replace this address with SRC2.
4884 Just try two obvious places. */
4886 src2 = replace_rtx (src2, reg, src1);
4887 split = 0;
4888 if (XEXP (src2, 0) == src1)
4889 split = &XEXP (src2, 0);
4890 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4891 && XEXP (XEXP (src2, 0), 0) == src1)
4892 split = &XEXP (XEXP (src2, 0), 0);
4894 if (split)
4896 SUBST (XEXP (x, 0), src2);
4897 return split;
4901 /* If that didn't work, perhaps the first operand is complex and
4902 needs to be computed separately, so make a split point there.
4903 This will occur on machines that just support REG + CONST
4904 and have a constant moved through some previous computation. */
4906 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4907 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4908 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4909 return &XEXP (XEXP (x, 0), 0);
4912 /* If we have a PLUS whose first operand is complex, try computing it
4913 separately by making a split there. */
4914 if (GET_CODE (XEXP (x, 0)) == PLUS
4915 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4916 MEM_ADDR_SPACE (x))
4917 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4918 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4919 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4920 return &XEXP (XEXP (x, 0), 0);
4921 break;
4923 case SET:
4924 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4925 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4926 we need to put the operand into a register. So split at that
4927 point. */
4929 if (SET_DEST (x) == cc0_rtx
4930 && GET_CODE (SET_SRC (x)) != COMPARE
4931 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4932 && !OBJECT_P (SET_SRC (x))
4933 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4934 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4935 return &SET_SRC (x);
4937 /* See if we can split SET_SRC as it stands. */
4938 split = find_split_point (&SET_SRC (x), insn, true);
4939 if (split && split != &SET_SRC (x))
4940 return split;
4942 /* See if we can split SET_DEST as it stands. */
4943 split = find_split_point (&SET_DEST (x), insn, false);
4944 if (split && split != &SET_DEST (x))
4945 return split;
4947 /* See if this is a bitfield assignment with everything constant. If
4948 so, this is an IOR of an AND, so split it into that. */
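      /* E.g. storing the constant SRC into a LEN-bit field at POS becomes
	 DEST = (DEST & ~(MASK << POS)) | (SRC << POS), with
	 MASK = (1 << LEN) - 1, or simply DEST | (SRC << POS) when SRC is
	 all ones.  */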
4949 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4950 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4951 &inner_mode)
4952 && HWI_COMPUTABLE_MODE_P (inner_mode)
4953 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4954 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4955 && CONST_INT_P (SET_SRC (x))
4956 && ((INTVAL (XEXP (SET_DEST (x), 1))
4957 + INTVAL (XEXP (SET_DEST (x), 2)))
4958 <= GET_MODE_PRECISION (inner_mode))
4959 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4961 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4962 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4963 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4964 rtx dest = XEXP (SET_DEST (x), 0);
4965 unsigned HOST_WIDE_INT mask
4966 = (HOST_WIDE_INT_1U << len) - 1;
4967 rtx or_mask;
4969 if (BITS_BIG_ENDIAN)
4970 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
4972 or_mask = gen_int_mode (src << pos, inner_mode);
4973 if (src == mask)
4974 SUBST (SET_SRC (x),
4975 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
4976 else
4978 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
4979 SUBST (SET_SRC (x),
4980 simplify_gen_binary (IOR, inner_mode,
4981 simplify_gen_binary (AND, inner_mode,
4982 dest, negmask),
4983 or_mask));
4986 SUBST (SET_DEST (x), dest);
4988 split = find_split_point (&SET_SRC (x), insn, true);
4989 if (split && split != &SET_SRC (x))
4990 return split;
4993 /* Otherwise, see if this is an operation that we can split into two.
4994 If so, try to split that. */
4995 code = GET_CODE (SET_SRC (x));
4997 switch (code)
4999 case AND:
5000 /* If we are AND'ing with a large constant that is only a single
5001 bit and the result is only being used in a context where we
5002 need to know if it is zero or nonzero, replace it with a bit
5003 extraction. This will avoid the large constant, which might
5004 have taken more than one insn to make. If the constant were
5005 not a valid argument to the AND but took only one insn to make,
5006 this is no worse, but if it took more than one insn, it will
5007 be better. */
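	  /* For example, (set D (and X (const_int 0x8000))), where D is
	     only ever compared against zero, can be replaced by an
	     extraction of bit 15 of X, avoiding the large constant.  */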
5009 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5010 && REG_P (XEXP (SET_SRC (x), 0))
5011 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5012 && REG_P (SET_DEST (x))
5013 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5014 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5015 && XEXP (*split, 0) == SET_DEST (x)
5016 && XEXP (*split, 1) == const0_rtx)
5018 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5019 XEXP (SET_SRC (x), 0),
5020 pos, NULL_RTX, 1, 1, 0, 0);
5021 if (extraction != 0)
5023 SUBST (SET_SRC (x), extraction);
5024 return find_split_point (loc, insn, false);
5027 break;
5029 case NE:
5030 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5031 is known to be on, this can be converted into a NEG of a shift. */
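	  /* E.g. if only bit 3 of X can be nonzero, (ne X (const_int 0))
	     becomes (neg (lshiftrt X (const_int 3))), which is 0 or -1 as
	     required when STORE_FLAG_VALUE is -1.  */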
5032 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5033 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5034 && 1 <= (pos = exact_log2
5035 (nonzero_bits (XEXP (SET_SRC (x), 0),
5036 GET_MODE (XEXP (SET_SRC (x), 0))))))
5038 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5040 SUBST (SET_SRC (x),
5041 gen_rtx_NEG (mode,
5042 gen_rtx_LSHIFTRT (mode,
5043 XEXP (SET_SRC (x), 0),
5044 GEN_INT (pos))));
5046 split = find_split_point (&SET_SRC (x), insn, true);
5047 if (split && split != &SET_SRC (x))
5048 return split;
5050 break;
5052 case SIGN_EXTEND:
5053 inner = XEXP (SET_SRC (x), 0);
5055 /* We can't optimize if either mode is a partial integer
5056 mode as we don't know how many bits are significant
5057 in those modes. */
5058 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5059 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5060 break;
5062 pos = 0;
5063 len = GET_MODE_PRECISION (inner_mode);
5064 unsignedp = 0;
5065 break;
5067 case SIGN_EXTRACT:
5068 case ZERO_EXTRACT:
5069 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5070 &inner_mode)
5071 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5072 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5074 inner = XEXP (SET_SRC (x), 0);
5075 len = INTVAL (XEXP (SET_SRC (x), 1));
5076 pos = INTVAL (XEXP (SET_SRC (x), 2));
5078 if (BITS_BIG_ENDIAN)
5079 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5080 unsignedp = (code == ZERO_EXTRACT);
5082 break;
5084 default:
5085 break;
5088 if (len && pos >= 0
5089 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5090 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5092 /* For unsigned, we have a choice of a shift followed by an
5093 AND or two shifts. Use two shifts for field sizes where the
5094 constant might be too large. We assume here that we can
5095 always at least get 8-bit constants in an AND insn, which is
5096 true for every current RISC. */
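	  /* E.g. an 8-bit unsigned field at bit POS becomes
	     (and (lshiftrt X POS) 0xff); wider fields, and the signed case,
	     instead use (lshiftrt or ashiftrt (ashift X (PREC - LEN - POS))
	     (PREC - LEN)).  */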
5098 if (unsignedp && len <= 8)
5100 unsigned HOST_WIDE_INT mask
5101 = (HOST_WIDE_INT_1U << len) - 1;
5102 SUBST (SET_SRC (x),
5103 gen_rtx_AND (mode,
5104 gen_rtx_LSHIFTRT
5105 (mode, gen_lowpart (mode, inner),
5106 GEN_INT (pos)),
5107 gen_int_mode (mask, mode)));
5109 split = find_split_point (&SET_SRC (x), insn, true);
5110 if (split && split != &SET_SRC (x))
5111 return split;
5113 else
5115 SUBST (SET_SRC (x),
5116 gen_rtx_fmt_ee
5117 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5118 gen_rtx_ASHIFT (mode,
5119 gen_lowpart (mode, inner),
5120 GEN_INT (GET_MODE_PRECISION (mode)
5121 - len - pos)),
5122 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5124 split = find_split_point (&SET_SRC (x), insn, true);
5125 if (split && split != &SET_SRC (x))
5126 return split;
5130 /* See if this is a simple operation with a constant as the second
5131 operand. It might be that this constant is out of range and hence
5132 could be used as a split point. */
5133 if (BINARY_P (SET_SRC (x))
5134 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5135 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5136 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5137 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5138 return &XEXP (SET_SRC (x), 1);
5140 /* Finally, see if this is a simple operation with its first operand
5141 not in a register. The operation might require this operand in a
5142 register, so return it as a split point. We can always do this
5143 because if the first operand were another operation, we would have
5144 already found it as a split point. */
5145 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5146 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5147 return &XEXP (SET_SRC (x), 0);
5149 return 0;
5151 case AND:
5152 case IOR:
5153 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5154 it is better to write this as (not (ior A B)) so we can split it.
5155 Similarly for IOR. */
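	  /* Added note (not in the original source): this is just De Morgan's
	     law, e.g. (and (not A) (not B)) == (not (ior A B)), so a machine
	     without NOR can still match the IOR and the final NOT
	     separately.  */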
5156 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5158 SUBST (*loc,
5159 gen_rtx_NOT (GET_MODE (x),
5160 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5161 GET_MODE (x),
5162 XEXP (XEXP (x, 0), 0),
5163 XEXP (XEXP (x, 1), 0))));
5164 return find_split_point (loc, insn, set_src);
5167 /* Many RISC machines have a large set of logical insns. If the
5168 second operand is a NOT, put it first so we will try to split the
5169 other operand first. */
5170 if (GET_CODE (XEXP (x, 1)) == NOT)
5172 rtx tem = XEXP (x, 0);
5173 SUBST (XEXP (x, 0), XEXP (x, 1));
5174 SUBST (XEXP (x, 1), tem);
5176 break;
5178 case PLUS:
5179 case MINUS:
5180 /* Canonicalization can produce (minus A (mult B C)), where C is a
5181 constant. It may be better to try splitting (plus (mult B -C) A)
5182 instead if this isn't a multiply by a power of two. */
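	  /* Added illustration (not in the original source): e.g.
	     (minus A (mult B 3)) is retried as (plus (mult B -3) A), which a
	     target with a multiply-add pattern may be able to split and match
	     even though it has no multiply-subtract.  */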
5183 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5184 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5185 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5187 machine_mode mode = GET_MODE (x);
5188 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5189 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5190 SUBST (*loc, gen_rtx_PLUS (mode,
5191 gen_rtx_MULT (mode,
5192 XEXP (XEXP (x, 1), 0),
5193 gen_int_mode (other_int,
5194 mode)),
5195 XEXP (x, 0)));
5196 return find_split_point (loc, insn, set_src);
5199 	  /* Split at a multiply-accumulate instruction. However, if this is
5200 the SET_SRC, we likely do not have such an instruction and it's
5201 worthless to try this split. */
5202 if (!set_src
5203 && (GET_CODE (XEXP (x, 0)) == MULT
5204 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5205 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5206 return loc;
5208 default:
5209 break;
5212 /* Otherwise, select our actions depending on our rtx class. */
5213 switch (GET_RTX_CLASS (code))
5215 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5216 case RTX_TERNARY:
5217 split = find_split_point (&XEXP (x, 2), insn, false);
5218 if (split)
5219 return split;
5220 /* fall through */
5221 case RTX_BIN_ARITH:
5222 case RTX_COMM_ARITH:
5223 case RTX_COMPARE:
5224 case RTX_COMM_COMPARE:
5225 split = find_split_point (&XEXP (x, 1), insn, false);
5226 if (split)
5227 return split;
5228 /* fall through */
5229 case RTX_UNARY:
5230 /* Some machines have (and (shift ...) ...) insns. If X is not
5231 an AND, but XEXP (X, 0) is, use it as our split point. */
5232 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5233 return &XEXP (x, 0);
5235 split = find_split_point (&XEXP (x, 0), insn, false);
5236 if (split)
5237 return split;
5238 return loc;
5240 default:
5241 /* Otherwise, we don't have a split point. */
5242 return 0;
5246 /* Throughout X, replace FROM with TO, and return the result.
5247 The result is TO if X is FROM;
5248 otherwise the result is X, but its contents may have been modified.
5249 If they were modified, a record was made in undobuf so that
5250 undo_all will (among other things) return X to its original state.
5252    If the number of changes necessary is too large to record for undoing,
5253 the excess changes are not made, so the result is invalid.
5254 The changes already made can still be undone.
5255    undobuf.num_undo is incremented for such changes, so by testing that,
5256 the caller can tell whether the result is valid.
5258 `n_occurrences' is incremented each time FROM is replaced.
5260 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5262 IN_COND is nonzero if we are at the top level of a condition.
5264 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5265 by copying if `n_occurrences' is nonzero. */
5267 static rtx
5268 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5270 enum rtx_code code = GET_CODE (x);
5271 machine_mode op0_mode = VOIDmode;
5272 const char *fmt;
5273 int len, i;
5274 rtx new_rtx;
5276 /* Two expressions are equal if they are identical copies of a shared
5277 RTX or if they are both registers with the same register number
5278 and mode. */
5280 #define COMBINE_RTX_EQUAL_P(X,Y) \
5281 ((X) == (Y) \
5282 || (REG_P (X) && REG_P (Y) \
5283 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5285 /* Do not substitute into clobbers of regs -- this will never result in
5286 valid RTL. */
5287 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5288 return x;
5290 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5292 n_occurrences++;
5293 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5296 /* If X and FROM are the same register but different modes, they
5297 will not have been seen as equal above. However, the log links code
5298 will make a LOG_LINKS entry for that case. If we do nothing, we
5299 will try to rerecognize our original insn and, when it succeeds,
5300 we will delete the feeding insn, which is incorrect.
5302 So force this insn not to match in this (rare) case. */
5303 if (! in_dest && code == REG && REG_P (from)
5304 && reg_overlap_mentioned_p (x, from))
5305 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5307 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5308 of which may contain things that can be combined. */
5309 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5310 return x;
5312 /* It is possible to have a subexpression appear twice in the insn.
5313 Suppose that FROM is a register that appears within TO.
5314 Then, after that subexpression has been scanned once by `subst',
5315 the second time it is scanned, TO may be found. If we were
5316 to scan TO here, we would find FROM within it and create a
5317 self-referent rtl structure which is completely wrong. */
5318 if (COMBINE_RTX_EQUAL_P (x, to))
5319 return to;
5321 /* Parallel asm_operands need special attention because all of the
5322 inputs are shared across the arms. Furthermore, unsharing the
5323 rtl results in recognition failures. Failure to handle this case
5324 specially can result in circular rtl.
5326 Solve this by doing a normal pass across the first entry of the
5327 parallel, and only processing the SET_DESTs of the subsequent
5328 entries. Ug. */
5330 if (code == PARALLEL
5331 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5332 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5334 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5336 /* If this substitution failed, this whole thing fails. */
5337 if (GET_CODE (new_rtx) == CLOBBER
5338 && XEXP (new_rtx, 0) == const0_rtx)
5339 return new_rtx;
5341 SUBST (XVECEXP (x, 0, 0), new_rtx);
5343 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5345 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5347 if (!REG_P (dest)
5348 && GET_CODE (dest) != CC0
5349 && GET_CODE (dest) != PC)
5351 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5353 /* If this substitution failed, this whole thing fails. */
5354 if (GET_CODE (new_rtx) == CLOBBER
5355 && XEXP (new_rtx, 0) == const0_rtx)
5356 return new_rtx;
5358 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5362 else
5364 len = GET_RTX_LENGTH (code);
5365 fmt = GET_RTX_FORMAT (code);
5367 /* We don't need to process a SET_DEST that is a register, CC0,
5368 or PC, so set up to skip this common case. All other cases
5369 where we want to suppress replacing something inside a
5370 SET_SRC are handled via the IN_DEST operand. */
5371 if (code == SET
5372 && (REG_P (SET_DEST (x))
5373 || GET_CODE (SET_DEST (x)) == CC0
5374 || GET_CODE (SET_DEST (x)) == PC))
5375 fmt = "ie";
5377 /* Trying to simplify the operands of a widening MULT is not likely
5378 to create RTL matching a machine insn. */
5379 if (code == MULT
5380 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5381 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5382 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5383 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5384 && REG_P (XEXP (XEXP (x, 0), 0))
5385 && REG_P (XEXP (XEXP (x, 1), 0))
5386 && from == to)
5387 return x;
5390 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5391 constant. */
5392 if (fmt[0] == 'e')
5393 op0_mode = GET_MODE (XEXP (x, 0));
5395 for (i = 0; i < len; i++)
5397 if (fmt[i] == 'E')
5399 int j;
5400 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5402 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5404 new_rtx = (unique_copy && n_occurrences
5405 ? copy_rtx (to) : to);
5406 n_occurrences++;
5408 else
5410 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5411 unique_copy);
5413 /* If this substitution failed, this whole thing
5414 fails. */
5415 if (GET_CODE (new_rtx) == CLOBBER
5416 && XEXP (new_rtx, 0) == const0_rtx)
5417 return new_rtx;
5420 SUBST (XVECEXP (x, i, j), new_rtx);
5423 else if (fmt[i] == 'e')
5425 /* If this is a register being set, ignore it. */
5426 new_rtx = XEXP (x, i);
5427 if (in_dest
5428 && i == 0
5429 && (((code == SUBREG || code == ZERO_EXTRACT)
5430 && REG_P (new_rtx))
5431 || code == STRICT_LOW_PART))
5434 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5436 /* In general, don't install a subreg involving two
5437 modes not tieable. It can worsen register
5438 allocation, and can even make invalid reload
5439 insns, since the reg inside may need to be copied
5440 from in the outside mode, and that may be invalid
5441 if it is an fp reg copied in integer mode.
5443 We allow two exceptions to this: It is valid if
5444 it is inside another SUBREG and the mode of that
5445 SUBREG and the mode of the inside of TO is
5446 tieable and it is valid if X is a SET that copies
5447 FROM to CC0. */
5449 if (GET_CODE (to) == SUBREG
5450 && !targetm.modes_tieable_p (GET_MODE (to),
5451 GET_MODE (SUBREG_REG (to)))
5452 && ! (code == SUBREG
5453 && (targetm.modes_tieable_p
5454 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5455 && (!HAVE_cc0
5456 || (! (code == SET
5457 && i == 1
5458 && XEXP (x, 0) == cc0_rtx))))
5459 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5461 if (code == SUBREG
5462 && REG_P (to)
5463 && REGNO (to) < FIRST_PSEUDO_REGISTER
5464 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5465 SUBREG_BYTE (x),
5466 GET_MODE (x)) < 0)
5467 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5469 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5470 n_occurrences++;
5472 else
5473 /* If we are in a SET_DEST, suppress most cases unless we
5474 have gone inside a MEM, in which case we want to
5475 simplify the address. We assume here that things that
5476 are actually part of the destination have their inner
5477 parts in the first expression. This is true for SUBREG,
5478 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5479 things aside from REG and MEM that should appear in a
5480 SET_DEST. */
5481 new_rtx = subst (XEXP (x, i), from, to,
5482 (((in_dest
5483 && (code == SUBREG || code == STRICT_LOW_PART
5484 || code == ZERO_EXTRACT))
5485 || code == SET)
5486 && i == 0),
5487 code == IF_THEN_ELSE && i == 0,
5488 unique_copy);
5490 /* If we found that we will have to reject this combination,
5491 indicate that by returning the CLOBBER ourselves, rather than
5492 an expression containing it. This will speed things up as
5493 well as prevent accidents where two CLOBBERs are considered
5494 to be equal, thus producing an incorrect simplification. */
5496 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5497 return new_rtx;
5499 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5501 machine_mode mode = GET_MODE (x);
5503 x = simplify_subreg (GET_MODE (x), new_rtx,
5504 GET_MODE (SUBREG_REG (x)),
5505 SUBREG_BYTE (x));
5506 if (! x)
5507 x = gen_rtx_CLOBBER (mode, const0_rtx);
5509 else if (CONST_SCALAR_INT_P (new_rtx)
5510 && GET_CODE (x) == ZERO_EXTEND)
5512 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5513 new_rtx, GET_MODE (XEXP (x, 0)));
5514 gcc_assert (x);
5516 else
5517 SUBST (XEXP (x, i), new_rtx);
5522 /* Check if we are loading something from the constant pool via float
5523 extension; in this case we would undo compress_float_constant
5524 optimization and degenerate constant load to an immediate value. */
5525 if (GET_CODE (x) == FLOAT_EXTEND
5526 && MEM_P (XEXP (x, 0))
5527 && MEM_READONLY_P (XEXP (x, 0)))
5529 rtx tmp = avoid_constant_pool_reference (x);
5530 if (x != tmp)
5531 return x;
5534 /* Try to simplify X. If the simplification changed the code, it is likely
5535 that further simplification will help, so loop, but limit the number
5536 of repetitions that will be performed. */
5538 for (i = 0; i < 4; i++)
5540 /* If X is sufficiently simple, don't bother trying to do anything
5541 with it. */
5542 if (code != CONST_INT && code != REG && code != CLOBBER)
5543 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5545 if (GET_CODE (x) == code)
5546 break;
5548 code = GET_CODE (x);
5550 /* We no longer know the original mode of operand 0 since we
5551 	 have changed the form of X.  */
5552 op0_mode = VOIDmode;
5555 return x;
5558 /* If X is a commutative operation whose operands are not in the canonical
5559 order, use substitutions to swap them. */
5561 static void
5562 maybe_swap_commutative_operands (rtx x)
5564 if (COMMUTATIVE_ARITH_P (x)
5565 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5567 rtx temp = XEXP (x, 0);
5568 SUBST (XEXP (x, 0), XEXP (x, 1));
5569 SUBST (XEXP (x, 1), temp);
5573 /* Simplify X, a piece of RTL. We just operate on the expression at the
5574 outer level; call `subst' to simplify recursively. Return the new
5575 expression.
5577 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5578 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5579 of a condition. */
5581 static rtx
5582 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5583 int in_cond)
5585 enum rtx_code code = GET_CODE (x);
5586 machine_mode mode = GET_MODE (x);
5587 scalar_int_mode int_mode;
5588 rtx temp;
5589 int i;
5591 /* If this is a commutative operation, put a constant last and a complex
5592 expression first. We don't need to do this for comparisons here. */
5593 maybe_swap_commutative_operands (x);
5595 /* Try to fold this expression in case we have constants that weren't
5596 present before. */
5597 temp = 0;
5598 switch (GET_RTX_CLASS (code))
5600 case RTX_UNARY:
5601 if (op0_mode == VOIDmode)
5602 op0_mode = GET_MODE (XEXP (x, 0));
5603 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5604 break;
5605 case RTX_COMPARE:
5606 case RTX_COMM_COMPARE:
5608 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5609 if (cmp_mode == VOIDmode)
5611 cmp_mode = GET_MODE (XEXP (x, 1));
5612 if (cmp_mode == VOIDmode)
5613 cmp_mode = op0_mode;
5615 temp = simplify_relational_operation (code, mode, cmp_mode,
5616 XEXP (x, 0), XEXP (x, 1));
5618 break;
5619 case RTX_COMM_ARITH:
5620 case RTX_BIN_ARITH:
5621 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5622 break;
5623 case RTX_BITFIELD_OPS:
5624 case RTX_TERNARY:
5625 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5626 XEXP (x, 1), XEXP (x, 2));
5627 break;
5628 default:
5629 break;
5632 if (temp)
5634 x = temp;
5635 code = GET_CODE (temp);
5636 op0_mode = VOIDmode;
5637 mode = GET_MODE (temp);
5640 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5641 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5642 things. Check for cases where both arms are testing the same
5643 condition.
5645 Don't do anything if all operands are very simple. */
5647 if ((BINARY_P (x)
5648 && ((!OBJECT_P (XEXP (x, 0))
5649 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5650 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5651 || (!OBJECT_P (XEXP (x, 1))
5652 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5653 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5654 || (UNARY_P (x)
5655 && (!OBJECT_P (XEXP (x, 0))
5656 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5657 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5659 rtx cond, true_rtx, false_rtx;
5661 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5662 if (cond != 0
5663 /* If everything is a comparison, what we have is highly unlikely
5664 to be simpler, so don't use it. */
5665 && ! (COMPARISON_P (x)
5666 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5668 rtx cop1 = const0_rtx;
5669 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5671 if (cond_code == NE && COMPARISON_P (cond))
5672 return x;
5674 /* Simplify the alternative arms; this may collapse the true and
5675 false arms to store-flag values. Be careful to use copy_rtx
5676 here since true_rtx or false_rtx might share RTL with x as a
5677 result of the if_then_else_cond call above. */
5678 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5679 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5681 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5682 is unlikely to be simpler. */
5683 if (general_operand (true_rtx, VOIDmode)
5684 && general_operand (false_rtx, VOIDmode))
5686 enum rtx_code reversed;
5688 /* Restarting if we generate a store-flag expression will cause
5689 us to loop. Just drop through in this case. */
5691 /* If the result values are STORE_FLAG_VALUE and zero, we can
5692 just make the comparison operation. */
5693 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5694 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5695 cond, cop1);
5696 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5697 && ((reversed = reversed_comparison_code_parts
5698 (cond_code, cond, cop1, NULL))
5699 != UNKNOWN))
5700 x = simplify_gen_relational (reversed, mode, VOIDmode,
5701 cond, cop1);
5703 /* Likewise, we can make the negate of a comparison operation
5704 if the result values are - STORE_FLAG_VALUE and zero. */
5705 else if (CONST_INT_P (true_rtx)
5706 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5707 && false_rtx == const0_rtx)
5708 x = simplify_gen_unary (NEG, mode,
5709 simplify_gen_relational (cond_code,
5710 mode, VOIDmode,
5711 cond, cop1),
5712 mode);
5713 else if (CONST_INT_P (false_rtx)
5714 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5715 && true_rtx == const0_rtx
5716 && ((reversed = reversed_comparison_code_parts
5717 (cond_code, cond, cop1, NULL))
5718 != UNKNOWN))
5719 x = simplify_gen_unary (NEG, mode,
5720 simplify_gen_relational (reversed,
5721 mode, VOIDmode,
5722 cond, cop1),
5723 mode);
5724 else
5725 return gen_rtx_IF_THEN_ELSE (mode,
5726 simplify_gen_relational (cond_code,
5727 mode,
5728 VOIDmode,
5729 cond,
5730 cop1),
5731 true_rtx, false_rtx);
5733 code = GET_CODE (x);
5734 op0_mode = VOIDmode;
5739 /* First see if we can apply the inverse distributive law. */
5740 if (code == PLUS || code == MINUS
5741 || code == AND || code == IOR || code == XOR)
5743 x = apply_distributive_law (x);
5744 code = GET_CODE (x);
5745 op0_mode = VOIDmode;
5748 /* If CODE is an associative operation not otherwise handled, see if we
5749 can associate some operands. This can win if they are constants or
5750 if they are logically related (i.e. (a & b) & a). */
5751 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5752 || code == AND || code == IOR || code == XOR
5753 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5754 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5755 || (flag_associative_math && FLOAT_MODE_P (mode))))
5757 if (GET_CODE (XEXP (x, 0)) == code)
5759 rtx other = XEXP (XEXP (x, 0), 0);
5760 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5761 rtx inner_op1 = XEXP (x, 1);
5762 rtx inner;
5764 /* Make sure we pass the constant operand if any as the second
5765 one if this is a commutative operation. */
5766 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5767 std::swap (inner_op0, inner_op1);
5768 inner = simplify_binary_operation (code == MINUS ? PLUS
5769 : code == DIV ? MULT
5770 : code,
5771 mode, inner_op0, inner_op1);
5773 /* For commutative operations, try the other pair if that one
5774 didn't simplify. */
5775 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5777 other = XEXP (XEXP (x, 0), 1);
5778 inner = simplify_binary_operation (code, mode,
5779 XEXP (XEXP (x, 0), 0),
5780 XEXP (x, 1));
5783 if (inner)
5784 return simplify_gen_binary (code, mode, other, inner);
5788 /* A little bit of algebraic simplification here. */
5789 switch (code)
5791 case MEM:
5792 /* Ensure that our address has any ASHIFTs converted to MULT in case
5793 address-recognizing predicates are called later. */
5794 temp = make_compound_operation (XEXP (x, 0), MEM);
5795 SUBST (XEXP (x, 0), temp);
5796 break;
5798 case SUBREG:
5799 if (op0_mode == VOIDmode)
5800 op0_mode = GET_MODE (SUBREG_REG (x));
5802 /* See if this can be moved to simplify_subreg. */
5803 if (CONSTANT_P (SUBREG_REG (x))
5804 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5805 /* Don't call gen_lowpart if the inner mode
5806 is VOIDmode and we cannot simplify it, as SUBREG without
5807 inner mode is invalid. */
5808 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5809 || gen_lowpart_common (mode, SUBREG_REG (x))))
5810 return gen_lowpart (mode, SUBREG_REG (x));
5812 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5813 break;
5815 rtx temp;
5816 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5817 SUBREG_BYTE (x));
5818 if (temp)
5819 return temp;
5821 /* If op is known to have all lower bits zero, the result is zero. */
5822 scalar_int_mode int_mode, int_op0_mode;
5823 if (!in_dest
5824 && is_a <scalar_int_mode> (mode, &int_mode)
5825 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5826 && (GET_MODE_PRECISION (int_mode)
5827 < GET_MODE_PRECISION (int_op0_mode))
5828 && (subreg_lowpart_offset (int_mode, int_op0_mode)
5829 == SUBREG_BYTE (x))
5830 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5831 && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
5832 & GET_MODE_MASK (int_mode)) == 0)
5833 return CONST0_RTX (int_mode);
5836 /* Don't change the mode of the MEM if that would change the meaning
5837 of the address. */
5838 if (MEM_P (SUBREG_REG (x))
5839 && (MEM_VOLATILE_P (SUBREG_REG (x))
5840 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5841 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5842 return gen_rtx_CLOBBER (mode, const0_rtx);
5844 /* Note that we cannot do any narrowing for non-constants since
5845 we might have been counting on using the fact that some bits were
5846 zero. We now do this in the SET. */
5848 break;
5850 case NEG:
5851 temp = expand_compound_operation (XEXP (x, 0));
5853 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5854 replaced by (lshiftrt X C). This will convert
5855 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
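	  /* Added illustration (not in the original source): for SImode, C is
	     31 and -(X >> 31) with an arithmetic shift equals the logical
	     (unsigned) X >> 31: both yield 1 when X is negative and 0
	     otherwise.  */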
5857 if (GET_CODE (temp) == ASHIFTRT
5858 && CONST_INT_P (XEXP (temp, 1))
5859 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5860 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5861 INTVAL (XEXP (temp, 1)));
5863 /* If X has only a single bit that might be nonzero, say, bit I, convert
5864 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5865 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5866 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5867 or a SUBREG of one since we'd be making the expression more
5868 complex if it was just a register. */
5870 if (!REG_P (temp)
5871 && ! (GET_CODE (temp) == SUBREG
5872 && REG_P (SUBREG_REG (temp)))
5873 && is_a <scalar_int_mode> (mode, &int_mode)
5874 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5876 rtx temp1 = simplify_shift_const
5877 (NULL_RTX, ASHIFTRT, int_mode,
5878 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5879 GET_MODE_PRECISION (int_mode) - 1 - i),
5880 GET_MODE_PRECISION (int_mode) - 1 - i);
5882 /* If all we did was surround TEMP with the two shifts, we
5883 haven't improved anything, so don't use it. Otherwise,
5884 we are better off with TEMP1. */
5885 if (GET_CODE (temp1) != ASHIFTRT
5886 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5887 || XEXP (XEXP (temp1, 0), 0) != temp)
5888 return temp1;
5890 break;
5892 case TRUNCATE:
5893 /* We can't handle truncation to a partial integer mode here
5894 because we don't know the real bitsize of the partial
5895 integer mode. */
5896 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5897 break;
5899 if (HWI_COMPUTABLE_MODE_P (mode))
5900 SUBST (XEXP (x, 0),
5901 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5902 GET_MODE_MASK (mode), 0));
5904 /* We can truncate a constant value and return it. */
5905 if (CONST_INT_P (XEXP (x, 0)))
5906 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5908 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5909 whose value is a comparison can be replaced with a subreg if
5910 STORE_FLAG_VALUE permits. */
5911 if (HWI_COMPUTABLE_MODE_P (mode)
5912 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5913 && (temp = get_last_value (XEXP (x, 0)))
5914 && COMPARISON_P (temp))
5915 return gen_lowpart (mode, XEXP (x, 0));
5916 break;
5918 case CONST:
5919 /* (const (const X)) can become (const X). Do it this way rather than
5920 returning the inner CONST since CONST can be shared with a
5921 REG_EQUAL note. */
5922 if (GET_CODE (XEXP (x, 0)) == CONST)
5923 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5924 break;
5926 case LO_SUM:
5927 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5928 can add in an offset. find_split_point will split this address up
5929 again if it doesn't match. */
5930 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5931 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5932 return XEXP (x, 1);
5933 break;
5935 case PLUS:
5936 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5937 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5938 bit-field and can be replaced by either a sign_extend or a
5939 sign_extract. The `and' may be a zero_extend and the two
5940 <c>, -<c> constants may be reversed. */
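	  /* Added worked example (not in the original source): sign-extending
	     a 4-bit field held in the low bits of X can be written as
	     ((X & 15) ^ 8) - 8.  For a field value of 10 (binary 1010, i.e. -6
	     as a signed 4-bit number) this gives (10 ^ 8) - 8 = -6, which is
	     what the sign_extend/sign_extract form computes directly.  */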
5941 if (GET_CODE (XEXP (x, 0)) == XOR
5942 && is_a <scalar_int_mode> (mode, &int_mode)
5943 && CONST_INT_P (XEXP (x, 1))
5944 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5945 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5946 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5947 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5948 && HWI_COMPUTABLE_MODE_P (int_mode)
5949 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5950 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5951 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5952 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5953 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5954 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5955 == (unsigned int) i + 1))))
5956 return simplify_shift_const
5957 (NULL_RTX, ASHIFTRT, int_mode,
5958 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5959 XEXP (XEXP (XEXP (x, 0), 0), 0),
5960 GET_MODE_PRECISION (int_mode) - (i + 1)),
5961 GET_MODE_PRECISION (int_mode) - (i + 1));
5963 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5964 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5965 the bitsize of the mode - 1. This allows simplification of
5966 "a = (b & 8) == 0;" */
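	  /* Added worked example (not in the original source): with X known to
	     be 0 or 1 in SImode, X - 1 equals ((X ^ 1) << 31) >> 31 using an
	     arithmetic right shift: X == 1 gives 0 and X == 0 gives -1, so the
	     store-flag result of "(b & 8) == 0" needs no separate compare.  */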
5967 if (XEXP (x, 1) == constm1_rtx
5968 && !REG_P (XEXP (x, 0))
5969 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5970 && REG_P (SUBREG_REG (XEXP (x, 0))))
5971 && is_a <scalar_int_mode> (mode, &int_mode)
5972 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
5973 return simplify_shift_const
5974 (NULL_RTX, ASHIFTRT, int_mode,
5975 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5976 gen_rtx_XOR (int_mode, XEXP (x, 0),
5977 const1_rtx),
5978 GET_MODE_PRECISION (int_mode) - 1),
5979 GET_MODE_PRECISION (int_mode) - 1);
5981 /* If we are adding two things that have no bits in common, convert
5982 the addition into an IOR. This will often be further simplified,
5983 for example in cases like ((a & 1) + (a & 2)), which can
5984 become a & 3. */
5986 if (HWI_COMPUTABLE_MODE_P (mode)
5987 && (nonzero_bits (XEXP (x, 0), mode)
5988 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5990 /* Try to simplify the expression further. */
5991 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5992 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5994 /* If we could, great. If not, do not go ahead with the IOR
5995 replacement, since PLUS appears in many special purpose
5996 address arithmetic instructions. */
5997 if (GET_CODE (temp) != CLOBBER
5998 && (GET_CODE (temp) != IOR
5999 || ((XEXP (temp, 0) != XEXP (x, 0)
6000 || XEXP (temp, 1) != XEXP (x, 1))
6001 && (XEXP (temp, 0) != XEXP (x, 1)
6002 || XEXP (temp, 1) != XEXP (x, 0)))))
6003 return temp;
6006 /* Canonicalize x + x into x << 1. */
6007 if (GET_MODE_CLASS (mode) == MODE_INT
6008 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6009 && !side_effects_p (XEXP (x, 0)))
6010 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6012 break;
6014 case MINUS:
6015 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6016 (and <foo> (const_int pow2-1)) */
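	  /* Added worked example (not in the original source): with pow2 == 4,
	     FOO - (FOO & -4) == FOO & 3; e.g. for FOO == 13, 13 - 12 == 1 ==
	     13 & 3.  The code below computes the new mask as -INTVAL - 1,
	     i.e. ~(-pow2) == pow2 - 1.  */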
6017 if (is_a <scalar_int_mode> (mode, &int_mode)
6018 && GET_CODE (XEXP (x, 1)) == AND
6019 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6020 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6021 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6022 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6023 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6024 break;
6026 case MULT:
6027 /* If we have (mult (plus A B) C), apply the distributive law and then
6028 the inverse distributive law to see if things simplify. This
6029 occurs mostly in addresses, often when unrolling loops. */
6031 if (GET_CODE (XEXP (x, 0)) == PLUS)
6033 rtx result = distribute_and_simplify_rtx (x, 0);
6034 if (result)
6035 return result;
6038 /* Try simplify a*(b/c) as (a*b)/c. */
6039 if (FLOAT_MODE_P (mode) && flag_associative_math
6040 && GET_CODE (XEXP (x, 0)) == DIV)
6042 rtx tem = simplify_binary_operation (MULT, mode,
6043 XEXP (XEXP (x, 0), 0),
6044 XEXP (x, 1));
6045 if (tem)
6046 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6048 break;
6050 case UDIV:
6051 /* If this is a divide by a power of two, treat it as a shift if
6052 its first operand is a shift. */
6053 if (is_a <scalar_int_mode> (mode, &int_mode)
6054 && CONST_INT_P (XEXP (x, 1))
6055 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6056 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6057 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6058 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6059 || GET_CODE (XEXP (x, 0)) == ROTATE
6060 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6061 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6062 XEXP (x, 0), i);
6063 break;
6065 case EQ: case NE:
6066 case GT: case GTU: case GE: case GEU:
6067 case LT: case LTU: case LE: case LEU:
6068 case UNEQ: case LTGT:
6069 case UNGT: case UNGE:
6070 case UNLT: case UNLE:
6071 case UNORDERED: case ORDERED:
6072 /* If the first operand is a condition code, we can't do anything
6073 with it. */
6074 if (GET_CODE (XEXP (x, 0)) == COMPARE
6075 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6076 && ! CC0_P (XEXP (x, 0))))
6078 rtx op0 = XEXP (x, 0);
6079 rtx op1 = XEXP (x, 1);
6080 enum rtx_code new_code;
6082 if (GET_CODE (op0) == COMPARE)
6083 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6085 /* Simplify our comparison, if possible. */
6086 new_code = simplify_comparison (code, &op0, &op1);
6088 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6089 if only the low-order bit is possibly nonzero in X (such as when
6090 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6091 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6092 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6093 (plus X 1).
6095 Remove any ZERO_EXTRACT we made when thinking this was a
6096 comparison. It may now be simpler to use, e.g., an AND. If a
6097 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6098 the call to make_compound_operation in the SET case.
6100 Don't apply these optimizations if the caller would
6101 prefer a comparison rather than a value.
6102 E.g., for the condition in an IF_THEN_ELSE most targets need
6103 an explicit comparison. */
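	  /* Added illustration (not in the original source): with
	     STORE_FLAG_VALUE == 1 and X known to be 0 or 1, (ne X 0) is just X
	     and (eq X 0) is (xor X 1); if instead X is known to be 0 or -1,
	     (ne X 0) becomes (neg X) and (eq X 0) becomes (plus X 1), e.g.
	     X == -1 gives 1 and 0 respectively.  */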
6105 if (in_cond)
6108 else if (STORE_FLAG_VALUE == 1
6109 && new_code == NE
6110 && is_int_mode (mode, &int_mode)
6111 && op1 == const0_rtx
6112 && int_mode == GET_MODE (op0)
6113 && nonzero_bits (op0, int_mode) == 1)
6114 return gen_lowpart (int_mode,
6115 expand_compound_operation (op0));
6117 else if (STORE_FLAG_VALUE == 1
6118 && new_code == NE
6119 && is_int_mode (mode, &int_mode)
6120 && op1 == const0_rtx
6121 && int_mode == GET_MODE (op0)
6122 && (num_sign_bit_copies (op0, int_mode)
6123 == GET_MODE_PRECISION (int_mode)))
6125 op0 = expand_compound_operation (op0);
6126 return simplify_gen_unary (NEG, int_mode,
6127 gen_lowpart (int_mode, op0),
6128 int_mode);
6131 else if (STORE_FLAG_VALUE == 1
6132 && new_code == EQ
6133 && is_int_mode (mode, &int_mode)
6134 && op1 == const0_rtx
6135 && int_mode == GET_MODE (op0)
6136 && nonzero_bits (op0, int_mode) == 1)
6138 op0 = expand_compound_operation (op0);
6139 return simplify_gen_binary (XOR, int_mode,
6140 gen_lowpart (int_mode, op0),
6141 const1_rtx);
6144 else if (STORE_FLAG_VALUE == 1
6145 && new_code == EQ
6146 && is_int_mode (mode, &int_mode)
6147 && op1 == const0_rtx
6148 && int_mode == GET_MODE (op0)
6149 && (num_sign_bit_copies (op0, int_mode)
6150 == GET_MODE_PRECISION (int_mode)))
6152 op0 = expand_compound_operation (op0);
6153 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6156 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6157 those above. */
6158 if (in_cond)
6161 else if (STORE_FLAG_VALUE == -1
6162 && new_code == NE
6163 && is_int_mode (mode, &int_mode)
6164 && op1 == const0_rtx
6165 && int_mode == GET_MODE (op0)
6166 && (num_sign_bit_copies (op0, int_mode)
6167 == GET_MODE_PRECISION (int_mode)))
6168 return gen_lowpart (int_mode, expand_compound_operation (op0));
6170 else if (STORE_FLAG_VALUE == -1
6171 && new_code == NE
6172 && is_int_mode (mode, &int_mode)
6173 && op1 == const0_rtx
6174 && int_mode == GET_MODE (op0)
6175 && nonzero_bits (op0, int_mode) == 1)
6177 op0 = expand_compound_operation (op0);
6178 return simplify_gen_unary (NEG, int_mode,
6179 gen_lowpart (int_mode, op0),
6180 int_mode);
6183 else if (STORE_FLAG_VALUE == -1
6184 && new_code == EQ
6185 && is_int_mode (mode, &int_mode)
6186 && op1 == const0_rtx
6187 && int_mode == GET_MODE (op0)
6188 && (num_sign_bit_copies (op0, int_mode)
6189 == GET_MODE_PRECISION (int_mode)))
6191 op0 = expand_compound_operation (op0);
6192 return simplify_gen_unary (NOT, int_mode,
6193 gen_lowpart (int_mode, op0),
6194 int_mode);
6197 /* If X is 0/1, (eq X 0) is X-1. */
6198 else if (STORE_FLAG_VALUE == -1
6199 && new_code == EQ
6200 && is_int_mode (mode, &int_mode)
6201 && op1 == const0_rtx
6202 && int_mode == GET_MODE (op0)
6203 && nonzero_bits (op0, int_mode) == 1)
6205 op0 = expand_compound_operation (op0);
6206 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6209 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6210 one bit that might be nonzero, we can convert (ne x 0) to
6211 (ashift x c) where C puts the bit in the sign bit. Remove any
6212 AND with STORE_FLAG_VALUE when we are done, since we are only
6213 going to test the sign bit. */
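	  /* Added illustration (not in the original source): on a target where
	     STORE_FLAG_VALUE is the sign bit (e.g. 0x80000000 for SImode), if
	     only bit 5 of X can be nonzero, (ne X 0) becomes (ashift X 26),
	     which moves that bit into the sign position.  */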
6214 if (new_code == NE
6215 && is_int_mode (mode, &int_mode)
6216 && HWI_COMPUTABLE_MODE_P (int_mode)
6217 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6218 && op1 == const0_rtx
6219 && int_mode == GET_MODE (op0)
6220 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6222 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6223 expand_compound_operation (op0),
6224 GET_MODE_PRECISION (int_mode) - 1 - i);
6225 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6226 return XEXP (x, 0);
6227 else
6228 return x;
6231 /* If the code changed, return a whole new comparison.
6232 We also need to avoid using SUBST in cases where
6233 simplify_comparison has widened a comparison with a CONST_INT,
6234 since in that case the wider CONST_INT may fail the sanity
6235 checks in do_SUBST. */
6236 if (new_code != code
6237 || (CONST_INT_P (op1)
6238 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6239 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6240 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6242 /* Otherwise, keep this operation, but maybe change its operands.
6243 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6244 SUBST (XEXP (x, 0), op0);
6245 SUBST (XEXP (x, 1), op1);
6247 break;
6249 case IF_THEN_ELSE:
6250 return simplify_if_then_else (x);
6252 case ZERO_EXTRACT:
6253 case SIGN_EXTRACT:
6254 case ZERO_EXTEND:
6255 case SIGN_EXTEND:
6256 /* If we are processing SET_DEST, we are done. */
6257 if (in_dest)
6258 return x;
6260 return expand_compound_operation (x);
6262 case SET:
6263 return simplify_set (x);
6265 case AND:
6266 case IOR:
6267 return simplify_logical (x);
6269 case ASHIFT:
6270 case LSHIFTRT:
6271 case ASHIFTRT:
6272 case ROTATE:
6273 case ROTATERT:
6274 /* If this is a shift by a constant amount, simplify it. */
6275 if (CONST_INT_P (XEXP (x, 1)))
6276 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6277 INTVAL (XEXP (x, 1)));
6279 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6280 SUBST (XEXP (x, 1),
6281 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6282 (HOST_WIDE_INT_1U
6283 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6284 - 1,
6285 0));
6286 break;
6288 default:
6289 break;
6292 return x;
6295 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6297 static rtx
6298 simplify_if_then_else (rtx x)
6300 machine_mode mode = GET_MODE (x);
6301 rtx cond = XEXP (x, 0);
6302 rtx true_rtx = XEXP (x, 1);
6303 rtx false_rtx = XEXP (x, 2);
6304 enum rtx_code true_code = GET_CODE (cond);
6305 int comparison_p = COMPARISON_P (cond);
6306 rtx temp;
6307 int i;
6308 enum rtx_code false_code;
6309 rtx reversed;
6310 scalar_int_mode int_mode, inner_mode;
6312 /* Simplify storing of the truth value. */
6313 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6314 return simplify_gen_relational (true_code, mode, VOIDmode,
6315 XEXP (cond, 0), XEXP (cond, 1));
6317 /* Also when the truth value has to be reversed. */
6318 if (comparison_p
6319 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6320 && (reversed = reversed_comparison (cond, mode)))
6321 return reversed;
6323 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6324 in it is being compared against certain values. Get the true and false
6325 comparisons and see if that says anything about the value of each arm. */
6327 if (comparison_p
6328 && ((false_code = reversed_comparison_code (cond, NULL))
6329 != UNKNOWN)
6330 && REG_P (XEXP (cond, 0)))
6332 HOST_WIDE_INT nzb;
6333 rtx from = XEXP (cond, 0);
6334 rtx true_val = XEXP (cond, 1);
6335 rtx false_val = true_val;
6336 int swapped = 0;
6338 /* If FALSE_CODE is EQ, swap the codes and arms. */
6340 if (false_code == EQ)
6342 swapped = 1, true_code = EQ, false_code = NE;
6343 std::swap (true_rtx, false_rtx);
6346 scalar_int_mode from_mode;
6347 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6349 /* If we are comparing against zero and the expression being
6350 tested has only a single bit that might be nonzero, that is
6351 its value when it is not equal to zero. Similarly if it is
6352 known to be -1 or 0. */
6353 if (true_code == EQ
6354 && true_val == const0_rtx
6355 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6357 false_code = EQ;
6358 false_val = gen_int_mode (nzb, from_mode);
6360 else if (true_code == EQ
6361 && true_val == const0_rtx
6362 && (num_sign_bit_copies (from, from_mode)
6363 == GET_MODE_PRECISION (from_mode)))
6365 false_code = EQ;
6366 false_val = constm1_rtx;
6370 /* Now simplify an arm if we know the value of the register in the
6371 branch and it is used in the arm. Be careful due to the potential
6372 of locally-shared RTL. */
6374 if (reg_mentioned_p (from, true_rtx))
6375 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6376 from, true_val),
6377 pc_rtx, pc_rtx, 0, 0, 0);
6378 if (reg_mentioned_p (from, false_rtx))
6379 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6380 from, false_val),
6381 pc_rtx, pc_rtx, 0, 0, 0);
6383 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6384 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6386 true_rtx = XEXP (x, 1);
6387 false_rtx = XEXP (x, 2);
6388 true_code = GET_CODE (cond);
6391 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6392 reversed, do so to avoid needing two sets of patterns for
6393 subtract-and-branch insns. Similarly if we have a constant in the true
6394 arm, the false arm is the same as the first operand of the comparison, or
6395 the false arm is more complicated than the true arm. */
6397 if (comparison_p
6398 && reversed_comparison_code (cond, NULL) != UNKNOWN
6399 && (true_rtx == pc_rtx
6400 || (CONSTANT_P (true_rtx)
6401 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6402 || true_rtx == const0_rtx
6403 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6404 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6405 && !OBJECT_P (false_rtx))
6406 || reg_mentioned_p (true_rtx, false_rtx)
6407 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6409 true_code = reversed_comparison_code (cond, NULL);
6410 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6411 SUBST (XEXP (x, 1), false_rtx);
6412 SUBST (XEXP (x, 2), true_rtx);
6414 std::swap (true_rtx, false_rtx);
6415 cond = XEXP (x, 0);
6417 /* It is possible that the conditional has been simplified out. */
6418 true_code = GET_CODE (cond);
6419 comparison_p = COMPARISON_P (cond);
6422 /* If the two arms are identical, we don't need the comparison. */
6424 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6425 return true_rtx;
6427 /* Convert a == b ? b : a to "a". */
6428 if (true_code == EQ && ! side_effects_p (cond)
6429 && !HONOR_NANS (mode)
6430 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6431 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6432 return false_rtx;
6433 else if (true_code == NE && ! side_effects_p (cond)
6434 && !HONOR_NANS (mode)
6435 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6436 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6437 return true_rtx;
6439 /* Look for cases where we have (abs x) or (neg (abs X)). */
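	  /* Added illustration (not in the original source):
	     (if_then_else (ge X 0) X (neg X)) becomes (abs X), and with LT or
	     LE as the condition the result is (neg (abs X)) instead.  */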
6441 if (GET_MODE_CLASS (mode) == MODE_INT
6442 && comparison_p
6443 && XEXP (cond, 1) == const0_rtx
6444 && GET_CODE (false_rtx) == NEG
6445 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6446 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6447 && ! side_effects_p (true_rtx))
6448 switch (true_code)
6450 case GT:
6451 case GE:
6452 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6453 case LT:
6454 case LE:
6455 return
6456 simplify_gen_unary (NEG, mode,
6457 simplify_gen_unary (ABS, mode, true_rtx, mode),
6458 mode);
6459 default:
6460 break;
6463 /* Look for MIN or MAX. */
6465 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6466 && comparison_p
6467 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6468 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6469 && ! side_effects_p (cond))
6470 switch (true_code)
6472 case GE:
6473 case GT:
6474 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6475 case LE:
6476 case LT:
6477 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6478 case GEU:
6479 case GTU:
6480 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6481 case LEU:
6482 case LTU:
6483 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6484 default:
6485 break;
6488 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6489 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6490 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6491 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6492 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6493 	 neither 1 nor -1, but it isn't worth checking for. */
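	  /* Added worked example (not in the original source): with
	     STORE_FLAG_VALUE == -1, (if_then_else (eq A B) (plus Z 4) Z) can
	     be rewritten as (plus Z (mult (eq A B) -4)): the comparison is -1
	     when true, so the product is 4, and it is 0 when false.  */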
6495 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6496 && comparison_p
6497 && is_int_mode (mode, &int_mode)
6498 && ! side_effects_p (x))
6500 rtx t = make_compound_operation (true_rtx, SET);
6501 rtx f = make_compound_operation (false_rtx, SET);
6502 rtx cond_op0 = XEXP (cond, 0);
6503 rtx cond_op1 = XEXP (cond, 1);
6504 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6505 scalar_int_mode m = int_mode;
6506 rtx z = 0, c1 = NULL_RTX;
6508 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6509 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6510 || GET_CODE (t) == ASHIFT
6511 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6512 && rtx_equal_p (XEXP (t, 0), f))
6513 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6515 /* If an identity-zero op is commutative, check whether there
6516 would be a match if we swapped the operands. */
6517 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6518 || GET_CODE (t) == XOR)
6519 && rtx_equal_p (XEXP (t, 1), f))
6520 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6521 else if (GET_CODE (t) == SIGN_EXTEND
6522 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6523 && (GET_CODE (XEXP (t, 0)) == PLUS
6524 || GET_CODE (XEXP (t, 0)) == MINUS
6525 || GET_CODE (XEXP (t, 0)) == IOR
6526 || GET_CODE (XEXP (t, 0)) == XOR
6527 || GET_CODE (XEXP (t, 0)) == ASHIFT
6528 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6529 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6530 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6531 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6532 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6533 && (num_sign_bit_copies (f, GET_MODE (f))
6534 > (unsigned int)
6535 (GET_MODE_PRECISION (int_mode)
6536 - GET_MODE_PRECISION (inner_mode))))
6538 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6539 extend_op = SIGN_EXTEND;
6540 m = inner_mode;
6542 else if (GET_CODE (t) == SIGN_EXTEND
6543 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6544 && (GET_CODE (XEXP (t, 0)) == PLUS
6545 || GET_CODE (XEXP (t, 0)) == IOR
6546 || GET_CODE (XEXP (t, 0)) == XOR)
6547 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6548 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6549 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6550 && (num_sign_bit_copies (f, GET_MODE (f))
6551 > (unsigned int)
6552 (GET_MODE_PRECISION (int_mode)
6553 - GET_MODE_PRECISION (inner_mode))))
6555 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6556 extend_op = SIGN_EXTEND;
6557 m = inner_mode;
6559 else if (GET_CODE (t) == ZERO_EXTEND
6560 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6561 && (GET_CODE (XEXP (t, 0)) == PLUS
6562 || GET_CODE (XEXP (t, 0)) == MINUS
6563 || GET_CODE (XEXP (t, 0)) == IOR
6564 || GET_CODE (XEXP (t, 0)) == XOR
6565 || GET_CODE (XEXP (t, 0)) == ASHIFT
6566 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6567 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6568 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6569 && HWI_COMPUTABLE_MODE_P (int_mode)
6570 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6571 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6572 && ((nonzero_bits (f, GET_MODE (f))
6573 & ~GET_MODE_MASK (inner_mode))
6574 == 0))
6576 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6577 extend_op = ZERO_EXTEND;
6578 m = inner_mode;
6580 else if (GET_CODE (t) == ZERO_EXTEND
6581 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6582 && (GET_CODE (XEXP (t, 0)) == PLUS
6583 || GET_CODE (XEXP (t, 0)) == IOR
6584 || GET_CODE (XEXP (t, 0)) == XOR)
6585 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6586 && HWI_COMPUTABLE_MODE_P (int_mode)
6587 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6588 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6589 && ((nonzero_bits (f, GET_MODE (f))
6590 & ~GET_MODE_MASK (inner_mode))
6591 == 0))
6593 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6594 extend_op = ZERO_EXTEND;
6595 m = inner_mode;
6598 if (z)
6600 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6601 cond_op0, cond_op1),
6602 pc_rtx, pc_rtx, 0, 0, 0);
6603 temp = simplify_gen_binary (MULT, m, temp,
6604 simplify_gen_binary (MULT, m, c1,
6605 const_true_rtx));
6606 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6607 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6609 if (extend_op != UNKNOWN)
6610 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6612 return temp;
6616 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6617 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6618 negation of a single bit, we can convert this operation to a shift. We
6619 can actually do this more generally, but it doesn't seem worth it. */
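	  /* Added illustration (not in the original source): if A is known to
	     be 0 or 1, (if_then_else (ne A 0) 8 0) is just (ashift A 3);
	     likewise, if A is known to be 0 or -1, (if_then_else (ne A 0) -8 0)
	     is (ashift A 3), since -1 << 3 == -8.  */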
6621 if (true_code == NE
6622 && is_a <scalar_int_mode> (mode, &int_mode)
6623 && XEXP (cond, 1) == const0_rtx
6624 && false_rtx == const0_rtx
6625 && CONST_INT_P (true_rtx)
6626 && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
6627 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6628 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6629 == GET_MODE_PRECISION (int_mode))
6630 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6631 return
6632 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6633 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6635   /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6636      bit that can be nonzero in A is the bit set in C1. */
6637 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6638 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6639 && is_a <scalar_int_mode> (mode, &int_mode)
6640 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6641 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6642 == nonzero_bits (XEXP (cond, 0), inner_mode)
6643 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6645 rtx val = XEXP (cond, 0);
6646 if (inner_mode == int_mode)
6647 return val;
6648 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6649 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6652 return x;
6655 /* Simplify X, a SET expression. Return the new expression. */
6657 static rtx
6658 simplify_set (rtx x)
6660 rtx src = SET_SRC (x);
6661 rtx dest = SET_DEST (x);
6662 machine_mode mode
6663 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6664 rtx_insn *other_insn;
6665 rtx *cc_use;
6666 scalar_int_mode int_mode;
6668 /* (set (pc) (return)) gets written as (return). */
6669 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6670 return src;
6672 /* Now that we know for sure which bits of SRC we are using, see if we can
6673 simplify the expression for the object knowing that we only need the
6674 low-order bits. */
6676 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6678 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6679 SUBST (SET_SRC (x), src);
6682 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6683 the comparison result and try to simplify it unless we already have used
6684 undobuf.other_insn. */
6685 if ((GET_MODE_CLASS (mode) == MODE_CC
6686 || GET_CODE (src) == COMPARE
6687 || CC0_P (dest))
6688 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6689 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6690 && COMPARISON_P (*cc_use)
6691 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6693 enum rtx_code old_code = GET_CODE (*cc_use);
6694 enum rtx_code new_code;
6695 rtx op0, op1, tmp;
6696 int other_changed = 0;
6697 rtx inner_compare = NULL_RTX;
6698 machine_mode compare_mode = GET_MODE (dest);
6700 if (GET_CODE (src) == COMPARE)
6702 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6703 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6705 inner_compare = op0;
6706 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6709 else
6710 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6712 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6713 op0, op1);
6714 if (!tmp)
6715 new_code = old_code;
6716 else if (!CONSTANT_P (tmp))
6718 new_code = GET_CODE (tmp);
6719 op0 = XEXP (tmp, 0);
6720 op1 = XEXP (tmp, 1);
6722 else
6724 rtx pat = PATTERN (other_insn);
6725 undobuf.other_insn = other_insn;
6726 SUBST (*cc_use, tmp);
6728 /* Attempt to simplify CC user. */
6729 if (GET_CODE (pat) == SET)
6731 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6732 if (new_rtx != NULL_RTX)
6733 SUBST (SET_SRC (pat), new_rtx);
6736 /* Convert X into a no-op move. */
6737 SUBST (SET_DEST (x), pc_rtx);
6738 SUBST (SET_SRC (x), pc_rtx);
6739 return x;
6742 /* Simplify our comparison, if possible. */
6743 new_code = simplify_comparison (new_code, &op0, &op1);
6745 #ifdef SELECT_CC_MODE
6746 /* If this machine has CC modes other than CCmode, check to see if we
6747 need to use a different CC mode here. */
6748 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6749 compare_mode = GET_MODE (op0);
6750 else if (inner_compare
6751 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6752 && new_code == old_code
6753 && op0 == XEXP (inner_compare, 0)
6754 && op1 == XEXP (inner_compare, 1))
6755 compare_mode = GET_MODE (inner_compare);
6756 else
6757 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6759 /* If the mode changed, we have to change SET_DEST, the mode in the
6760 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6761 a hard register, just build new versions with the proper mode. If it
6762 	 is a pseudo, we lose unless this is the only time we set the pseudo, in
6763 which case we can safely change its mode. */
6764 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6766 if (can_change_dest_mode (dest, 0, compare_mode))
6768 unsigned int regno = REGNO (dest);
6769 rtx new_dest;
6771 if (regno < FIRST_PSEUDO_REGISTER)
6772 new_dest = gen_rtx_REG (compare_mode, regno);
6773 else
6775 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6776 new_dest = regno_reg_rtx[regno];
6779 SUBST (SET_DEST (x), new_dest);
6780 SUBST (XEXP (*cc_use, 0), new_dest);
6781 other_changed = 1;
6783 dest = new_dest;
6786 #endif /* SELECT_CC_MODE */
6788 /* If the code changed, we have to build a new comparison in
6789 undobuf.other_insn. */
6790 if (new_code != old_code)
6792 int other_changed_previously = other_changed;
6793 unsigned HOST_WIDE_INT mask;
6794 rtx old_cc_use = *cc_use;
6796 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6797 dest, const0_rtx));
6798 other_changed = 1;
6800 /* If the only change we made was to change an EQ into an NE or
6801 vice versa, OP0 has only one bit that might be nonzero, and OP1
6802 is zero, check if changing the user of the condition code will
6803 produce a valid insn. If it won't, we can keep the original code
6804 in that insn by surrounding our operation with an XOR. */
6806 if (((old_code == NE && new_code == EQ)
6807 || (old_code == EQ && new_code == NE))
6808 && ! other_changed_previously && op1 == const0_rtx
6809 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6810 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6812 rtx pat = PATTERN (other_insn), note = 0;
6814 if ((recog_for_combine (&pat, other_insn, &note) < 0
6815 && ! check_asm_operands (pat)))
6817 *cc_use = old_cc_use;
6818 other_changed = 0;
6820 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6821 gen_int_mode (mask,
6822 GET_MODE (op0)));
6827 if (other_changed)
6828 undobuf.other_insn = other_insn;
6830 /* Don't generate a compare of a CC with 0, just use that CC. */
6831 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6833 SUBST (SET_SRC (x), op0);
6834 src = SET_SRC (x);
6836 /* Otherwise, if we didn't previously have the same COMPARE we
6837 want, create it from scratch. */
6838 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6839 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6841 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6842 src = SET_SRC (x);
6845 else
6847 /* Get SET_SRC in a form where we have placed back any
6848 compound expressions. Then do the checks below. */
6849 src = make_compound_operation (src, SET);
6850 SUBST (SET_SRC (x), src);
6853 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6854 and X being a REG or (subreg (reg)), we may be able to convert this to
6855 (set (subreg:m2 x) (op)).
6857 We can always do this if M1 is narrower than M2 because that means that
6858 we only care about the low bits of the result.
6860 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6861 perform a narrower operation than requested since the high-order bits will
6862 be undefined. On machines where it is defined, this transformation is safe
6863 as long as M1 and M2 have the same number of words. */
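/* For instance, on a target where QImode and SImode both occupy one
   word, (set (reg:QI D) (subreg:QI (plus:SI A B) 0)) can become
   (set (subreg:SI (reg:QI D) 0) (plus:SI A B)), doing the addition in
   its natural mode and keeping only the low byte of the result.  */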
6865 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6866 && !OBJECT_P (SUBREG_REG (src))
6867 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6868 / UNITS_PER_WORD)
6869 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6870 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6871 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6872 #ifdef CANNOT_CHANGE_MODE_CLASS
6873 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6874 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6875 GET_MODE (SUBREG_REG (src)),
6876 GET_MODE (src)))
6877 #endif
6878 && (REG_P (dest)
6879 || (GET_CODE (dest) == SUBREG
6880 && REG_P (SUBREG_REG (dest)))))
6882 SUBST (SET_DEST (x),
6883 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6884 dest));
6885 SUBST (SET_SRC (x), SUBREG_REG (src));
6887 src = SET_SRC (x), dest = SET_DEST (x);
6890 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6891 in SRC. */
6892 if (dest == cc0_rtx
6893 && partial_subreg_p (src)
6894 && subreg_lowpart_p (src))
6896 rtx inner = SUBREG_REG (src);
6897 machine_mode inner_mode = GET_MODE (inner);
6899 /* Here we make sure that we don't have a sign bit on. */
6900 if (val_signbit_known_clear_p (GET_MODE (src),
6901 nonzero_bits (inner, inner_mode)))
6903 SUBST (SET_SRC (x), inner);
6904 src = SET_SRC (x);
6908 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6909 would require a paradoxical subreg. Replace the subreg with a
6910 zero_extend to avoid the reload that would otherwise be required. */
6912 enum rtx_code extend_op;
6913 if (paradoxical_subreg_p (src)
6914 && MEM_P (SUBREG_REG (src))
6915 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6917 SUBST (SET_SRC (x),
6918 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6920 src = SET_SRC (x);
6923 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6924 are comparing an item known to be 0 or -1 against 0, use a logical
6925 operation instead. Check for one of the arms being an IOR of the other
6926 arm with some value. We compute three terms to be IOR'ed together. In
6927 practice, at most two will be nonzero. Then we do the IOR's. */
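/* For example, if A is known to be 0 or -1 and SRC is
   (if_then_else (ne A (const_int 0)) B C), the replacement built below
   is equivalent to (ior (and A B) (and (not A) C)): it yields B when A
   is -1 and C when A is 0.  */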
6929 if (GET_CODE (dest) != PC
6930 && GET_CODE (src) == IF_THEN_ELSE
6931 && is_int_mode (GET_MODE (src), &int_mode)
6932 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6933 && XEXP (XEXP (src, 0), 1) == const0_rtx
6934 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6935 && (!HAVE_conditional_move
6936 || ! can_conditionally_move_p (int_mode))
6937 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6938 == GET_MODE_PRECISION (int_mode))
6939 && ! side_effects_p (src))
6941 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6942 ? XEXP (src, 1) : XEXP (src, 2));
6943 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6944 ? XEXP (src, 2) : XEXP (src, 1));
6945 rtx term1 = const0_rtx, term2, term3;
6947 if (GET_CODE (true_rtx) == IOR
6948 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6949 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6950 else if (GET_CODE (true_rtx) == IOR
6951 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6952 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6953 else if (GET_CODE (false_rtx) == IOR
6954 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6955 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6956 else if (GET_CODE (false_rtx) == IOR
6957 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6958 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6960 term2 = simplify_gen_binary (AND, int_mode,
6961 XEXP (XEXP (src, 0), 0), true_rtx);
6962 term3 = simplify_gen_binary (AND, int_mode,
6963 simplify_gen_unary (NOT, int_mode,
6964 XEXP (XEXP (src, 0), 0),
6965 int_mode),
6966 false_rtx);
6968 SUBST (SET_SRC (x),
6969 simplify_gen_binary (IOR, int_mode,
6970 simplify_gen_binary (IOR, int_mode,
6971 term1, term2),
6972 term3));
6974 src = SET_SRC (x);
6977 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6978 whole thing fail. */
6979 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6980 return src;
6981 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6982 return dest;
6983 else
6984 /* Convert this into a field assignment operation, if possible. */
6985 return make_field_assignment (x);
6988 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6989 result. */
6991 static rtx
6992 simplify_logical (rtx x)
6994 rtx op0 = XEXP (x, 0);
6995 rtx op1 = XEXP (x, 1);
6996 scalar_int_mode mode;
6998 switch (GET_CODE (x))
7000 case AND:
7001 /* We can call simplify_and_const_int only if we don't lose
7002 any (sign) bits when converting INTVAL (op1) to
7003 "unsigned HOST_WIDE_INT". */
7004 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7005 && CONST_INT_P (op1)
7006 && (HWI_COMPUTABLE_MODE_P (mode)
7007 || INTVAL (op1) > 0))
7009 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7010 if (GET_CODE (x) != AND)
7011 return x;
7013 op0 = XEXP (x, 0);
7014 op1 = XEXP (x, 1);
7017 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7018 apply the distributive law and then the inverse distributive
7019 law to see if things simplify. */
7020 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7022 rtx result = distribute_and_simplify_rtx (x, 0);
7023 if (result)
7024 return result;
7026 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7028 rtx result = distribute_and_simplify_rtx (x, 1);
7029 if (result)
7030 return result;
7032 break;
7034 case IOR:
7035 /* If we have (ior (and A B) C), apply the distributive law and then
7036 the inverse distributive law to see if things simplify. */
7038 if (GET_CODE (op0) == AND)
7040 rtx result = distribute_and_simplify_rtx (x, 0);
7041 if (result)
7042 return result;
7045 if (GET_CODE (op1) == AND)
7047 rtx result = distribute_and_simplify_rtx (x, 1);
7048 if (result)
7049 return result;
7051 break;
7053 default:
7054 gcc_unreachable ();
7057 return x;
7060 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7061 operations" because they can be replaced with two more basic operations.
7062 ZERO_EXTEND is also considered "compound" because it can be replaced with
7063 an AND operation, which is simpler, though only one operation.
7065 The function expand_compound_operation is called with an rtx expression
7066 and will convert it to the appropriate shifts and AND operations,
7067 simplifying at each stage.
7069 The function make_compound_operation is called to convert an expression
7070 consisting of shifts and ANDs into the equivalent compound expression.
7071 It is the inverse of this function, loosely speaking. */
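/* For example, (zero_extend:SI (reg:QI R)) can be replaced by an AND of
   (subreg:SI (reg:QI R) 0) with (const_int 255), while
   (sign_extract:SI X (const_int 8) (const_int 0)) can be expanded into
   a left shift by 24 followed by an arithmetic right shift by 24.  */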
7073 static rtx
7074 expand_compound_operation (rtx x)
7076 unsigned HOST_WIDE_INT pos = 0, len;
7077 int unsignedp = 0;
7078 unsigned int modewidth;
7079 rtx tem;
7080 scalar_int_mode inner_mode;
7082 switch (GET_CODE (x))
7084 case ZERO_EXTEND:
7085 unsignedp = 1;
7086 /* FALLTHRU */
7087 case SIGN_EXTEND:
7088 /* We can't necessarily use a const_int for a multiword mode;
7089 it depends on implicitly extending the value.
7090 Since we don't know the right way to extend it,
7091 we can't tell whether the implicit way is right.
7093 Even for a mode that is no wider than a const_int,
7094 we can't win, because we need to sign extend one of its bits through
7095 the rest of it, and we don't know which bit. */
7096 if (CONST_INT_P (XEXP (x, 0)))
7097 return x;
7099 /* Reject modes that aren't scalar integers because turning vector
7100 or complex modes into shifts causes problems. */
7101 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7102 return x;
7104 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7105 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7106 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7107 reloaded. If not for that, MEM's would very rarely be safe.
7109 Reject modes bigger than a word, because we might not be able
7110 to reference a two-register group starting with an arbitrary register
7111 (and currently gen_lowpart might crash for a SUBREG). */
7113 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7114 return x;
7116 len = GET_MODE_PRECISION (inner_mode);
7117 /* If the inner object has VOIDmode (the only way this can happen
7118 is if it is an ASM_OPERANDS), we can't do anything since we don't
7119 know how much masking to do. */
7120 if (len == 0)
7121 return x;
7123 break;
7125 case ZERO_EXTRACT:
7126 unsignedp = 1;
7128 /* fall through */
7130 case SIGN_EXTRACT:
7131 /* If the operand is a CLOBBER, just return it. */
7132 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7133 return XEXP (x, 0);
7135 if (!CONST_INT_P (XEXP (x, 1))
7136 || !CONST_INT_P (XEXP (x, 2)))
7137 return x;
7139 /* Reject modes that aren't scalar integers because turning vector
7140 or complex modes into shifts causes problems. */
7141 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7142 return x;
7144 len = INTVAL (XEXP (x, 1));
7145 pos = INTVAL (XEXP (x, 2));
7147 /* This should stay within the object being extracted, fail otherwise. */
7148 if (len + pos > GET_MODE_PRECISION (inner_mode))
7149 return x;
7151 if (BITS_BIG_ENDIAN)
7152 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7154 break;
7156 default:
7157 return x;
7160 /* We've rejected non-scalar operations by now. */
7161 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7163 /* Convert sign extension to zero extension, if we know that the high
7164 bit is not set, as this is easier to optimize. It will be converted
7165 back to a cheaper alternative in make_extraction. */
7166 if (GET_CODE (x) == SIGN_EXTEND
7167 && HWI_COMPUTABLE_MODE_P (mode)
7168 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7169 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7170 == 0))
7172 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7173 rtx temp2 = expand_compound_operation (temp);
7175 /* Make sure this is a profitable operation. */
7176 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7177 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7178 return temp2;
7179 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7180 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7181 return temp;
7182 else
7183 return x;
7186 /* We can optimize some special cases of ZERO_EXTEND. */
7187 if (GET_CODE (x) == ZERO_EXTEND)
7189 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7190 know that the last value didn't have any inappropriate bits
7191 set. */
7192 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7193 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7194 && HWI_COMPUTABLE_MODE_P (mode)
7195 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7196 & ~GET_MODE_MASK (inner_mode)) == 0)
7197 return XEXP (XEXP (x, 0), 0);
7199 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7200 if (GET_CODE (XEXP (x, 0)) == SUBREG
7201 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7202 && subreg_lowpart_p (XEXP (x, 0))
7203 && HWI_COMPUTABLE_MODE_P (mode)
7204 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7205 & ~GET_MODE_MASK (inner_mode)) == 0)
7206 return SUBREG_REG (XEXP (x, 0));
7208 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7209 is a comparison and STORE_FLAG_VALUE permits. This is like
7210 the first case, but it works even when MODE is larger
7211 than HOST_WIDE_INT. */
7212 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7213 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7214 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7215 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7216 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7217 return XEXP (XEXP (x, 0), 0);
7219 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7220 if (GET_CODE (XEXP (x, 0)) == SUBREG
7221 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7222 && subreg_lowpart_p (XEXP (x, 0))
7223 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7224 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7225 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7226 return SUBREG_REG (XEXP (x, 0));
7230 /* If we reach here, we want to return a pair of shifts. The inner
7231 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7232 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7233 logical depending on the value of UNSIGNEDP.
7235 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7236 converted into an AND of a shift.
7238 We must check for the case where the left shift would have a negative
7239 count. This can happen in a case like (x >> 31) & 255 on machines
7240 that can't shift by a constant. On those machines, we would first
7241 combine the shift with the AND to produce a variable-position
7242 extraction. Then the constant of 31 would be substituted in
7243 to produce such a position. */
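/* For example, extracting LEN == 8 bits starting at POS == 3 from a
   32-bit value uses an inner left shift of 32 - 3 - 8 == 21 bits and an
   outer right shift of 32 - 8 == 24 bits.  */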
7245 modewidth = GET_MODE_PRECISION (mode);
7246 if (modewidth >= pos + len)
7248 tem = gen_lowpart (mode, XEXP (x, 0));
7249 if (!tem || GET_CODE (tem) == CLOBBER)
7250 return x;
7251 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7252 tem, modewidth - pos - len);
7253 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7254 mode, tem, modewidth - len);
7256 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7257 tem = simplify_and_const_int (NULL_RTX, mode,
7258 simplify_shift_const (NULL_RTX, LSHIFTRT,
7259 mode, XEXP (x, 0),
7260 pos),
7261 (HOST_WIDE_INT_1U << len) - 1);
7262 else
7263 /* Any other cases we can't handle. */
7264 return x;
7266 /* If we couldn't do this for some reason, return the original
7267 expression. */
7268 if (GET_CODE (tem) == CLOBBER)
7269 return x;
7271 return tem;
7274 /* X is a SET which contains an assignment of one object into
7275 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7276 or certain SUBREGS). If possible, convert it into a series of
7277 logical operations.
7279 We half-heartedly support variable positions, but do not at all
7280 support variable lengths. */
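/* For example, on a target where bit positions count from the least
   significant bit, (set (zero_extract:SI (reg:SI X) (const_int 3)
   (const_int 4)) Y) is rewritten below as roughly
   X = (X & ~(7 << 4)) | ((Y & 7) << 4), i.e. the three destination bits
   are cleared and the masked, shifted source is IORed in.  */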
7282 static const_rtx
7283 expand_field_assignment (const_rtx x)
7285 rtx inner;
7286 rtx pos; /* Always counts from low bit. */
7287 int len;
7288 rtx mask, cleared, masked;
7289 scalar_int_mode compute_mode;
7291 /* Loop until we find something we can't simplify. */
7292 while (1)
7294 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7295 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7297 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7298 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7299 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7301 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7302 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7304 inner = XEXP (SET_DEST (x), 0);
7305 len = INTVAL (XEXP (SET_DEST (x), 1));
7306 pos = XEXP (SET_DEST (x), 2);
7308 /* A constant position should stay within the width of INNER. */
7309 if (CONST_INT_P (pos)
7310 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7311 break;
7313 if (BITS_BIG_ENDIAN)
7315 if (CONST_INT_P (pos))
7316 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7317 - INTVAL (pos));
7318 else if (GET_CODE (pos) == MINUS
7319 && CONST_INT_P (XEXP (pos, 1))
7320 && (INTVAL (XEXP (pos, 1))
7321 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7322 /* If position is ADJUST - X, new position is X. */
7323 pos = XEXP (pos, 0);
7324 else
7326 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7327 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7328 gen_int_mode (prec - len,
7329 GET_MODE (pos)),
7330 pos);
7335 /* A SUBREG between two modes that occupy the same numbers of words
7336 can be done by moving the SUBREG to the source. */
7337 else if (GET_CODE (SET_DEST (x)) == SUBREG
7338 /* We need SUBREGs to compute nonzero_bits properly. */
7339 && nonzero_sign_valid
7340 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7341 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7342 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7343 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7345 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7346 gen_lowpart
7347 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7348 SET_SRC (x)));
7349 continue;
7351 else
7352 break;
7354 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7355 inner = SUBREG_REG (inner);
7357 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7358 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7360 /* Don't do anything for vector or complex integral types. */
7361 if (! FLOAT_MODE_P (GET_MODE (inner)))
7362 break;
7364 /* Try to find an integral mode to pun with. */
7365 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7366 .exists (&compute_mode))
7367 break;
7369 inner = gen_lowpart (compute_mode, inner);
7372 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7373 if (len >= HOST_BITS_PER_WIDE_INT)
7374 break;
7376 /* Don't try to compute in too wide unsupported modes. */
7377 if (!targetm.scalar_mode_supported_p (compute_mode))
7378 break;
7380 /* Now compute the equivalent expression. Make a copy of INNER
7381 for the SET_DEST in case it is a MEM into which we will substitute;
7382 we don't want shared RTL in that case. */
7383 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7384 compute_mode);
7385 cleared = simplify_gen_binary (AND, compute_mode,
7386 simplify_gen_unary (NOT, compute_mode,
7387 simplify_gen_binary (ASHIFT,
7388 compute_mode,
7389 mask, pos),
7390 compute_mode),
7391 inner);
7392 masked = simplify_gen_binary (ASHIFT, compute_mode,
7393 simplify_gen_binary (
7394 AND, compute_mode,
7395 gen_lowpart (compute_mode, SET_SRC (x)),
7396 mask),
7397 pos);
7399 x = gen_rtx_SET (copy_rtx (inner),
7400 simplify_gen_binary (IOR, compute_mode,
7401 cleared, masked));
7404 return x;
7407 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7408 it is an RTX that represents the (variable) starting position; otherwise,
7409 POS is the (constant) starting bit position. Both are counted from the LSB.
7411 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7413 IN_DEST is nonzero if this is a reference in the destination of a SET.
7414 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7415 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7416 be used.
7418 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7419 ZERO_EXTRACT should be built even for bits starting at bit 0.
7421 MODE is the desired mode of the result (if IN_DEST == 0).
7423 The result is an RTX for the extraction or NULL_RTX if the target
7424 can't handle it. */
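/* For example, a call with MODE == SImode, LEN == 8, POS == 8 and
   UNSIGNEDP nonzero asks for bits 8..15 of INNER; if no cheaper form is
   found below, the result is roughly
   (zero_extract:SI INNER (const_int 8) (const_int 8)).  */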
7426 static rtx
7427 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7428 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7429 int in_dest, int in_compare)
7431 /* This mode describes the size of the storage area
7432 to fetch the overall value from. Within that, we
7433 ignore the POS lowest bits, etc. */
7434 machine_mode is_mode = GET_MODE (inner);
7435 machine_mode inner_mode;
7436 scalar_int_mode wanted_inner_mode;
7437 scalar_int_mode wanted_inner_reg_mode = word_mode;
7438 scalar_int_mode pos_mode = word_mode;
7439 machine_mode extraction_mode = word_mode;
7440 rtx new_rtx = 0;
7441 rtx orig_pos_rtx = pos_rtx;
7442 HOST_WIDE_INT orig_pos;
7444 if (pos_rtx && CONST_INT_P (pos_rtx))
7445 pos = INTVAL (pos_rtx), pos_rtx = 0;
7447 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7449 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7450 consider just the QI as the memory to extract from.
7451 The subreg adds or removes high bits; its mode is
7452 irrelevant to the meaning of this extraction,
7453 since POS and LEN count from the lsb. */
7454 if (MEM_P (SUBREG_REG (inner)))
7455 is_mode = GET_MODE (SUBREG_REG (inner));
7456 inner = SUBREG_REG (inner);
7458 else if (GET_CODE (inner) == ASHIFT
7459 && CONST_INT_P (XEXP (inner, 1))
7460 && pos_rtx == 0 && pos == 0
7461 && len > UINTVAL (XEXP (inner, 1)))
7463 /* We're extracting the least significant bits of an rtx
7464 (ashift X (const_int C)), where LEN > C. Extract the
7465 least significant (LEN - C) bits of X, giving an rtx
7466 whose mode is MODE, then shift it left C times. */
7467 new_rtx = make_extraction (mode, XEXP (inner, 0),
7468 0, 0, len - INTVAL (XEXP (inner, 1)),
7469 unsignedp, in_dest, in_compare);
7470 if (new_rtx != 0)
7471 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7473 else if (GET_CODE (inner) == TRUNCATE)
7474 inner = XEXP (inner, 0);
7476 inner_mode = GET_MODE (inner);
7478 /* See if this can be done without an extraction. We never can if the
7479 width of the field is not the same as that of some integer mode. For
7480 registers, we can only avoid the extraction if the position is at the
7481 low-order bit and this is either not in the destination or we have the
7482 appropriate STRICT_LOW_PART operation available.
7484 For MEM, we can avoid an extract if the field starts on an appropriate
7485 boundary and we can change the mode of the memory reference. */
7487 scalar_int_mode tmode;
7488 if (int_mode_for_size (len, 1).exists (&tmode)
7489 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7490 && !MEM_P (inner)
7491 && (pos == 0 || REG_P (inner))
7492 && (inner_mode == tmode
7493 || !REG_P (inner)
7494 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7495 || reg_truncated_to_mode (tmode, inner))
7496 && (! in_dest
7497 || (REG_P (inner)
7498 && have_insn_for (STRICT_LOW_PART, tmode))))
7499 || (MEM_P (inner) && pos_rtx == 0
7500 && (pos
7501 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7502 : BITS_PER_UNIT)) == 0
7503 /* We can't do this if we are widening INNER_MODE (it
7504 may not be aligned, for one thing). */
7505 && !paradoxical_subreg_p (tmode, inner_mode)
7506 && (inner_mode == tmode
7507 || (! mode_dependent_address_p (XEXP (inner, 0),
7508 MEM_ADDR_SPACE (inner))
7509 && ! MEM_VOLATILE_P (inner))))))
7511 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7512 field. If the original and current mode are the same, we need not
7513 adjust the offset. Otherwise, we do if bytes big endian.
7515 If INNER is not a MEM, get a piece consisting of just the field
7516 of interest (in this case POS % BITS_PER_WORD must be 0). */
7518 if (MEM_P (inner))
7520 HOST_WIDE_INT offset;
7522 /* POS counts from lsb, but make OFFSET count in memory order. */
7523 if (BYTES_BIG_ENDIAN)
7524 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7525 else
7526 offset = pos / BITS_PER_UNIT;
7528 new_rtx = adjust_address_nv (inner, tmode, offset);
7530 else if (REG_P (inner))
7532 if (tmode != inner_mode)
7534 /* We can't call gen_lowpart in a DEST since we
7535 always want a SUBREG (see below) and it would sometimes
7536 return a new hard register. */
7537 if (pos || in_dest)
7539 unsigned int offset
7540 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7542 /* Avoid creating invalid subregs, for example when
7543 simplifying (x>>32)&255. */
7544 if (!validate_subreg (tmode, inner_mode, inner, offset))
7545 return NULL_RTX;
7547 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7549 else
7550 new_rtx = gen_lowpart (tmode, inner);
7552 else
7553 new_rtx = inner;
7555 else
7556 new_rtx = force_to_mode (inner, tmode,
7557 len >= HOST_BITS_PER_WIDE_INT
7558 ? HOST_WIDE_INT_M1U
7559 : (HOST_WIDE_INT_1U << len) - 1, 0);
7561 /* If this extraction is going into the destination of a SET,
7562 make a STRICT_LOW_PART unless we made a MEM. */
7564 if (in_dest)
7565 return (MEM_P (new_rtx) ? new_rtx
7566 : (GET_CODE (new_rtx) != SUBREG
7567 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7568 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7570 if (mode == tmode)
7571 return new_rtx;
7573 if (CONST_SCALAR_INT_P (new_rtx))
7574 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7575 mode, new_rtx, tmode);
7577 /* If we know that no extraneous bits are set, and that the high
7578 bit is not set, convert the extraction to the cheaper of
7579 sign and zero extension, which are equivalent in these cases. */
7580 if (flag_expensive_optimizations
7581 && (HWI_COMPUTABLE_MODE_P (tmode)
7582 && ((nonzero_bits (new_rtx, tmode)
7583 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7584 == 0)))
7586 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7587 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7589 /* Prefer ZERO_EXTENSION, since it gives more information to
7590 backends. */
7591 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7592 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7593 return temp;
7594 return temp1;
7597 /* Otherwise, sign- or zero-extend unless we already are in the
7598 proper mode. */
7600 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7601 mode, new_rtx));
7604 /* Unless this is a COMPARE or we have a funny memory reference,
7605 don't do anything with zero-extending field extracts starting at
7606 the low-order bit since they are simple AND operations. */
7607 if (pos_rtx == 0 && pos == 0 && ! in_dest
7608 && ! in_compare && unsignedp)
7609 return 0;
7611 /* If INNER is a MEM, reject this if we would be spanning bytes or
7612 if the position is not a constant and the length is not 1. In all
7613 other cases, we would only be going outside our object in cases when
7614 an original shift would have been undefined. */
7615 if (MEM_P (inner)
7616 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7617 || (pos_rtx != 0 && len != 1)))
7618 return 0;
7620 enum extraction_pattern pattern = (in_dest ? EP_insv
7621 : unsignedp ? EP_extzv : EP_extv);
7623 /* If INNER is not from memory, we want it to have the mode of a register
7624 extraction pattern's structure operand, or word_mode if there is no
7625 such pattern. The same applies to extraction_mode and pos_mode
7626 and their respective operands.
7628 For memory, assume that the desired extraction_mode and pos_mode
7629 are the same as for a register operation, since at present we don't
7630 have named patterns for aligned memory structures. */
7631 struct extraction_insn insn;
7632 if (get_best_reg_extraction_insn (&insn, pattern,
7633 GET_MODE_BITSIZE (inner_mode), mode))
7635 wanted_inner_reg_mode = insn.struct_mode.require ();
7636 pos_mode = insn.pos_mode;
7637 extraction_mode = insn.field_mode;
7640 /* Never narrow an object, since that might not be safe. */
7642 if (mode != VOIDmode
7643 && partial_subreg_p (extraction_mode, mode))
7644 extraction_mode = mode;
7646 if (!MEM_P (inner))
7647 wanted_inner_mode = wanted_inner_reg_mode;
7648 else
7650 /* Be careful not to go beyond the extracted object and maintain the
7651 natural alignment of the memory. */
7652 wanted_inner_mode = smallest_int_mode_for_size (len);
7653 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7654 > GET_MODE_BITSIZE (wanted_inner_mode))
7655 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7658 orig_pos = pos;
7660 if (BITS_BIG_ENDIAN)
7662 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7663 BITS_BIG_ENDIAN style. If position is constant, compute new
7664 position. Otherwise, build subtraction.
7665 Note that POS is relative to the mode of the original argument.
7666 If it's a MEM we need to recompute POS relative to that.
7667 However, if we're extracting from (or inserting into) a register,
7668 we want to recompute POS relative to wanted_inner_mode. */
7669 int width = (MEM_P (inner)
7670 ? GET_MODE_BITSIZE (is_mode)
7671 : GET_MODE_BITSIZE (wanted_inner_mode));
7673 if (pos_rtx == 0)
7674 pos = width - len - pos;
7675 else
7676 pos_rtx
7677 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7678 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7679 pos_rtx);
7680 /* POS may be less than 0 now, but we check for that below.
7681 Note that it can only be less than 0 if !MEM_P (inner). */
7684 /* If INNER has a wider mode, and this is a constant extraction, try to
7685 make it smaller and adjust the byte to point to the byte containing
7686 the value. */
7687 if (wanted_inner_mode != VOIDmode
7688 && inner_mode != wanted_inner_mode
7689 && ! pos_rtx
7690 && partial_subreg_p (wanted_inner_mode, is_mode)
7691 && MEM_P (inner)
7692 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7693 && ! MEM_VOLATILE_P (inner))
7695 int offset = 0;
7697 /* The computations below will be correct if the machine is big
7698 endian in both bits and bytes or little endian in bits and bytes.
7699 If it is mixed, we must adjust. */
7701 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7702 adjust OFFSET to compensate. */
7703 if (BYTES_BIG_ENDIAN
7704 && paradoxical_subreg_p (is_mode, inner_mode))
7705 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7707 /* We can now move to the desired byte. */
7708 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7709 * GET_MODE_SIZE (wanted_inner_mode);
7710 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7712 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7713 && is_mode != wanted_inner_mode)
7714 offset = (GET_MODE_SIZE (is_mode)
7715 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7717 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7720 /* If INNER is not memory, get it into the proper mode. If we are changing
7721 its mode, POS must be a constant and smaller than the size of the new
7722 mode. */
7723 else if (!MEM_P (inner))
7725 /* On the LHS, don't create paradoxical subregs implicitly truncating
7726 the register unless TRULY_NOOP_TRUNCATION. */
7727 if (in_dest
7728 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7729 wanted_inner_mode))
7730 return NULL_RTX;
7732 if (GET_MODE (inner) != wanted_inner_mode
7733 && (pos_rtx != 0
7734 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7735 return NULL_RTX;
7737 if (orig_pos < 0)
7738 return NULL_RTX;
7740 inner = force_to_mode (inner, wanted_inner_mode,
7741 pos_rtx
7742 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7743 ? HOST_WIDE_INT_M1U
7744 : (((HOST_WIDE_INT_1U << len) - 1)
7745 << orig_pos),
7746 0);
7749 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7750 have to zero extend. Otherwise, we can just use a SUBREG.
7752 We dealt with constant rtxes earlier, so pos_rtx cannot
7753 have VOIDmode at this point. */
7754 if (pos_rtx != 0
7755 && (GET_MODE_SIZE (pos_mode)
7756 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7758 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7759 GET_MODE (pos_rtx));
7761 /* If we know that no extraneous bits are set, and that the high
7762 bit is not set, convert the extraction to the cheaper one - either
7763 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7764 cases. */
7765 if (flag_expensive_optimizations
7766 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7767 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7768 & ~(((unsigned HOST_WIDE_INT)
7769 GET_MODE_MASK (GET_MODE (pos_rtx)))
7770 >> 1))
7771 == 0)))
7773 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7774 GET_MODE (pos_rtx));
7776 /* Prefer ZERO_EXTENSION, since it gives more information to
7777 backends. */
7778 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7779 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7780 temp = temp1;
7782 pos_rtx = temp;
7785 /* Make POS_RTX unless we already have it and it is correct. If we don't
7786 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7787 be a CONST_INT. */
7788 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7789 pos_rtx = orig_pos_rtx;
7791 else if (pos_rtx == 0)
7792 pos_rtx = GEN_INT (pos);
7794 /* Make the required operation. See if we can use existing rtx. */
7795 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7796 extraction_mode, inner, GEN_INT (len), pos_rtx);
7797 if (! in_dest)
7798 new_rtx = gen_lowpart (mode, new_rtx);
7800 return new_rtx;
7803 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7804 can be commuted with any other operations in X. Return X without
7805 that shift if so. */
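/* For example, with COUNT == 2,
   (plus (ashift X (const_int 5)) (const_int 12)) becomes
   (plus (ashift X (const_int 3)) (const_int 3)): the low two bits of the
   constant are zero, so the whole expression can absorb the shift.  */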
7807 static rtx
7808 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7810 enum rtx_code code = GET_CODE (x);
7811 rtx tem;
7813 switch (code)
7815 case ASHIFT:
7816 /* This is the shift itself. If it is wide enough, we will return
7817 either the value being shifted if the shift count is equal to
7818 COUNT or a shift for the difference. */
7819 if (CONST_INT_P (XEXP (x, 1))
7820 && INTVAL (XEXP (x, 1)) >= count)
7821 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7822 INTVAL (XEXP (x, 1)) - count);
7823 break;
7825 case NEG: case NOT:
7826 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7827 return simplify_gen_unary (code, mode, tem, mode);
7829 break;
7831 case PLUS: case IOR: case XOR: case AND:
7832 /* If we can safely shift this constant and we find the inner shift,
7833 make a new operation. */
7834 if (CONST_INT_P (XEXP (x, 1))
7835 && (UINTVAL (XEXP (x, 1))
7836 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7837 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7839 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7840 return simplify_gen_binary (code, mode, tem,
7841 gen_int_mode (val, mode));
7843 break;
7845 default:
7846 break;
7849 return 0;
7852 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7853 level of the expression and MODE is its mode. IN_CODE is as for
7854 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7855 that should be used when recursing on operands of *X_PTR.
7857 There are two possible actions:
7859 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7860 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7862 - Return a new rtx, which the caller returns directly. */
7864 static rtx
7865 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7866 enum rtx_code in_code,
7867 enum rtx_code *next_code_ptr)
7869 rtx x = *x_ptr;
7870 enum rtx_code next_code = *next_code_ptr;
7871 enum rtx_code code = GET_CODE (x);
7872 int mode_width = GET_MODE_PRECISION (mode);
7873 rtx rhs, lhs;
7874 rtx new_rtx = 0;
7875 int i;
7876 rtx tem;
7877 scalar_int_mode inner_mode;
7878 bool equality_comparison = false;
7880 if (in_code == EQ)
7882 equality_comparison = true;
7883 in_code = COMPARE;
7886 /* Process depending on the code of this operation. If NEW_RTX is set
7887 nonzero, it will be returned. */
7889 switch (code)
7891 case ASHIFT:
7892 /* Convert shifts by constants into multiplications if inside
7893 an address. */
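/* E.g. inside a MEM, (ashift X (const_int 2)) is rewritten as
   (mult X (const_int 4)), the form preferred in addresses.  */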
7894 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7895 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7896 && INTVAL (XEXP (x, 1)) >= 0)
7898 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7899 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7901 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7902 if (GET_CODE (new_rtx) == NEG)
7904 new_rtx = XEXP (new_rtx, 0);
7905 multval = -multval;
7907 multval = trunc_int_for_mode (multval, mode);
7908 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7910 break;
7912 case PLUS:
7913 lhs = XEXP (x, 0);
7914 rhs = XEXP (x, 1);
7915 lhs = make_compound_operation (lhs, next_code);
7916 rhs = make_compound_operation (rhs, next_code);
7917 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7919 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7920 XEXP (lhs, 1));
7921 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7923 else if (GET_CODE (lhs) == MULT
7924 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7926 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7927 simplify_gen_unary (NEG, mode,
7928 XEXP (lhs, 1),
7929 mode));
7930 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7932 else
7934 SUBST (XEXP (x, 0), lhs);
7935 SUBST (XEXP (x, 1), rhs);
7937 maybe_swap_commutative_operands (x);
7938 return x;
7940 case MINUS:
7941 lhs = XEXP (x, 0);
7942 rhs = XEXP (x, 1);
7943 lhs = make_compound_operation (lhs, next_code);
7944 rhs = make_compound_operation (rhs, next_code);
7945 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7947 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7948 XEXP (rhs, 1));
7949 return simplify_gen_binary (PLUS, mode, tem, lhs);
7951 else if (GET_CODE (rhs) == MULT
7952 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7954 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7955 simplify_gen_unary (NEG, mode,
7956 XEXP (rhs, 1),
7957 mode));
7958 return simplify_gen_binary (PLUS, mode, tem, lhs);
7960 else
7962 SUBST (XEXP (x, 0), lhs);
7963 SUBST (XEXP (x, 1), rhs);
7964 return x;
7967 case AND:
7968 /* If the second operand is not a constant, we can't do anything
7969 with it. */
7970 if (!CONST_INT_P (XEXP (x, 1)))
7971 break;
7973 /* If the constant is a power of two minus one and the first operand
7974 is a logical right shift, make an extraction. */
7975 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7976 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7978 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7979 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7980 0, in_code == COMPARE);
7983 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7984 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7985 && subreg_lowpart_p (XEXP (x, 0))
7986 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
7987 &inner_mode)
7988 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7989 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7991 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
7992 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
7993 new_rtx = make_extraction (inner_mode, new_rtx, 0,
7994 XEXP (inner_x0, 1),
7995 i, 1, 0, in_code == COMPARE);
7997 /* If we narrowed the mode when dropping the subreg, then we lose. */
7998 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
7999 new_rtx = NULL;
8001 /* If that didn't give anything, see if the AND simplifies on
8002 its own. */
8003 if (!new_rtx && i >= 0)
8005 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8006 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8007 0, in_code == COMPARE);
8010 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8011 else if ((GET_CODE (XEXP (x, 0)) == XOR
8012 || GET_CODE (XEXP (x, 0)) == IOR)
8013 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8014 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8015 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8017 /* Apply the distributive law, and then try to make extractions. */
8018 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8019 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8020 XEXP (x, 1)),
8021 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8022 XEXP (x, 1)));
8023 new_rtx = make_compound_operation (new_rtx, in_code);
8026 /* If we have (and (rotate X C) M) and C is larger than the number
8027 of bits in M, this is an extraction. */
8029 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8030 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8031 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8032 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8034 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8035 new_rtx = make_extraction (mode, new_rtx,
8036 (GET_MODE_PRECISION (mode)
8037 - INTVAL (XEXP (XEXP (x, 0), 1))),
8038 NULL_RTX, i, 1, 0, in_code == COMPARE);
8041 /* On machines without logical shifts, if the operand of the AND is
8042 a logical shift and our mask turns off all the propagated sign
8043 bits, we can replace the logical shift with an arithmetic shift. */
8044 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8045 && !have_insn_for (LSHIFTRT, mode)
8046 && have_insn_for (ASHIFTRT, mode)
8047 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8048 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8049 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8050 && mode_width <= HOST_BITS_PER_WIDE_INT)
8052 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8054 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8055 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8056 SUBST (XEXP (x, 0),
8057 gen_rtx_ASHIFTRT (mode,
8058 make_compound_operation
8059 (XEXP (XEXP (x, 0), 0), next_code),
8060 XEXP (XEXP (x, 0), 1)));
8063 /* If the constant is one less than a power of two, this might be
8064 representable by an extraction even if no shift is present.
8065 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8066 we are in a COMPARE. */
8067 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8068 new_rtx = make_extraction (mode,
8069 make_compound_operation (XEXP (x, 0),
8070 next_code),
8071 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8073 /* If we are in a comparison and this is an AND with a power of two,
8074 convert this into the appropriate bit extract. */
8075 else if (in_code == COMPARE
8076 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8077 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8078 new_rtx = make_extraction (mode,
8079 make_compound_operation (XEXP (x, 0),
8080 next_code),
8081 i, NULL_RTX, 1, 1, 0, 1);
8083 /* If the non-constant operand is a paradoxical subreg of a register or memory and
8084 the constant (limited to the smaller mode) has only zero bits where
8085 the sub expression has known zero bits, this can be expressed as
8086 a zero_extend. */
8087 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8089 rtx sub;
8091 sub = XEXP (XEXP (x, 0), 0);
8092 machine_mode sub_mode = GET_MODE (sub);
8093 if ((REG_P (sub) || MEM_P (sub))
8094 && GET_MODE_PRECISION (sub_mode) < mode_width)
8096 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8097 unsigned HOST_WIDE_INT mask;
8099 /* original AND constant with all the known zero bits set */
8100 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8101 if ((mask & mode_mask) == mode_mask)
8103 new_rtx = make_compound_operation (sub, next_code);
8104 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8105 GET_MODE_PRECISION (sub_mode),
8106 1, 0, in_code == COMPARE);
8111 break;
8113 case LSHIFTRT:
8114 /* If the sign bit is known to be zero, replace this with an
8115 arithmetic shift. */
8116 if (have_insn_for (ASHIFTRT, mode)
8117 && ! have_insn_for (LSHIFTRT, mode)
8118 && mode_width <= HOST_BITS_PER_WIDE_INT
8119 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8121 new_rtx = gen_rtx_ASHIFTRT (mode,
8122 make_compound_operation (XEXP (x, 0),
8123 next_code),
8124 XEXP (x, 1));
8125 break;
8128 /* fall through */
8130 case ASHIFTRT:
8131 lhs = XEXP (x, 0);
8132 rhs = XEXP (x, 1);
8134 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8135 this is a SIGN_EXTRACT. */
8136 if (CONST_INT_P (rhs)
8137 && GET_CODE (lhs) == ASHIFT
8138 && CONST_INT_P (XEXP (lhs, 1))
8139 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8140 && INTVAL (XEXP (lhs, 1)) >= 0
8141 && INTVAL (rhs) < mode_width)
8143 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8144 new_rtx = make_extraction (mode, new_rtx,
8145 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8146 NULL_RTX, mode_width - INTVAL (rhs),
8147 code == LSHIFTRT, 0, in_code == COMPARE);
8148 break;
8151 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8152 If so, try to merge the shifts into a SIGN_EXTEND. We could
8153 also do this for some cases of SIGN_EXTRACT, but it doesn't
8154 seem worth the effort; the case checked for occurs on Alpha. */
8156 if (!OBJECT_P (lhs)
8157 && ! (GET_CODE (lhs) == SUBREG
8158 && (OBJECT_P (SUBREG_REG (lhs))))
8159 && CONST_INT_P (rhs)
8160 && INTVAL (rhs) >= 0
8161 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8162 && INTVAL (rhs) < mode_width
8163 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8164 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
8165 0, NULL_RTX, mode_width - INTVAL (rhs),
8166 code == LSHIFTRT, 0, in_code == COMPARE);
8168 break;
8170 case SUBREG:
8171 /* Call ourselves recursively on the inner expression. If we are
8172 narrowing the object and it has a different RTL code from
8173 what it originally had, handle this SUBREG via force_to_mode.
8175 rtx inner = SUBREG_REG (x), simplified;
8176 enum rtx_code subreg_code = in_code;
8178 /* If the SUBREG is masking of a logical right shift,
8179 make an extraction. */
8180 if (GET_CODE (inner) == LSHIFTRT
8181 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8182 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8183 && CONST_INT_P (XEXP (inner, 1))
8184 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8185 && subreg_lowpart_p (x))
8187 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8188 int width = GET_MODE_PRECISION (inner_mode)
8189 - INTVAL (XEXP (inner, 1));
8190 if (width > mode_width)
8191 width = mode_width;
8192 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8193 width, 1, 0, in_code == COMPARE);
8194 break;
8197 /* If in_code is COMPARE, it isn't always safe to pass it through
8198 to the recursive make_compound_operation call. */
8199 if (subreg_code == COMPARE
8200 && (!subreg_lowpart_p (x)
8201 || GET_CODE (inner) == SUBREG
8202 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8203 is (const_int 0), rather than
8204 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8205 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8206 for non-equality comparisons against 0 is not equivalent
8207 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8208 || (GET_CODE (inner) == AND
8209 && CONST_INT_P (XEXP (inner, 1))
8210 && partial_subreg_p (x)
8211 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8212 >= GET_MODE_BITSIZE (mode) - 1)))
8213 subreg_code = SET;
8215 tem = make_compound_operation (inner, subreg_code);
8217 simplified
8218 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8219 if (simplified)
8220 tem = simplified;
8222 if (GET_CODE (tem) != GET_CODE (inner)
8223 && partial_subreg_p (x)
8224 && subreg_lowpart_p (x))
8226 rtx newer
8227 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8229 /* If we have something other than a SUBREG, we might have
8230 done an expansion, so rerun ourselves. */
8231 if (GET_CODE (newer) != SUBREG)
8232 newer = make_compound_operation (newer, in_code);
8234 /* force_to_mode can expand compounds. If it just re-expanded
8235 the compound, use gen_lowpart to convert to the desired
8236 mode. */
8237 if (rtx_equal_p (newer, x)
8238 /* Likewise if it re-expanded the compound only partially.
8239 This happens for SUBREG of ZERO_EXTRACT if they extract
8240 the same number of bits. */
8241 || (GET_CODE (newer) == SUBREG
8242 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8243 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8244 && GET_CODE (inner) == AND
8245 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8246 return gen_lowpart (GET_MODE (x), tem);
8248 return newer;
8251 if (simplified)
8252 return tem;
8254 break;
8256 default:
8257 break;
8260 if (new_rtx)
8261 *x_ptr = gen_lowpart (mode, new_rtx);
8262 *next_code_ptr = next_code;
8263 return NULL_RTX;
8266 /* Look at the expression rooted at X. Look for expressions
8267 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8268 Form these expressions.
8270 Return the new rtx, usually just X.
8272 Also, for machines like the VAX that don't have logical shift insns,
8273 try to convert logical to arithmetic shift operations in cases where
8274 they are equivalent. This undoes the canonicalizations to logical
8275 shifts done elsewhere.
8277 We try, as much as possible, to re-use rtl expressions to save memory.
8279 IN_CODE says what kind of expression we are processing. Normally, it is
8280 SET. In a memory address it is MEM. When processing the arguments of
8281 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8282 precisely it is an equality comparison against zero. */
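/* For example, (and (lshiftrt X (const_int 3)) (const_int 15)) can be
   rewritten as roughly (zero_extract X (const_int 4) (const_int 3)),
   extracting the four bits that the shift-and-mask pair selects.  */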
8285 make_compound_operation (rtx x, enum rtx_code in_code)
8287 enum rtx_code code = GET_CODE (x);
8288 const char *fmt;
8289 int i, j;
8290 enum rtx_code next_code;
8291 rtx new_rtx, tem;
8293 /* Select the code to be used in recursive calls. Once we are inside an
8294 address, we stay there. If we have a comparison, set to COMPARE,
8295 but once inside, go back to our default of SET. */
8297 next_code = (code == MEM ? MEM
8298 : ((code == COMPARE || COMPARISON_P (x))
8299 && XEXP (x, 1) == const0_rtx) ? COMPARE
8300 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8302 scalar_int_mode mode;
8303 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8305 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8306 &next_code);
8307 if (new_rtx)
8308 return new_rtx;
8309 code = GET_CODE (x);
8312 /* Now recursively process each operand of this operation. We need to
8313 handle ZERO_EXTEND specially so that we don't lose track of the
8314 inner mode. */
8315 if (code == ZERO_EXTEND)
8317 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8318 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8319 new_rtx, GET_MODE (XEXP (x, 0)));
8320 if (tem)
8321 return tem;
8322 SUBST (XEXP (x, 0), new_rtx);
8323 return x;
8326 fmt = GET_RTX_FORMAT (code);
8327 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8328 if (fmt[i] == 'e')
8330 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8331 SUBST (XEXP (x, i), new_rtx);
8333 else if (fmt[i] == 'E')
8334 for (j = 0; j < XVECLEN (x, i); j++)
8336 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8337 SUBST (XVECEXP (x, i, j), new_rtx);
8340 maybe_swap_commutative_operands (x);
8341 return x;
8344 /* Given M, see if it is a value that would select a field of bits
8345 within an item, but not the entire word. Return -1 if not.
8346 Otherwise, return the starting position of the field, where 0 is the
8347 low-order bit.
8349 *PLEN is set to the length of the field. */
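/* For example, M == 0x78 (binary 1111000) selects a 4-bit field starting
   at bit 3, so the return value is 3 and *PLEN is 4; M == 0x5 does not
   select a contiguous field and yields -1.  */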
8351 static int
8352 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8354 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8355 int pos = m ? ctz_hwi (m) : -1;
8356 int len = 0;
8358 if (pos >= 0)
8359 /* Now shift off the low-order zero bits and see if we have a
8360 power of two minus 1. */
8361 len = exact_log2 ((m >> pos) + 1);
8363 if (len <= 0)
8364 pos = -1;
8366 *plen = len;
8367 return pos;
8370 /* If X refers to a register that equals REG in value, replace these
8371 references with REG. */
8372 static rtx
8373 canon_reg_for_combine (rtx x, rtx reg)
8375 rtx op0, op1, op2;
8376 const char *fmt;
8377 int i;
8378 bool copied;
8380 enum rtx_code code = GET_CODE (x);
8381 switch (GET_RTX_CLASS (code))
8383 case RTX_UNARY:
8384 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8385 if (op0 != XEXP (x, 0))
8386 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8387 GET_MODE (reg));
8388 break;
8390 case RTX_BIN_ARITH:
8391 case RTX_COMM_ARITH:
8392 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8393 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8394 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8395 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8396 break;
8398 case RTX_COMPARE:
8399 case RTX_COMM_COMPARE:
8400 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8401 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8402 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8403 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8404 GET_MODE (op0), op0, op1);
8405 break;
8407 case RTX_TERNARY:
8408 case RTX_BITFIELD_OPS:
8409 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8410 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8411 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8412 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8413 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8414 GET_MODE (op0), op0, op1, op2);
8415 /* FALLTHRU */
8417 case RTX_OBJ:
8418 if (REG_P (x))
8420 if (rtx_equal_p (get_last_value (reg), x)
8421 || rtx_equal_p (reg, get_last_value (x)))
8422 return reg;
8423 else
8424 break;
8427 /* fall through */
8429 default:
8430 fmt = GET_RTX_FORMAT (code);
8431 copied = false;
8432 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8433 if (fmt[i] == 'e')
8435 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8436 if (op != XEXP (x, i))
8438 if (!copied)
8440 copied = true;
8441 x = copy_rtx (x);
8443 XEXP (x, i) = op;
8446 else if (fmt[i] == 'E')
8448 int j;
8449 for (j = 0; j < XVECLEN (x, i); j++)
8451 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8452 if (op != XVECEXP (x, i, j))
8454 if (!copied)
8456 copied = true;
8457 x = copy_rtx (x);
8459 XVECEXP (x, i, j) = op;
8464 break;
8467 return x;
8470 /* Return X converted to MODE. If the value is already truncated to
8471 MODE we can just return a subreg even though in the general case we
8472 would need an explicit truncation. */
8474 static rtx
8475 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8477 if (!CONST_INT_P (x)
8478 && partial_subreg_p (mode, GET_MODE (x))
8479 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8480 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8482 /* Bit-cast X into an integer mode. */
8483 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8484 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8485 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8486 x, GET_MODE (x));
8489 return gen_lowpart (mode, x);
8492 /* See if X can be simplified knowing that we will only refer to it in
8493 MODE and will only refer to those bits that are nonzero in MASK.
8494 If other bits are being computed or if masking operations are done
8495 that select a superset of the bits in MASK, they can sometimes be
8496 ignored.
8498 Return a possibly simplified expression, but always convert X to
8499 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8501 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8502 are all off in X. This is used when X will be complemented, by either
8503 NOT, NEG, or XOR. */
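/* For example, forcing (const_int 0x1234) into a mode with MASK == 0xff
   yields (const_int 0x34), and forcing (and X (const_int 255)) with
   MASK == 0xff lets the AND case below drop the now-redundant AND.  */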
8505 static rtx
8506 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8507 int just_select)
8509 enum rtx_code code = GET_CODE (x);
8510 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8511 machine_mode op_mode;
8512 unsigned HOST_WIDE_INT nonzero;
8514 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8515 code below will do the wrong thing since the mode of such an
8516 expression is VOIDmode.
8518 Also do nothing if X is a CLOBBER; this can happen if X was
8519 the return value from a call to gen_lowpart. */
8520 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8521 return x;
8523 /* We want to perform the operation in its present mode unless we know
8524 that the operation is valid in MODE, in which case we do the operation
8525 in MODE. */
8526 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8527 && have_insn_for (code, mode))
8528 ? mode : GET_MODE (x));
8530 /* It is not valid to do a right-shift in a narrower mode
8531 than the one it came in with. */
8532 if ((code == LSHIFTRT || code == ASHIFTRT)
8533 && partial_subreg_p (mode, GET_MODE (x)))
8534 op_mode = GET_MODE (x);
8536 /* Truncate MASK to fit OP_MODE. */
8537 if (op_mode)
8538 mask &= GET_MODE_MASK (op_mode);
8540 /* Determine what bits of X are guaranteed to be (non)zero. */
8541 nonzero = nonzero_bits (x, mode);
8543 /* If none of the bits in X are needed, return a zero. */
8544 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8545 x = const0_rtx;
8547 /* If X is a CONST_INT, return a new one. Do this here since the
8548 test below will fail. */
8549 if (CONST_INT_P (x))
8551 if (SCALAR_INT_MODE_P (mode))
8552 return gen_int_mode (INTVAL (x) & mask, mode);
8553 else
8555 x = GEN_INT (INTVAL (x) & mask);
8556 return gen_lowpart_common (mode, x);
8560 /* If X is narrower than MODE and we want all the bits in X's mode, just
8561 get X in the proper mode. */
8562 if (paradoxical_subreg_p (mode, GET_MODE (x))
8563 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8564 return gen_lowpart (mode, x);
8566 /* We can ignore the effect of a SUBREG if it narrows the mode or
8567 if the constant masks to zero all the bits the mode doesn't have. */
8568 if (GET_CODE (x) == SUBREG
8569 && subreg_lowpart_p (x)
8570 && (partial_subreg_p (x)
8571 || (0 == (mask
8572 & GET_MODE_MASK (GET_MODE (x))
8573 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8574 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8576 scalar_int_mode int_mode, xmode;
8577 if (is_a <scalar_int_mode> (mode, &int_mode)
8578 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8579 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8580 integer too. */
8581 return force_int_to_mode (x, int_mode, xmode,
8582 as_a <scalar_int_mode> (op_mode),
8583 mask, just_select);
8585 return gen_lowpart_or_truncate (mode, x);
8588 /* Subroutine of force_to_mode that handles cases in which both X and
8589 the result are scalar integers. MODE is the mode of the result,
8590 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8591 is preferred for simplified versions of X. The other arguments
8592 are as for force_to_mode. */
8594 static rtx
8595 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8596 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8597 int just_select)
8599 enum rtx_code code = GET_CODE (x);
8600 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8601 unsigned HOST_WIDE_INT fuller_mask;
8602 rtx op0, op1, temp;
8604 /* When we have an arithmetic operation, or a shift whose count we
8605 do not know, we need to assume that all bits up to the highest-order
8606 bit in MASK will be needed. This is how we form such a mask. */
8607 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8608 fuller_mask = HOST_WIDE_INT_M1U;
8609 else
8610 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8611 - 1);
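/* Worked example (illustrative, not in the original sources): if MASK is
   0x14 (bits 2 and 4 set), the highest-order bit needed is bit 4, so
   FULLER_MASK becomes (1 << 5) - 1 == 0x1f, i.e. every bit up to and
   including bit 4.  */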
8613 switch (code)
8615 case CLOBBER:
8616 /* If X is a (clobber (const_int)), return it since we know we are
8617 generating something that won't match. */
8618 return x;
8620 case SIGN_EXTEND:
8621 case ZERO_EXTEND:
8622 case ZERO_EXTRACT:
8623 case SIGN_EXTRACT:
8624 x = expand_compound_operation (x);
8625 if (GET_CODE (x) != code)
8626 return force_to_mode (x, mode, mask, next_select);
8627 break;
8629 case TRUNCATE:
8630 /* Similarly for a truncate. */
8631 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8633 case AND:
8634 /* If this is an AND with a constant, convert it into an AND
8635 whose constant is the AND of that constant with MASK. If it
8636 remains an AND of MASK, delete it since it is redundant. */
8638 if (CONST_INT_P (XEXP (x, 1)))
8640 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8641 mask & INTVAL (XEXP (x, 1)));
8642 xmode = op_mode;
8644 /* If X is still an AND, see if it is an AND with a mask that
8645 is just some low-order bits. If so, and it is MASK, we don't
8646 need it. */
8648 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8649 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8650 x = XEXP (x, 0);
8652 /* If it remains an AND, try making another AND with the bits
8653 in the mode mask that aren't in MASK turned on. If the
8654 constant in the AND is wide enough, this might make a
8655 cheaper constant. */
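/* Hypothetical example (constants made up): with XMODE == SImode and
   MASK == 0xffff, an (and FOO (const_int 0xfffe)) may be rewritten with
   CVAL == 0xfffe | 0xffff0000 == 0xfffffffe, i.e.
   (and FOO (const_int -2)); some targets load -2 more cheaply than
   0xfffe, and the extra high bits lie outside MASK so they are harmless.  */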
8657 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8658 && GET_MODE_MASK (xmode) != mask
8659 && HWI_COMPUTABLE_MODE_P (xmode))
8661 unsigned HOST_WIDE_INT cval
8662 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8663 rtx y;
8665 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8666 gen_int_mode (cval, xmode));
8667 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8668 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8669 x = y;
8672 break;
8675 goto binop;
8677 case PLUS:
8678 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8679 low-order bits (as in an alignment operation) and FOO is already
8680 aligned to that boundary, mask C1 to that boundary as well.
8681 This may eliminate that PLUS and, later, the AND. */
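/* For instance (illustrative only): if MASK is -8 (an alignment mask
   keeping all but the low three bits), FOO is known to have its low
   three bits clear, and X is (plus FOO (const_int 4)), then the 4 is
   masked down to 0 and the whole PLUS collapses to FOO, which in turn
   lets the enclosing AND disappear.  */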
8684 unsigned int width = GET_MODE_PRECISION (mode);
8685 unsigned HOST_WIDE_INT smask = mask;
8687 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8688 number, sign extend it. */
8690 if (width < HOST_BITS_PER_WIDE_INT
8691 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8692 smask |= HOST_WIDE_INT_M1U << width;
8694 if (CONST_INT_P (XEXP (x, 1))
8695 && pow2p_hwi (- smask)
8696 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8697 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8698 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8699 (INTVAL (XEXP (x, 1)) & smask)),
8700 mode, smask, next_select);
8703 /* fall through */
8705 case MULT:
8706 /* Substituting into the operands of a widening MULT is not likely to
8707 create RTL matching a machine insn. */
8708 if (code == MULT
8709 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8710 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8711 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8712 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8713 && REG_P (XEXP (XEXP (x, 0), 0))
8714 && REG_P (XEXP (XEXP (x, 1), 0)))
8715 return gen_lowpart_or_truncate (mode, x);
8717 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8718 most significant bit in MASK since carries from those bits will
8719 affect the bits we are interested in. */
8720 mask = fuller_mask;
8721 goto binop;
8723 case MINUS:
8724 /* If X is (minus C Y) where C's least set bit is larger than any bit
8725 in the mask, then we may replace with (neg Y). */
8726 if (CONST_INT_P (XEXP (x, 0))
8727 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8729 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8730 return force_to_mode (x, mode, mask, next_select);
8733 /* Similarly, if C contains every bit in the fuller_mask, then we may
8734 replace with (not Y). */
8735 if (CONST_INT_P (XEXP (x, 0))
8736 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8738 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8739 return force_to_mode (x, mode, mask, next_select);
8742 mask = fuller_mask;
8743 goto binop;
8745 case IOR:
8746 case XOR:
8747 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8748 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8749 operation which may be a bitfield extraction. Ensure that the
8750 constant we form is not wider than the mode of X. */
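/* A sketch of the transformation (REG number and constants made up):

	(ior:SI (lshiftrt:SI (reg:SI 100) (const_int 8)) (const_int 18))

   becomes, provided MASK covers the constant and the nonzero-bits
   conditions below hold,

	(lshiftrt:SI (ior:SI (reg:SI 100) (const_int 4608)) (const_int 8))

   which the caller may then recognize as part of a bit-field extract.  */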
8752 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8753 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8754 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8755 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8756 && CONST_INT_P (XEXP (x, 1))
8757 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8758 + floor_log2 (INTVAL (XEXP (x, 1))))
8759 < GET_MODE_PRECISION (xmode))
8760 && (UINTVAL (XEXP (x, 1))
8761 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8763 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8764 << INTVAL (XEXP (XEXP (x, 0), 1)),
8765 xmode);
8766 temp = simplify_gen_binary (GET_CODE (x), xmode,
8767 XEXP (XEXP (x, 0), 0), temp);
8768 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8769 XEXP (XEXP (x, 0), 1));
8770 return force_to_mode (x, mode, mask, next_select);
8773 binop:
8774 /* For most binary operations, just propagate into the operation and
8775 change the mode if we have an operation of that mode. */
8777 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8778 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8780 /* If we ended up truncating both operands, truncate the result of the
8781 operation instead. */
8782 if (GET_CODE (op0) == TRUNCATE
8783 && GET_CODE (op1) == TRUNCATE)
8785 op0 = XEXP (op0, 0);
8786 op1 = XEXP (op1, 0);
8789 op0 = gen_lowpart_or_truncate (op_mode, op0);
8790 op1 = gen_lowpart_or_truncate (op_mode, op1);
8792 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8794 x = simplify_gen_binary (code, op_mode, op0, op1);
8795 xmode = op_mode;
8797 break;
8799 case ASHIFT:
8800 /* For left shifts, do the same, but just for the first operand.
8801 However, we cannot do anything with shifts where we cannot
8802 guarantee that the counts are smaller than the size of the mode
8803 because such a count will have a different meaning in a
8804 wider mode. */
8806 if (! (CONST_INT_P (XEXP (x, 1))
8807 && INTVAL (XEXP (x, 1)) >= 0
8808 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8809 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8810 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8811 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8812 break;
8814 /* If the shift count is a constant and we can do arithmetic in
8815 the mode of the shift, refine which bits we need. Otherwise, use the
8816 conservative form of the mask. */
8817 if (CONST_INT_P (XEXP (x, 1))
8818 && INTVAL (XEXP (x, 1)) >= 0
8819 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8820 && HWI_COMPUTABLE_MODE_P (op_mode))
8821 mask >>= INTVAL (XEXP (x, 1));
8822 else
8823 mask = fuller_mask;
8825 op0 = gen_lowpart_or_truncate (op_mode,
8826 force_to_mode (XEXP (x, 0), op_mode,
8827 mask, next_select));
8829 if (op_mode != xmode || op0 != XEXP (x, 0))
8831 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8832 xmode = op_mode;
8834 break;
8836 case LSHIFTRT:
8837 /* Here we can only do something if the shift count is a constant,
8838 this shift constant is valid for the host, and we can do arithmetic
8839 in OP_MODE. */
8841 if (CONST_INT_P (XEXP (x, 1))
8842 && INTVAL (XEXP (x, 1)) >= 0
8843 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8844 && HWI_COMPUTABLE_MODE_P (op_mode))
8846 rtx inner = XEXP (x, 0);
8847 unsigned HOST_WIDE_INT inner_mask;
8849 /* Select the mask of the bits we need for the shift operand. */
8850 inner_mask = mask << INTVAL (XEXP (x, 1));
8852 /* We can only change the mode of the shift if we can do arithmetic
8853 in the mode of the shift and INNER_MASK is no wider than the
8854 width of X's mode. */
8855 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8856 op_mode = xmode;
8858 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8860 if (xmode != op_mode || inner != XEXP (x, 0))
8862 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8863 xmode = op_mode;
8867 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8868 shift and AND produces only copies of the sign bit (C2 is one less
8869 than a power of two), we can do this with just a shift. */
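/* Illustrative instance (not from the original sources): in SImode, if
   FOO is known to have at least four sign-bit copies, then

	(and:SI (lshiftrt:SI FOO (const_int 28)) (const_int 7))

   keeps only copies of the sign bit and can be rewritten as
   (lshiftrt:SI FOO (const_int 29)), i.e. shifting by
   32 - exact_log2 (7 + 1).  */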
8871 if (GET_CODE (x) == LSHIFTRT
8872 && CONST_INT_P (XEXP (x, 1))
8873 /* The shift puts one of the sign bit copies in the least significant
8874 bit. */
8875 && ((INTVAL (XEXP (x, 1))
8876 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8877 >= GET_MODE_PRECISION (xmode))
8878 && pow2p_hwi (mask + 1)
8879 /* Number of bits left after the shift must be more than the mask
8880 needs. */
8881 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8882 <= GET_MODE_PRECISION (xmode))
8883 /* Must be more sign bit copies than the mask needs. */
8884 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8885 >= exact_log2 (mask + 1)))
8886 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8887 GEN_INT (GET_MODE_PRECISION (xmode)
8888 - exact_log2 (mask + 1)));
8890 goto shiftrt;
8892 case ASHIFTRT:
8893 /* If we are just looking for the sign bit, we don't need this shift at
8894 all, even if it has a variable count. */
8895 if (val_signbit_p (xmode, mask))
8896 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8898 /* If this is a shift by a constant, get a mask that contains those bits
8899 that are not copies of the sign bit. We then have two cases: If
8900 MASK only includes those bits, this can be a logical shift, which may
8901 allow simplifications. If MASK is a single-bit field not within
8902 those bits, we are requesting a copy of the sign bit and hence can
8903 shift the sign bit to the appropriate location. */
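/* Two illustrative cases (made-up constants, SImode):
   - MASK == 1, X == (ashiftrt FOO (const_int 5)): all requested bits lie
     below the sign-bit copies, so a logical (lshiftrt FOO (const_int 5))
     is enough.
   - MASK == 0x40000000, same X: bit 30 is a copy of the sign bit after
     the shift, so we instead emit (lshiftrt FOO (const_int 1)) to place
     the sign bit there directly.  */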
8905 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8906 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8908 unsigned HOST_WIDE_INT nonzero;
8909 int i;
8911 /* If the considered data is wider than HOST_WIDE_INT, we can't
8912 represent a mask for all its bits in a single scalar.
8913 But we only care about the lower bits, so calculate these. */
8915 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8917 nonzero = HOST_WIDE_INT_M1U;
8919 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8920 is the number of bits a full-width mask would have set.
8921 We need only shift if these are fewer than nonzero can
8922 hold. If not, we must keep all bits set in nonzero. */
8924 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8925 < HOST_BITS_PER_WIDE_INT)
8926 nonzero >>= INTVAL (XEXP (x, 1))
8927 + HOST_BITS_PER_WIDE_INT
8928 - GET_MODE_PRECISION (xmode);
8930 else
8932 nonzero = GET_MODE_MASK (xmode);
8933 nonzero >>= INTVAL (XEXP (x, 1));
8936 if ((mask & ~nonzero) == 0)
8938 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8939 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8940 if (GET_CODE (x) != ASHIFTRT)
8941 return force_to_mode (x, mode, mask, next_select);
8944 else if ((i = exact_log2 (mask)) >= 0)
8946 x = simplify_shift_const
8947 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
8948 GET_MODE_PRECISION (xmode) - 1 - i);
8950 if (GET_CODE (x) != ASHIFTRT)
8951 return force_to_mode (x, mode, mask, next_select);
8955 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8956 even if the shift count isn't a constant. */
8957 if (mask == 1)
8958 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
8960 shiftrt:
8962 /* If this is a zero- or sign-extension operation that just affects bits
8963 we don't care about, remove it. Be sure the call above returned
8964 something that is still a shift. */
8966 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8967 && CONST_INT_P (XEXP (x, 1))
8968 && INTVAL (XEXP (x, 1)) >= 0
8969 && (INTVAL (XEXP (x, 1))
8970 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
8971 && GET_CODE (XEXP (x, 0)) == ASHIFT
8972 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8973 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8974 next_select);
8976 break;
8978 case ROTATE:
8979 case ROTATERT:
8980 /* If the shift count is constant and we can do computations
8981 in the mode of X, compute where the bits we care about are.
8982 Otherwise, we can't do anything. Don't change the mode of
8983 the shift or propagate MODE into the shift, though. */
8984 if (CONST_INT_P (XEXP (x, 1))
8985 && INTVAL (XEXP (x, 1)) >= 0)
8987 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8988 xmode, gen_int_mode (mask, xmode),
8989 XEXP (x, 1));
8990 if (temp && CONST_INT_P (temp))
8991 x = simplify_gen_binary (code, xmode,
8992 force_to_mode (XEXP (x, 0), xmode,
8993 INTVAL (temp), next_select),
8994 XEXP (x, 1));
8996 break;
8998 case NEG:
8999 /* If we just want the low-order bit, the NEG isn't needed since it
9000 won't change the low-order bit. */
9001 if (mask == 1)
9002 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9004 /* We need any bits less significant than the most significant bit in
9005 MASK since carries from those bits will affect the bits we are
9006 interested in. */
9007 mask = fuller_mask;
9008 goto unop;
9010 case NOT:
9011 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9012 same as the XOR case above. Ensure that the constant we form is not
9013 wider than the mode of X. */
9015 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9016 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9017 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9018 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9019 < GET_MODE_PRECISION (xmode))
9020 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9022 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9023 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9024 x = simplify_gen_binary (LSHIFTRT, xmode,
9025 temp, XEXP (XEXP (x, 0), 1));
9027 return force_to_mode (x, mode, mask, next_select);
9030 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9031 use the full mask inside the NOT. */
9032 mask = fuller_mask;
9034 unop:
9035 op0 = gen_lowpart_or_truncate (op_mode,
9036 force_to_mode (XEXP (x, 0), mode, mask,
9037 next_select));
9038 if (op_mode != xmode || op0 != XEXP (x, 0))
9040 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9041 xmode = op_mode;
9043 break;
9045 case NE:
9046 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9047 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9048 which is equal to STORE_FLAG_VALUE. */
9049 if ((mask & ~STORE_FLAG_VALUE) == 0
9050 && XEXP (x, 1) == const0_rtx
9051 && GET_MODE (XEXP (x, 0)) == mode
9052 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9053 && (nonzero_bits (XEXP (x, 0), mode)
9054 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9055 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9057 break;
9059 case IF_THEN_ELSE:
9060 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9061 written in a narrower mode. We play it safe and do not do so. */
9063 op0 = gen_lowpart_or_truncate (xmode,
9064 force_to_mode (XEXP (x, 1), mode,
9065 mask, next_select));
9066 op1 = gen_lowpart_or_truncate (xmode,
9067 force_to_mode (XEXP (x, 2), mode,
9068 mask, next_select));
9069 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9070 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9071 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9072 op0, op1);
9073 break;
9075 default:
9076 break;
9079 /* Ensure we return a value of the proper mode. */
9080 return gen_lowpart_or_truncate (mode, x);
9083 /* Return nonzero if X is an expression that has one of two values depending on
9084 whether some other value is zero or nonzero. In that case, we return the
9085 value that is being tested, *PTRUE is set to the value if the rtx being
9086 returned has a nonzero value, and *PFALSE is set to the other alternative.
9088 If we return zero, we set *PTRUE and *PFALSE to X. */
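/* A minimal illustration (REG number invented): for
   X == (ne:SI (reg:SI 100) (const_int 0)) this function returns
   (reg:SI 100), sets *PTRUE to const_true_rtx and *PFALSE to
   const0_rtx; callers can then reason about the two arms separately.  */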
9090 static rtx
9091 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9093 machine_mode mode = GET_MODE (x);
9094 enum rtx_code code = GET_CODE (x);
9095 rtx cond0, cond1, true0, true1, false0, false1;
9096 unsigned HOST_WIDE_INT nz;
9097 scalar_int_mode int_mode;
9099 /* If we are comparing a value against zero, we are done. */
9100 if ((code == NE || code == EQ)
9101 && XEXP (x, 1) == const0_rtx)
9103 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9104 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9105 return XEXP (x, 0);
9108 /* If this is a unary operation whose operand has one of two values, apply
9109 our opcode to compute those values. */
9110 else if (UNARY_P (x)
9111 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9113 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9114 *pfalse = simplify_gen_unary (code, mode, false0,
9115 GET_MODE (XEXP (x, 0)));
9116 return cond0;
9119 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9120 make can't possibly match and would suppress other optimizations. */
9121 else if (code == COMPARE)
9124 /* If this is a binary operation, see if either side has only one of two
9125 values. If either one does or if both do and they are conditional on
9126 the same value, compute the new true and false values. */
9127 else if (BINARY_P (x))
9129 rtx op0 = XEXP (x, 0);
9130 rtx op1 = XEXP (x, 1);
9131 cond0 = if_then_else_cond (op0, &true0, &false0);
9132 cond1 = if_then_else_cond (op1, &true1, &false1);
9134 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9135 && (REG_P (op0) || REG_P (op1)))
9137 /* Try to enable a simplification by undoing work done by
9138 if_then_else_cond if it converted a REG into something more
9139 complex. */
9140 if (REG_P (op0))
9142 cond0 = 0;
9143 true0 = false0 = op0;
9145 else
9147 cond1 = 0;
9148 true1 = false1 = op1;
9152 if ((cond0 != 0 || cond1 != 0)
9153 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9155 /* If if_then_else_cond returned zero, then true/false are the
9156 same rtl. We must copy one of them to prevent invalid rtl
9157 sharing. */
9158 if (cond0 == 0)
9159 true0 = copy_rtx (true0);
9160 else if (cond1 == 0)
9161 true1 = copy_rtx (true1);
9163 if (COMPARISON_P (x))
9165 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9166 true0, true1);
9167 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9168 false0, false1);
9170 else
9172 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9173 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9176 return cond0 ? cond0 : cond1;
9179 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9180 operands is zero when the other is nonzero, and vice-versa,
9181 and STORE_FLAG_VALUE is 1 or -1. */
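/* Sketch of the shape being matched (purely illustrative): with
   STORE_FLAG_VALUE == 1, something like

	(plus (mult (eq A B) (reg C)) (mult (ne A B) (reg D)))

   is (reg C) when A == B and (reg D) otherwise, so we can return the
   (eq A B) condition with *PTRUE == (mult (reg C) const_true_rtx) and
   *PFALSE == (mult (reg D) const_true_rtx).  */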
9183 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9184 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9185 || code == UMAX)
9186 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9188 rtx op0 = XEXP (XEXP (x, 0), 1);
9189 rtx op1 = XEXP (XEXP (x, 1), 1);
9191 cond0 = XEXP (XEXP (x, 0), 0);
9192 cond1 = XEXP (XEXP (x, 1), 0);
9194 if (COMPARISON_P (cond0)
9195 && COMPARISON_P (cond1)
9196 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9197 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9198 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9199 || ((swap_condition (GET_CODE (cond0))
9200 == reversed_comparison_code (cond1, NULL))
9201 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9202 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9203 && ! side_effects_p (x))
9205 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9206 *pfalse = simplify_gen_binary (MULT, mode,
9207 (code == MINUS
9208 ? simplify_gen_unary (NEG, mode,
9209 op1, mode)
9210 : op1),
9211 const_true_rtx);
9212 return cond0;
9216 /* Similarly for MULT, AND and UMIN, except that for these the result
9217 is always zero. */
9218 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9219 && (code == MULT || code == AND || code == UMIN)
9220 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9222 cond0 = XEXP (XEXP (x, 0), 0);
9223 cond1 = XEXP (XEXP (x, 1), 0);
9225 if (COMPARISON_P (cond0)
9226 && COMPARISON_P (cond1)
9227 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9228 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9229 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9230 || ((swap_condition (GET_CODE (cond0))
9231 == reversed_comparison_code (cond1, NULL))
9232 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9233 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9234 && ! side_effects_p (x))
9236 *ptrue = *pfalse = const0_rtx;
9237 return cond0;
9242 else if (code == IF_THEN_ELSE)
9244 /* If we have IF_THEN_ELSE already, extract the condition and
9245 canonicalize it if it is NE or EQ. */
9246 cond0 = XEXP (x, 0);
9247 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9248 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9249 return XEXP (cond0, 0);
9250 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9252 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9253 return XEXP (cond0, 0);
9255 else
9256 return cond0;
9259 /* If X is a SUBREG, we can narrow both the true and false values
9260 of the inner expression, if there is a condition. */
9261 else if (code == SUBREG
9262 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9263 &true0, &false0)))
9265 true0 = simplify_gen_subreg (mode, true0,
9266 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9267 false0 = simplify_gen_subreg (mode, false0,
9268 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9269 if (true0 && false0)
9271 *ptrue = true0;
9272 *pfalse = false0;
9273 return cond0;
9277 /* If X is a constant, this isn't special and will cause confusion
9278 if we treat it as such. Likewise if it is equivalent to a constant. */
9279 else if (CONSTANT_P (x)
9280 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9283 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9284 will be least confusing to the rest of the compiler. */
9285 else if (mode == BImode)
9287 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9288 return x;
9291 /* If X is known to be either 0 or -1, those are the true and
9292 false values when testing X. */
9293 else if (x == constm1_rtx || x == const0_rtx
9294 || (is_a <scalar_int_mode> (mode, &int_mode)
9295 && (num_sign_bit_copies (x, int_mode)
9296 == GET_MODE_PRECISION (int_mode))))
9298 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9299 return x;
9302 /* Likewise for 0 or a single bit. */
9303 else if (HWI_COMPUTABLE_MODE_P (mode)
9304 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9306 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9307 return x;
9310 /* Otherwise fail; show no condition with true and false values the same. */
9311 *ptrue = *pfalse = x;
9312 return 0;
9315 /* Return the value of expression X given the fact that condition COND
9316 is known to be true when applied to REG as its first operand and VAL
9317 as its second. X is known to not be shared and so can be modified in
9318 place.
9320 We only handle the simplest cases, and specifically those cases that
9321 arise with IF_THEN_ELSE expressions. */
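/* Small illustration (invented operands): known_cond applied to
   X == (abs:SI (reg:SI 100)) with COND == GE, REG == (reg:SI 100) and
   VAL == const0_rtx returns (reg:SI 100), since the absolute value of a
   known-nonnegative quantity is the quantity itself.  */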
9323 static rtx
9324 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9326 enum rtx_code code = GET_CODE (x);
9327 const char *fmt;
9328 int i, j;
9330 if (side_effects_p (x))
9331 return x;
9333 /* If either operand of the condition is a floating point value,
9334 then we have to avoid collapsing an EQ comparison. */
9335 if (cond == EQ
9336 && rtx_equal_p (x, reg)
9337 && ! FLOAT_MODE_P (GET_MODE (x))
9338 && ! FLOAT_MODE_P (GET_MODE (val)))
9339 return val;
9341 if (cond == UNEQ && rtx_equal_p (x, reg))
9342 return val;
9344 /* If X is (abs REG) and we know something about REG's relationship
9345 with zero, we may be able to simplify this. */
9347 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9348 switch (cond)
9350 case GE: case GT: case EQ:
9351 return XEXP (x, 0);
9352 case LT: case LE:
9353 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9354 XEXP (x, 0),
9355 GET_MODE (XEXP (x, 0)));
9356 default:
9357 break;
9360 /* The only other cases we handle are MIN, MAX, and comparisons if the
9361 operands are the same as REG and VAL. */
9363 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9365 if (rtx_equal_p (XEXP (x, 0), val))
9367 std::swap (val, reg);
9368 cond = swap_condition (cond);
9371 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9373 if (COMPARISON_P (x))
9375 if (comparison_dominates_p (cond, code))
9376 return const_true_rtx;
9378 code = reversed_comparison_code (x, NULL);
9379 if (code != UNKNOWN
9380 && comparison_dominates_p (cond, code))
9381 return const0_rtx;
9382 else
9383 return x;
9385 else if (code == SMAX || code == SMIN
9386 || code == UMIN || code == UMAX)
9388 int unsignedp = (code == UMIN || code == UMAX);
9390 /* Do not reverse the condition when it is NE or EQ.
9391 This is because we cannot conclude anything about
9392 the value of 'SMAX (x, y)' when x is not equal to y,
9393 but we can when x equals y. */
9394 if ((code == SMAX || code == UMAX)
9395 && ! (cond == EQ || cond == NE))
9396 cond = reverse_condition (cond);
9398 switch (cond)
9400 case GE: case GT:
9401 return unsignedp ? x : XEXP (x, 1);
9402 case LE: case LT:
9403 return unsignedp ? x : XEXP (x, 0);
9404 case GEU: case GTU:
9405 return unsignedp ? XEXP (x, 1) : x;
9406 case LEU: case LTU:
9407 return unsignedp ? XEXP (x, 0) : x;
9408 default:
9409 break;
9414 else if (code == SUBREG)
9416 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9417 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9419 if (SUBREG_REG (x) != r)
9421 /* We must simplify subreg here, before we lose track of the
9422 original inner_mode. */
9423 new_rtx = simplify_subreg (GET_MODE (x), r,
9424 inner_mode, SUBREG_BYTE (x));
9425 if (new_rtx)
9426 return new_rtx;
9427 else
9428 SUBST (SUBREG_REG (x), r);
9431 return x;
9433 /* We don't have to handle SIGN_EXTEND here, because even in the
9434 case of replacing something with a modeless CONST_INT, a
9435 CONST_INT is already (supposed to be) a valid sign extension for
9436 its narrower mode, which implies it's already properly
9437 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9438 story is different. */
9439 else if (code == ZERO_EXTEND)
9441 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9442 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9444 if (XEXP (x, 0) != r)
9446 /* We must simplify the zero_extend here, before we lose
9447 track of the original inner_mode. */
9448 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9449 r, inner_mode);
9450 if (new_rtx)
9451 return new_rtx;
9452 else
9453 SUBST (XEXP (x, 0), r);
9456 return x;
9459 fmt = GET_RTX_FORMAT (code);
9460 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9462 if (fmt[i] == 'e')
9463 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9464 else if (fmt[i] == 'E')
9465 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9466 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9467 cond, reg, val));
9470 return x;
9473 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9474 assignment as a field assignment. */
9476 static int
9477 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9479 if (widen_x && GET_MODE (x) != GET_MODE (y))
9481 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9482 return 0;
9483 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9484 return 0;
9485 /* For big endian, adjust the memory offset. */
9486 if (BYTES_BIG_ENDIAN)
9487 x = adjust_address_nv (x, GET_MODE (y),
9488 -subreg_lowpart_offset (GET_MODE (x),
9489 GET_MODE (y)));
9490 else
9491 x = adjust_address_nv (x, GET_MODE (y), 0);
9494 if (x == y || rtx_equal_p (x, y))
9495 return 1;
9497 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9498 return 0;
9500 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9501 Note that all SUBREGs of MEM are paradoxical; otherwise they
9502 would have been rewritten. */
9503 if (MEM_P (x) && GET_CODE (y) == SUBREG
9504 && MEM_P (SUBREG_REG (y))
9505 && rtx_equal_p (SUBREG_REG (y),
9506 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9507 return 1;
9509 if (MEM_P (y) && GET_CODE (x) == SUBREG
9510 && MEM_P (SUBREG_REG (x))
9511 && rtx_equal_p (SUBREG_REG (x),
9512 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9513 return 1;
9515 /* We used to see if get_last_value of X and Y were the same but that's
9516 not correct. In one direction, we'll cause the assignment to have
9517 the wrong destination and in the other, we'll import a register into this
9518 insn that might already have been dead. So fail if none of the
9519 above cases are true. */
9520 return 0;
9523 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9524 Return that assignment if so.
9526 We only handle the most common cases. */
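/* One of those common cases, shown for illustration (POS stands for any
   valid bit position): a SET whose source is
   (and (rotate (const_int -2) POS) DEST) clears a single bit of DEST and
   is typically rewritten as
   (set (zero_extract DEST (const_int 1) POS) (const_int 0))
   when make_extraction succeeds.  */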
9528 static rtx
9529 make_field_assignment (rtx x)
9531 rtx dest = SET_DEST (x);
9532 rtx src = SET_SRC (x);
9533 rtx assign;
9534 rtx rhs, lhs;
9535 HOST_WIDE_INT c1;
9536 HOST_WIDE_INT pos;
9537 unsigned HOST_WIDE_INT len;
9538 rtx other;
9540 /* All the rules in this function are specific to scalar integers. */
9541 scalar_int_mode mode;
9542 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9543 return x;
9545 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9546 a clear of a one-bit field. We will have changed it to
9547 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9548 for a SUBREG. */
9550 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9551 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9552 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9553 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9555 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9556 1, 1, 1, 0);
9557 if (assign != 0)
9558 return gen_rtx_SET (assign, const0_rtx);
9559 return x;
9562 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9563 && subreg_lowpart_p (XEXP (src, 0))
9564 && partial_subreg_p (XEXP (src, 0))
9565 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9566 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9567 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9568 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9570 assign = make_extraction (VOIDmode, dest, 0,
9571 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9572 1, 1, 1, 0);
9573 if (assign != 0)
9574 return gen_rtx_SET (assign, const0_rtx);
9575 return x;
9578 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9579 one-bit field. */
9580 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9581 && XEXP (XEXP (src, 0), 0) == const1_rtx
9582 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9584 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9585 1, 1, 1, 0);
9586 if (assign != 0)
9587 return gen_rtx_SET (assign, const1_rtx);
9588 return x;
9591 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9592 SRC is an AND with all bits of that field set, then we can discard
9593 the AND. */
9594 if (GET_CODE (dest) == ZERO_EXTRACT
9595 && CONST_INT_P (XEXP (dest, 1))
9596 && GET_CODE (src) == AND
9597 && CONST_INT_P (XEXP (src, 1)))
9599 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9600 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9601 unsigned HOST_WIDE_INT ze_mask;
9603 if (width >= HOST_BITS_PER_WIDE_INT)
9604 ze_mask = -1;
9605 else
9606 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9608 /* Complete overlap. We can remove the source AND. */
9609 if ((and_mask & ze_mask) == ze_mask)
9610 return gen_rtx_SET (dest, XEXP (src, 0));
9612 /* Partial overlap. We can reduce the source AND. */
9613 if ((and_mask & ze_mask) != and_mask)
9615 src = gen_rtx_AND (mode, XEXP (src, 0),
9616 gen_int_mode (and_mask & ze_mask, mode));
9617 return gen_rtx_SET (dest, src);
9621 /* The other case we handle is assignments into a constant-position
9622 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9623 a mask that has all one bits except for a group of zero bits and
9624 OTHER is known to have zeros where C1 has ones, this is such an
9625 assignment. Compute the position and length from C1. Shift OTHER
9626 to the appropriate position, force it to the required mode, and
9627 make the extraction. Check for the AND in both operands. */
9629 /* One or more SUBREGs might obscure the constant-position field
9630 assignment. The first one we are likely to encounter is an outer
9631 narrowing SUBREG, which we can just strip for the purposes of
9632 identifying the constant-field assignment. */
9633 scalar_int_mode src_mode = mode;
9634 if (GET_CODE (src) == SUBREG
9635 && subreg_lowpart_p (src)
9636 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9637 src = SUBREG_REG (src);
9639 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9640 return x;
9642 rhs = expand_compound_operation (XEXP (src, 0));
9643 lhs = expand_compound_operation (XEXP (src, 1));
9645 if (GET_CODE (rhs) == AND
9646 && CONST_INT_P (XEXP (rhs, 1))
9647 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9648 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9649 /* The second SUBREG that might get in the way is a paradoxical
9650 SUBREG around the first operand of the AND. We want to
9651 pretend the operand is as wide as the destination here. We
9652 do this by adjusting the MEM to wider mode for the sole
9653 purpose of the call to rtx_equal_for_field_assignment_p. Also
9654 note this trick only works for MEMs. */
9655 else if (GET_CODE (rhs) == AND
9656 && paradoxical_subreg_p (XEXP (rhs, 0))
9657 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9658 && CONST_INT_P (XEXP (rhs, 1))
9659 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9660 dest, true))
9661 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9662 else if (GET_CODE (lhs) == AND
9663 && CONST_INT_P (XEXP (lhs, 1))
9664 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9665 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9666 /* The second SUBREG that might get in the way is a paradoxical
9667 SUBREG around the first operand of the AND. We want to
9668 pretend the operand is as wide as the destination here. We
9669 do this by adjusting the MEM to wider mode for the sole
9670 purpose of the call to rtx_equal_for_field_assignment_p. Also
9671 note this trick only works for MEMs. */
9672 else if (GET_CODE (lhs) == AND
9673 && paradoxical_subreg_p (XEXP (lhs, 0))
9674 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9675 && CONST_INT_P (XEXP (lhs, 1))
9676 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9677 dest, true))
9678 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9679 else
9680 return x;
9682 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9683 if (pos < 0
9684 || pos + len > GET_MODE_PRECISION (mode)
9685 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9686 || (c1 & nonzero_bits (other, mode)) != 0)
9687 return x;
9689 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9690 if (assign == 0)
9691 return x;
9693 /* The mode to use for the source is the mode of the assignment, or of
9694 what is inside a possible STRICT_LOW_PART. */
9695 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9696 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9698 /* Shift OTHER right POS places and make it the source, restricting it
9699 to the proper length and mode. */
9701 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9702 src_mode, other, pos),
9703 dest);
9704 src = force_to_mode (src, new_mode,
9705 len >= HOST_BITS_PER_WIDE_INT
9706 ? HOST_WIDE_INT_M1U
9707 : (HOST_WIDE_INT_1U << len) - 1,
9710 /* If SRC is masked by an AND that does not make a difference in
9711 the value being stored, strip it. */
9712 if (GET_CODE (assign) == ZERO_EXTRACT
9713 && CONST_INT_P (XEXP (assign, 1))
9714 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9715 && GET_CODE (src) == AND
9716 && CONST_INT_P (XEXP (src, 1))
9717 && UINTVAL (XEXP (src, 1))
9718 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9719 src = XEXP (src, 0);
9721 return gen_rtx_SET (assign, src);
9724 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9725 if so. */
9727 static rtx
9728 apply_distributive_law (rtx x)
9730 enum rtx_code code = GET_CODE (x);
9731 enum rtx_code inner_code;
9732 rtx lhs, rhs, other;
9733 rtx tem;
9735 /* Distributivity is not true for floating point as it can change the
9736 value. So we don't do it unless -funsafe-math-optimizations. */
9737 if (FLOAT_MODE_P (GET_MODE (x))
9738 && ! flag_unsafe_math_optimizations)
9739 return x;
9741 /* The outer operation can only be one of the following: */
9742 if (code != IOR && code != AND && code != XOR
9743 && code != PLUS && code != MINUS)
9744 return x;
9746 lhs = XEXP (x, 0);
9747 rhs = XEXP (x, 1);
9749 /* If either operand is a primitive we can't do anything, so get out
9750 fast. */
9751 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9752 return x;
9754 lhs = expand_compound_operation (lhs);
9755 rhs = expand_compound_operation (rhs);
9756 inner_code = GET_CODE (lhs);
9757 if (inner_code != GET_CODE (rhs))
9758 return x;
9760 /* See if the inner and outer operations distribute. */
9761 switch (inner_code)
9763 case LSHIFTRT:
9764 case ASHIFTRT:
9765 case AND:
9766 case IOR:
9767 /* These all distribute except over PLUS. */
9768 if (code == PLUS || code == MINUS)
9769 return x;
9770 break;
9772 case MULT:
9773 if (code != PLUS && code != MINUS)
9774 return x;
9775 break;
9777 case ASHIFT:
9778 /* This is also a multiply, so it distributes over everything. */
9779 break;
9781 /* This used to handle SUBREG, but this turned out to be counter-
9782 productive, since (subreg (op ...)) usually is not handled by
9783 insn patterns, and this "optimization" therefore transformed
9784 recognizable patterns into unrecognizable ones. Therefore the
9785 SUBREG case was removed from here.
9787 It is possible that distributing SUBREG over arithmetic operations
9788 leads to an intermediate result that can then be optimized further,
9789 e.g. by moving the outer SUBREG to the other side of a SET as done
9790 in simplify_set. This seems to have been the original intent of
9791 handling SUBREGs here.
9793 However, with current GCC this does not appear to actually happen,
9794 at least on major platforms. If some case is found where removing
9795 the SUBREG case here prevents follow-on optimizations, distributing
9796 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9798 default:
9799 return x;
9802 /* Set LHS and RHS to the inner operands (A and B in the example
9803 above) and set OTHER to the common operand (C in the example).
9804 There is only one way to do this unless the inner operation is
9805 commutative. */
9806 if (COMMUTATIVE_ARITH_P (lhs)
9807 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9808 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9809 else if (COMMUTATIVE_ARITH_P (lhs)
9810 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9811 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9812 else if (COMMUTATIVE_ARITH_P (lhs)
9813 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9814 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9815 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9816 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9817 else
9818 return x;
9820 /* Form the new inner operation, seeing if it simplifies first. */
9821 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9823 /* There is one exception to the general way of distributing:
9824 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9825 if (code == XOR && inner_code == IOR)
9827 inner_code = AND;
9828 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9831 /* We may be able to continue distributing the result, so call
9832 ourselves recursively on the inner operation before forming the
9833 outer operation, which we return. */
9834 return simplify_gen_binary (inner_code, GET_MODE (x),
9835 apply_distributive_law (tem), other);
9838 /* See if X is of the form (* (+ A B) C), and if so convert to
9839 (+ (* A C) (* B C)) and try to simplify.
9841 Most of the time, this results in no change. However, if some of
9842 the operands are the same or inverses of each other, simplifications
9843 will result.
9845 For example, (and (ior A B) (not B)) can occur as the result of
9846 expanding a bit field assignment. When we apply the distributive
9847 law to this, we get (ior (and A (not B)) (and B (not B))),
9848 which then simplifies to (and A (not B)).
9850 Note that no checks happen on the validity of applying the inverse
9851 distributive law; checking would be pointless here since it can be done at the
9852 few places where this routine is called.
9854 N is the index of the term that is decomposed (the arithmetic operation,
9855 i.e. (+ A B) in the first example above). !N is the index of the term that
9856 is distributed, i.e. of C in the first example above. */
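/* Illustrative call (invented operands): for X == (and (ior A B) C) and
   N == 0 we build (ior (and A C) (and B C)), hand it to
   apply_distributive_law, and keep the result only if its topmost code
   is no longer the original outer code and it is cheaper than X.  */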
9857 static rtx
9858 distribute_and_simplify_rtx (rtx x, int n)
9860 machine_mode mode;
9861 enum rtx_code outer_code, inner_code;
9862 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9864 /* Distributivity is not true for floating point as it can change the
9865 value. So we don't do it unless -funsafe-math-optimizations. */
9866 if (FLOAT_MODE_P (GET_MODE (x))
9867 && ! flag_unsafe_math_optimizations)
9868 return NULL_RTX;
9870 decomposed = XEXP (x, n);
9871 if (!ARITHMETIC_P (decomposed))
9872 return NULL_RTX;
9874 mode = GET_MODE (x);
9875 outer_code = GET_CODE (x);
9876 distributed = XEXP (x, !n);
9878 inner_code = GET_CODE (decomposed);
9879 inner_op0 = XEXP (decomposed, 0);
9880 inner_op1 = XEXP (decomposed, 1);
9882 /* Special case (and (xor B C) (not A)), which is equivalent to
9883 (xor (ior A B) (ior A C)) */
9884 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9886 distributed = XEXP (distributed, 0);
9887 outer_code = IOR;
9890 if (n == 0)
9892 /* Distribute the second term. */
9893 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9894 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9896 else
9898 /* Distribute the first term. */
9899 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9900 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9903 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9904 new_op0, new_op1));
9905 if (GET_CODE (tmp) != outer_code
9906 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9907 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9908 return tmp;
9910 return NULL_RTX;
9913 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9914 in MODE. Return an equivalent form, if different from (and VAROP
9915 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
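/* Example of the kind of rewrite done below (constants invented): with
   MODE == SImode, VAROP == (plus FOO (const_int 16)) and CONSTOP == 15,
   the addend cannot affect the low four bits, so the result is simply
   (and FOO (const_int 15)), assuming FOO itself cannot be simplified
   further.  */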
9917 static rtx
9918 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9919 unsigned HOST_WIDE_INT constop)
9921 unsigned HOST_WIDE_INT nonzero;
9922 unsigned HOST_WIDE_INT orig_constop;
9923 rtx orig_varop;
9924 int i;
9926 orig_varop = varop;
9927 orig_constop = constop;
9928 if (GET_CODE (varop) == CLOBBER)
9929 return NULL_RTX;
9931 /* Simplify VAROP knowing that we will be only looking at some of the
9932 bits in it.
9934 Note by passing in CONSTOP, we guarantee that the bits not set in
9935 CONSTOP are not significant and will never be examined. We must
9936 ensure that is the case by explicitly masking out those bits
9937 before returning. */
9938 varop = force_to_mode (varop, mode, constop, 0);
9940 /* If VAROP is a CLOBBER, we will fail so return it. */
9941 if (GET_CODE (varop) == CLOBBER)
9942 return varop;
9944 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9945 to VAROP and return the new constant. */
9946 if (CONST_INT_P (varop))
9947 return gen_int_mode (INTVAL (varop) & constop, mode);
9949 /* See what bits may be nonzero in VAROP. Unlike the general case of
9950 a call to nonzero_bits, here we don't care about bits outside
9951 MODE. */
9953 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9955 /* Turn off all bits in the constant that are known to already be zero.
9956 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9957 which is tested below. */
9959 constop &= nonzero;
9961 /* If we don't have any bits left, return zero. */
9962 if (constop == 0)
9963 return const0_rtx;
9965 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9966 a power of two, we can replace this with an ASHIFT. */
9967 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9968 && (i = exact_log2 (constop)) >= 0)
9969 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9971 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9972 or XOR, then try to apply the distributive law. This may eliminate
9973 operations if either branch can be simplified because of the AND.
9974 It may also make some cases more complex, but those cases probably
9975 won't match a pattern either with or without this. */
9977 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9979 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
9980 return
9981 gen_lowpart
9982 (mode,
9983 apply_distributive_law
9984 (simplify_gen_binary (GET_CODE (varop), varop_mode,
9985 simplify_and_const_int (NULL_RTX, varop_mode,
9986 XEXP (varop, 0),
9987 constop),
9988 simplify_and_const_int (NULL_RTX, varop_mode,
9989 XEXP (varop, 1),
9990 constop))));
9993 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9994 the AND and see if one of the operands simplifies to zero. If so, we
9995 may eliminate it. */
9997 if (GET_CODE (varop) == PLUS
9998 && pow2p_hwi (constop + 1))
10000 rtx o0, o1;
10002 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10003 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10004 if (o0 == const0_rtx)
10005 return o1;
10006 if (o1 == const0_rtx)
10007 return o0;
10010 /* Make a SUBREG if necessary. If we can't make it, fail. */
10011 varop = gen_lowpart (mode, varop);
10012 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10013 return NULL_RTX;
10015 /* If we are only masking insignificant bits, return VAROP. */
10016 if (constop == nonzero)
10017 return varop;
10019 if (varop == orig_varop && constop == orig_constop)
10020 return NULL_RTX;
10022 /* Otherwise, return an AND. */
10023 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10027 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10028 in MODE.
10030 Return an equivalent form, if different from X. Otherwise, return X. If
10031 X is zero, we are to always construct the equivalent form. */
10033 static rtx
10034 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10035 unsigned HOST_WIDE_INT constop)
10037 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10038 if (tem)
10039 return tem;
10041 if (!x)
10042 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10043 gen_int_mode (constop, mode));
10044 if (GET_MODE (x) != mode)
10045 x = gen_lowpart (mode, x);
10046 return x;
10049 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10050 We don't care about bits outside of those defined in MODE.
10052 For most X this is simply GET_MODE_MASK (XMODE), but if X is
10053 a shift, AND, or zero_extract, we can do better. */
10055 static rtx
10056 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10057 scalar_int_mode mode,
10058 unsigned HOST_WIDE_INT *nonzero)
10060 rtx tem;
10061 reg_stat_type *rsp;
10063 /* If X is a register whose nonzero bits value is current, use it.
10064 Otherwise, if X is a register whose value we can find, use that
10065 value. Otherwise, use the previously-computed global nonzero bits
10066 for this register. */
10068 rsp = &reg_stat[REGNO (x)];
10069 if (rsp->last_set_value != 0
10070 && (rsp->last_set_mode == mode
10071 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10072 && GET_MODE_CLASS (mode) == MODE_INT))
10073 && ((rsp->last_set_label >= label_tick_ebb_start
10074 && rsp->last_set_label < label_tick)
10075 || (rsp->last_set_label == label_tick
10076 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10077 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10078 && REGNO (x) < reg_n_sets_max
10079 && REG_N_SETS (REGNO (x)) == 1
10080 && !REGNO_REG_SET_P
10081 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10082 REGNO (x)))))
10084 /* Note that, even if the precision of last_set_mode is lower than that
10085 of mode, record_value_for_reg invoked nonzero_bits on the register
10086 with nonzero_bits_mode (because last_set_mode is necessarily integral
10087 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10088 are all valid, hence in mode too since nonzero_bits_mode is defined
10089 to the largest HWI_COMPUTABLE_MODE_P mode. */
10090 *nonzero &= rsp->last_set_nonzero_bits;
10091 return NULL;
10094 tem = get_last_value (x);
10095 if (tem)
10097 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10098 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10100 return tem;
10103 if (nonzero_sign_valid && rsp->nonzero_bits)
10105 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10107 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10108 /* We don't know anything about the upper bits. */
10109 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10111 *nonzero &= mask;
10114 return NULL;
10117 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10118 end of X that are known to be equal to the sign bit. X will be used
10119 in mode MODE; the returned value will always be between 1 and the
10120 number of bits in MODE. */
10122 static rtx
10123 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10124 scalar_int_mode mode,
10125 unsigned int *result)
10127 rtx tem;
10128 reg_stat_type *rsp;
10130 rsp = &reg_stat[REGNO (x)];
10131 if (rsp->last_set_value != 0
10132 && rsp->last_set_mode == mode
10133 && ((rsp->last_set_label >= label_tick_ebb_start
10134 && rsp->last_set_label < label_tick)
10135 || (rsp->last_set_label == label_tick
10136 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10137 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10138 && REGNO (x) < reg_n_sets_max
10139 && REG_N_SETS (REGNO (x)) == 1
10140 && !REGNO_REG_SET_P
10141 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10142 REGNO (x)))))
10144 *result = rsp->last_set_sign_bit_copies;
10145 return NULL;
10148 tem = get_last_value (x);
10149 if (tem != 0)
10150 return tem;
10152 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10153 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10154 *result = rsp->sign_bit_copies;
10156 return NULL;
10159 /* Return the number of "extended" bits there are in X, when interpreted
10160 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10161 unsigned quantities, this is the number of high-order zero bits.
10162 For signed quantities, this is the number of copies of the sign bit
10163 minus 1. In both cases, this function returns the number of "spare"
10164 bits. For example, if two quantities for which this function returns
10165 at least 1 are added, the addition is known not to overflow.
10167 This function will always return 0 unless called during combine, which
10168 implies that it must be called from a define_split. */
10170 unsigned int
10171 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10173 if (nonzero_sign_valid == 0)
10174 return 0;
10176 scalar_int_mode int_mode;
10177 return (unsignedp
10178 ? (is_a <scalar_int_mode> (mode, &int_mode)
10179 && HWI_COMPUTABLE_MODE_P (int_mode)
10180 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10181 - floor_log2 (nonzero_bits (x, int_mode)))
10182 : 0)
10183 : num_sign_bit_copies (x, mode) - 1);
10186 /* This function is called from `simplify_shift_const' to merge two
10187 outer operations. Specifically, we have already found that we need
10188 to perform operation *POP0 with constant *PCONST0 at the outermost
10189 position. We would now like to also perform OP1 with constant CONST1
10190 (with *POP0 being done last).
10192 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10193 the resulting operation. *PCOMP_P is set to 1 if we would need to
10194 complement the innermost operand, otherwise it is unchanged.
10196 MODE is the mode in which the operation will be done. No bits outside
10197 the width of this mode matter. It is assumed that the width of this mode
10198 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10200 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10201 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10202 result is simply *PCONST0.
10204 If the resulting operation cannot be expressed as one operation, we
10205 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
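/* For instance (made-up constants): if *POP0 is IOR with *PCONST0 == 0x0f
   and OP1 is IOR with CONST1 == 0xf0, the two merge into a single IOR
   with constant 0xff.  If instead *POP0 is XOR and OP1 is AND with the
   same constant, the result is AND with *PCOMP_P set, i.e. the inner
   operand must be complemented first.  */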
10207 static int
10208 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10210 enum rtx_code op0 = *pop0;
10211 HOST_WIDE_INT const0 = *pconst0;
10213 const0 &= GET_MODE_MASK (mode);
10214 const1 &= GET_MODE_MASK (mode);
10216 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10217 if (op0 == AND)
10218 const1 &= const0;
10220 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10221 if OP0 is SET. */
10223 if (op1 == UNKNOWN || op0 == SET)
10224 return 1;
10226 else if (op0 == UNKNOWN)
10227 op0 = op1, const0 = const1;
10229 else if (op0 == op1)
10231 switch (op0)
10233 case AND:
10234 const0 &= const1;
10235 break;
10236 case IOR:
10237 const0 |= const1;
10238 break;
10239 case XOR:
10240 const0 ^= const1;
10241 break;
10242 case PLUS:
10243 const0 += const1;
10244 break;
10245 case NEG:
10246 op0 = UNKNOWN;
10247 break;
10248 default:
10249 break;
10253 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10254 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10255 return 0;
10257 /* If the two constants aren't the same, we can't do anything. The
10258 remaining six cases can all be done. */
10259 else if (const0 != const1)
10260 return 0;
10262 else
10263 switch (op0)
10265 case IOR:
10266 if (op1 == AND)
10267 /* (a & b) | b == b */
10268 op0 = SET;
10269 else /* op1 == XOR */
10270 /* (a ^ b) | b == a | b */
10272 break;
10274 case XOR:
10275 if (op1 == AND)
10276 /* (a & b) ^ b == (~a) & b */
10277 op0 = AND, *pcomp_p = 1;
10278 else /* op1 == IOR */
10279 /* (a | b) ^ b == a & ~b */
10280 op0 = AND, const0 = ~const0;
10281 break;
10283 case AND:
10284 if (op1 == IOR)
10285 /* (a | b) & b == b */
10286 op0 = SET;
10287 else /* op1 == XOR */
10288 /* (a ^ b) & b == (~a) & b */
10289 *pcomp_p = 1;
10290 break;
10291 default:
10292 break;
10295 /* Check for NO-OP cases. */
10296 const0 &= GET_MODE_MASK (mode);
10297 if (const0 == 0
10298 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10299 op0 = UNKNOWN;
10300 else if (const0 == 0 && op0 == AND)
10301 op0 = SET;
10302 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10303 && op0 == AND)
10304 op0 = UNKNOWN;
10306 *pop0 = op0;
10308 /* ??? Slightly redundant with the above mask, but not entirely.
10309 Moving this above means we'd have to sign-extend the mode mask
10310 for the final test. */
10311 if (op0 != UNKNOWN && op0 != NEG)
10312 *pconst0 = trunc_int_for_mode (const0, mode);
10314 return 1;
10317 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10318 the shift in. The original shift operation CODE is performed on OP in
10319 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10320 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10321 result of the shift is subject to operation OUTER_CODE with operand
10322 OUTER_CONST. */
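/* For example, an LSHIFTRT performed in ORIG_MODE == QImode can be widened
   to MODE == SImode when nonzero_bits shows that OP has no bits set outside
   the QImode mask, since the bits shifted in from the left are then zero
   either way.  */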
10324 static scalar_int_mode
10325 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10326 scalar_int_mode orig_mode, scalar_int_mode mode,
10327 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10329 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10331 /* In general we can't perform in wider mode for right shift and rotate. */
10332 switch (code)
10334 case ASHIFTRT:
10335 /* We can still widen if the bits brought in from the left are identical
10336 to the sign bit of ORIG_MODE. */
10337 if (num_sign_bit_copies (op, mode)
10338 > (unsigned) (GET_MODE_PRECISION (mode)
10339 - GET_MODE_PRECISION (orig_mode)))
10340 return mode;
10341 return orig_mode;
10343 case LSHIFTRT:
10344 /* Similarly here but with zero bits. */
10345 if (HWI_COMPUTABLE_MODE_P (mode)
10346 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10347 return mode;
10349 /* We can also widen if the bits brought in will be masked off. This
10350 operation is performed in ORIG_MODE. */
10351 if (outer_code == AND)
10353 int care_bits = low_bitmask_len (orig_mode, outer_const);
10355 if (care_bits >= 0
10356 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10357 return mode;
10359 /* fall through */
10361 case ROTATE:
10362 return orig_mode;
10364 case ROTATERT:
10365 gcc_unreachable ();
10367 default:
10368 return mode;
10372 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10373 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10374 if we cannot simplify it. Otherwise, return a simplified value.
10376 The shift is normally computed in the widest mode we find in VAROP, as
10377 long as it isn't a different number of words than RESULT_MODE. Exceptions
10378 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10380 static rtx
10381 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10382 rtx varop, int orig_count)
10384 enum rtx_code orig_code = code;
10385 rtx orig_varop = varop;
10386 int count;
10387 machine_mode mode = result_mode;
10388 machine_mode shift_mode;
10389 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10390 unsigned int mode_words
10391 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10392 /* We form (outer_op (code varop count) (outer_const)). */
10393 enum rtx_code outer_op = UNKNOWN;
10394 HOST_WIDE_INT outer_const = 0;
10395 int complement_p = 0;
10396 rtx new_rtx, x;
10398 /* Make sure to truncate the "natural" shift on the way in. We don't
10399 want to do this inside the loop as it makes it more difficult to
10400 combine shifts. */
10401 if (SHIFT_COUNT_TRUNCATED)
10402 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10404 /* If we were given an invalid count, don't do anything except exactly
10405 what was requested. */
10407 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10408 return NULL_RTX;
10410 count = orig_count;
10412 /* Unless one of the branches of the `if' in this loop does a `continue',
10413 we will `break' the loop after the `if'. */
10415 while (count != 0)
10417 /* If we have an operand of (clobber (const_int 0)), fail. */
10418 if (GET_CODE (varop) == CLOBBER)
10419 return NULL_RTX;
10421 /* Convert ROTATERT to ROTATE. */
10422 if (code == ROTATERT)
10424 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10425 code = ROTATE;
10426 count = bitsize - count;
10429 shift_mode = result_mode;
10430 if (shift_mode != mode)
10432 /* We only change the modes of scalar shifts. */
10433 int_mode = as_a <scalar_int_mode> (mode);
10434 int_result_mode = as_a <scalar_int_mode> (result_mode);
10435 shift_mode = try_widen_shift_mode (code, varop, count,
10436 int_result_mode, int_mode,
10437 outer_op, outer_const);
10440 scalar_int_mode shift_unit_mode
10441 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10443 /* Handle cases where the count is greater than the size of the mode
10444 minus 1. For ASHIFT, use the size minus one as the count (this can
10445 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10446 take the count modulo the size. For other shifts, the result is
10447 zero.
10449 Since these shifts are being produced by the compiler by combining
10450 multiple operations, each of which are defined, we know what the
10451 result is supposed to be. */
10453 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10455 if (code == ASHIFTRT)
10456 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10457 else if (code == ROTATE || code == ROTATERT)
10458 count %= GET_MODE_PRECISION (shift_unit_mode);
10459 else
10461 /* We can't simply return zero because there may be an
10462 outer op. */
10463 varop = const0_rtx;
10464 count = 0;
10465 break;
10469 /* If we discovered we had to complement VAROP, leave. Making a NOT
10470 here would cause an infinite loop. */
10471 if (complement_p)
10472 break;
10474 if (shift_mode == shift_unit_mode)
10476 /* An arithmetic right shift of a quantity known to be -1 or 0
10477 is a no-op. */
10478 if (code == ASHIFTRT
10479 && (num_sign_bit_copies (varop, shift_unit_mode)
10480 == GET_MODE_PRECISION (shift_unit_mode)))
10482 count = 0;
10483 break;
10486 /* If we are doing an arithmetic right shift and discarding all but
10487 the sign bit copies, this is equivalent to doing a shift by the
10488 bitsize minus one. Convert it into that shift because it will
10489 often allow other simplifications. */
10491 if (code == ASHIFTRT
10492 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10493 >= GET_MODE_PRECISION (shift_unit_mode)))
10494 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10496 /* We simplify the tests below and elsewhere by converting
10497 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10498 `make_compound_operation' will convert it to an ASHIFTRT for
10499 those machines (such as VAX) that don't have an LSHIFTRT. */
10500 if (code == ASHIFTRT
10501 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10502 && val_signbit_known_clear_p (shift_unit_mode,
10503 nonzero_bits (varop,
10504 shift_unit_mode)))
10505 code = LSHIFTRT;
10507 if (((code == LSHIFTRT
10508 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10509 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10510 || (code == ASHIFT
10511 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10512 && !((nonzero_bits (varop, shift_unit_mode) << count)
10513 & GET_MODE_MASK (shift_unit_mode))))
10514 && !side_effects_p (varop))
10515 varop = const0_rtx;
10518 switch (GET_CODE (varop))
10520 case SIGN_EXTEND:
10521 case ZERO_EXTEND:
10522 case SIGN_EXTRACT:
10523 case ZERO_EXTRACT:
10524 new_rtx = expand_compound_operation (varop);
10525 if (new_rtx != varop)
10527 varop = new_rtx;
10528 continue;
10530 break;
10532 case MEM:
10533 /* The following rules apply only to scalars. */
10534 if (shift_mode != shift_unit_mode)
10535 break;
10536 int_mode = as_a <scalar_int_mode> (mode);
10538 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10539 minus the width of a smaller mode, we can do this with a
10540 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10541 if ((code == ASHIFTRT || code == LSHIFTRT)
10542 && ! mode_dependent_address_p (XEXP (varop, 0),
10543 MEM_ADDR_SPACE (varop))
10544 && ! MEM_VOLATILE_P (varop)
10545 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10546 .exists (&tmode)))
10548 new_rtx = adjust_address_nv (varop, tmode,
10549 BYTES_BIG_ENDIAN ? 0
10550 : count / BITS_PER_UNIT);
10552 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10553 : ZERO_EXTEND, int_mode, new_rtx);
10554 count = 0;
10555 continue;
10557 break;
10559 case SUBREG:
10560 /* The following rules apply only to scalars. */
10561 if (shift_mode != shift_unit_mode)
10562 break;
10563 int_mode = as_a <scalar_int_mode> (mode);
10564 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10566 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10567 the same number of words as what we've seen so far. Then store
10568 the widest mode in MODE. */
10569 if (subreg_lowpart_p (varop)
10570 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10571 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10572 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10573 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10574 == mode_words
10575 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10577 varop = SUBREG_REG (varop);
10578 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10579 mode = inner_mode;
10580 continue;
10582 break;
10584 case MULT:
10585 /* Some machines use MULT instead of ASHIFT because MULT
10586 is cheaper. But it is still better on those machines to
10587 merge two shifts into one. */
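/* E.g. (mult X (const_int 8)) is rewritten here as (ashift X (const_int 3))
   so that it can be merged with the shift being simplified.  */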
10588 if (CONST_INT_P (XEXP (varop, 1))
10589 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10591 varop
10592 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10593 XEXP (varop, 0),
10594 GEN_INT (exact_log2 (
10595 UINTVAL (XEXP (varop, 1)))));
10596 continue;
10598 break;
10600 case UDIV:
10601 /* Similar, for when divides are cheaper. */
10602 if (CONST_INT_P (XEXP (varop, 1))
10603 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10605 varop
10606 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10607 XEXP (varop, 0),
10608 GEN_INT (exact_log2 (
10609 UINTVAL (XEXP (varop, 1)))));
10610 continue;
10612 break;
10614 case ASHIFTRT:
10615 /* If we are extracting just the sign bit of an arithmetic
10616 right shift, that shift is not needed. However, the sign
10617 bit of a wider mode may be different from what would be
10618 interpreted as the sign bit in a narrower mode, so, if
10619 the result is narrower, don't discard the shift. */
10620 if (code == LSHIFTRT
10621 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10622 && (GET_MODE_UNIT_BITSIZE (result_mode)
10623 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10625 varop = XEXP (varop, 0);
10626 continue;
10629 /* fall through */
10631 case LSHIFTRT:
10632 case ASHIFT:
10633 case ROTATE:
10634 /* The following rules apply only to scalars. */
10635 if (shift_mode != shift_unit_mode)
10636 break;
10637 int_mode = as_a <scalar_int_mode> (mode);
10638 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10639 int_result_mode = as_a <scalar_int_mode> (result_mode);
10641 /* Here we have two nested shifts. The result is usually the
10642 AND of a new shift with a mask. We compute the result below. */
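/* For example, with nothing known about X, (lshiftrt (ashift X 2) 3) in
   SImode ends up as (lshiftrt X 1) with an outer AND of 0x1fffffff: the
   counts are subtracted because the two shifts go in opposite directions,
   and the mask is the ASHIFT's nonzero bits shifted right by 3.  */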
10643 if (CONST_INT_P (XEXP (varop, 1))
10644 && INTVAL (XEXP (varop, 1)) >= 0
10645 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10646 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10647 && HWI_COMPUTABLE_MODE_P (int_mode))
10649 enum rtx_code first_code = GET_CODE (varop);
10650 unsigned int first_count = INTVAL (XEXP (varop, 1));
10651 unsigned HOST_WIDE_INT mask;
10652 rtx mask_rtx;
10654 /* We have one common special case. We can't do any merging if
10655 the inner code is an ASHIFTRT of a smaller mode. However, if
10656 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10657 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10658 we can convert it to
10659 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10660 This simplifies certain SIGN_EXTEND operations. */
10661 if (code == ASHIFT && first_code == ASHIFTRT
10662 && count == (GET_MODE_PRECISION (int_result_mode)
10663 - GET_MODE_PRECISION (int_varop_mode)))
10665 /* C3 has the low-order C1 bits zero. */
10667 mask = GET_MODE_MASK (int_mode)
10668 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10670 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10671 XEXP (varop, 0), mask);
10672 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10673 int_result_mode, varop, count);
10674 count = first_count;
10675 code = ASHIFTRT;
10676 continue;
10679 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10680 than C1 high-order bits equal to the sign bit, we can convert
10681 this to either an ASHIFT or an ASHIFTRT depending on the
10682 two counts.
10684 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10686 if (code == ASHIFTRT && first_code == ASHIFT
10687 && int_varop_mode == shift_unit_mode
10688 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10689 > first_count))
10691 varop = XEXP (varop, 0);
10692 count -= first_count;
10693 if (count < 0)
10695 count = -count;
10696 code = ASHIFT;
10699 continue;
10702 /* There are some cases we can't do. If CODE is ASHIFTRT,
10703 we can only do this if FIRST_CODE is also ASHIFTRT.
10705 We can't do the case when CODE is ROTATE and FIRST_CODE is
10706 ASHIFTRT.
10708 If the mode of this shift is not the mode of the outer shift,
10709 we can't do this if either shift is a right shift or ROTATE.
10711 Finally, we can't do any of these if the mode is too wide
10712 unless the codes are the same.
10714 Handle the case where the shift codes are the same
10715 first. */
10717 if (code == first_code)
10719 if (int_varop_mode != int_result_mode
10720 && (code == ASHIFTRT || code == LSHIFTRT
10721 || code == ROTATE))
10722 break;
10724 count += first_count;
10725 varop = XEXP (varop, 0);
10726 continue;
10729 if (code == ASHIFTRT
10730 || (code == ROTATE && first_code == ASHIFTRT)
10731 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10732 || (int_varop_mode != int_result_mode
10733 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10734 || first_code == ROTATE
10735 || code == ROTATE)))
10736 break;
10738 /* To compute the mask to apply after the shift, shift the
10739 nonzero bits of the inner shift the same way the
10740 outer shift will. */
10742 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10743 int_result_mode);
10745 mask_rtx
10746 = simplify_const_binary_operation (code, int_result_mode,
10747 mask_rtx, GEN_INT (count));
10749 /* Give up if we can't compute an outer operation to use. */
10750 if (mask_rtx == 0
10751 || !CONST_INT_P (mask_rtx)
10752 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10753 INTVAL (mask_rtx),
10754 int_result_mode, &complement_p))
10755 break;
10757 /* If the shifts are in the same direction, we add the
10758 counts. Otherwise, we subtract them. */
10759 if ((code == ASHIFTRT || code == LSHIFTRT)
10760 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10761 count += first_count;
10762 else
10763 count -= first_count;
10765 /* If COUNT is positive, the new shift is usually CODE,
10766 except for the two exceptions below, in which case it is
10767 FIRST_CODE. If the count is negative, FIRST_CODE should
10768 always be used. */
10769 if (count > 0
10770 && ((first_code == ROTATE && code == ASHIFT)
10771 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10772 code = first_code;
10773 else if (count < 0)
10774 code = first_code, count = -count;
10776 varop = XEXP (varop, 0);
10777 continue;
10780 /* If we have (A << B << C) for any shift, we can convert this to
10781 (A << C << B). This wins if A is a constant. Only try this if
10782 B is not a constant. */
10784 else if (GET_CODE (varop) == code
10785 && CONST_INT_P (XEXP (varop, 0))
10786 && !CONST_INT_P (XEXP (varop, 1)))
10788 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10789 sure the result will be masked. See PR70222. */
10790 if (code == LSHIFTRT
10791 && int_mode != int_result_mode
10792 && !merge_outer_ops (&outer_op, &outer_const, AND,
10793 GET_MODE_MASK (int_result_mode)
10794 >> orig_count, int_result_mode,
10795 &complement_p))
10796 break;
10797 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10798 up outer sign extension (often left and right shift) is
10799 hardly more efficient than the original. See PR70429. */
10800 if (code == ASHIFTRT && int_mode != int_result_mode)
10801 break;
10803 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10804 XEXP (varop, 0),
10805 GEN_INT (count));
10806 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10807 count = 0;
10808 continue;
10810 break;
10812 case NOT:
10813 /* The following rules apply only to scalars. */
10814 if (shift_mode != shift_unit_mode)
10815 break;
10817 /* Make this fit the case below. */
10818 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10819 continue;
10821 case IOR:
10822 case AND:
10823 case XOR:
10824 /* The following rules apply only to scalars. */
10825 if (shift_mode != shift_unit_mode)
10826 break;
10827 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10828 int_result_mode = as_a <scalar_int_mode> (result_mode);
10830 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10831 with C the size of VAROP - 1 and the shift is logical if
10832 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10833 we have an (le X 0) operation. If we have an arithmetic shift
10834 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10835 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10837 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10838 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10839 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10840 && (code == LSHIFTRT || code == ASHIFTRT)
10841 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10842 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10844 count = 0;
10845 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10846 const0_rtx);
10848 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10849 varop = gen_rtx_NEG (int_varop_mode, varop);
10851 continue;
10854 /* If we have (shift (logical)), move the logical to the outside
10855 to allow it to possibly combine with another logical and the
10856 shift to combine with another shift. This also canonicalizes to
10857 what a ZERO_EXTRACT looks like. Also, some machines have
10858 (and (shift)) insns. */
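/* For example, (lshiftrt (and X (const_int 0xf0)) 4) becomes (lshiftrt X 4)
   with an outer AND of 0x0f queued in OUTER_OP and OUTER_CONST.  */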
10860 if (CONST_INT_P (XEXP (varop, 1))
10861 /* We can't do this if we have (ashiftrt (xor)) and the
10862 constant has its sign bit set in shift_unit_mode with
10863 shift_unit_mode wider than result_mode. */
10864 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10865 && int_result_mode != shift_unit_mode
10866 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10867 shift_unit_mode))
10868 && (new_rtx = simplify_const_binary_operation
10869 (code, int_result_mode,
10870 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10871 GEN_INT (count))) != 0
10872 && CONST_INT_P (new_rtx)
10873 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10874 INTVAL (new_rtx), int_result_mode,
10875 &complement_p))
10877 varop = XEXP (varop, 0);
10878 continue;
10881 /* If we can't do that, try to simplify the shift in each arm of the
10882 logical expression, make a new logical expression, and apply
10883 the inverse distributive law. This also can't be done for
10884 (ashiftrt (xor)) where we've widened the shift and the constant
10885 changes the sign bit. */
10886 if (CONST_INT_P (XEXP (varop, 1))
10887 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10888 && int_result_mode != shift_unit_mode
10889 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10890 shift_unit_mode)))
10892 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10893 XEXP (varop, 0), count);
10894 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10895 XEXP (varop, 1), count);
10897 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10898 lhs, rhs);
10899 varop = apply_distributive_law (varop);
10901 count = 0;
10902 continue;
10904 break;
10906 case EQ:
10907 /* The following rules apply only to scalars. */
10908 if (shift_mode != shift_unit_mode)
10909 break;
10910 int_result_mode = as_a <scalar_int_mode> (result_mode);
10912 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10913 says that the sign bit can be tested, FOO has mode MODE, C is
10914 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10915 that may be nonzero. */
10916 if (code == LSHIFTRT
10917 && XEXP (varop, 1) == const0_rtx
10918 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10919 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10920 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10921 && STORE_FLAG_VALUE == -1
10922 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10923 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10924 int_result_mode, &complement_p))
10926 varop = XEXP (varop, 0);
10927 count = 0;
10928 continue;
10930 break;
10932 case NEG:
10933 /* The following rules apply only to scalars. */
10934 if (shift_mode != shift_unit_mode)
10935 break;
10936 int_result_mode = as_a <scalar_int_mode> (result_mode);
10938 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10939 than the number of bits in the mode is equivalent to A. */
10940 if (code == LSHIFTRT
10941 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10942 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10944 varop = XEXP (varop, 0);
10945 count = 0;
10946 continue;
10949 /* NEG commutes with ASHIFT since it is multiplication. Move the
10950 NEG outside to allow shifts to combine. */
10951 if (code == ASHIFT
10952 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10953 int_result_mode, &complement_p))
10955 varop = XEXP (varop, 0);
10956 continue;
10958 break;
10960 case PLUS:
10961 /* The following rules apply only to scalars. */
10962 if (shift_mode != shift_unit_mode)
10963 break;
10964 int_result_mode = as_a <scalar_int_mode> (result_mode);
10966 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10967 is one less than the number of bits in the mode is
10968 equivalent to (xor A 1). */
10969 if (code == LSHIFTRT
10970 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10971 && XEXP (varop, 1) == constm1_rtx
10972 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10973 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10974 int_result_mode, &complement_p))
10976 count = 0;
10977 varop = XEXP (varop, 0);
10978 continue;
10981 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10982 that might be nonzero in BAR are those being shifted out and those
10983 bits are known zero in FOO, we can replace the PLUS with FOO.
10984 Similarly in the other operand order. This code occurs when
10985 we are computing the size of a variable-size array. */
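/* For example, (lshiftrt (plus X (const_int 3)) 2) where the low two bits
   of X are known to be zero simplifies to (lshiftrt X 2): the nonzero bits
   of the constant 3 are all shifted out, and because they do not overlap
   the nonzero bits of X, the addition cannot carry into the bits that
   remain.  */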
10987 if ((code == ASHIFTRT || code == LSHIFTRT)
10988 && count < HOST_BITS_PER_WIDE_INT
10989 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
10990 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
10991 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
10993 varop = XEXP (varop, 0);
10994 continue;
10996 else if ((code == ASHIFTRT || code == LSHIFTRT)
10997 && count < HOST_BITS_PER_WIDE_INT
10998 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10999 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11000 >> count)
11001 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11002 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11004 varop = XEXP (varop, 1);
11005 continue;
11008 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11009 if (code == ASHIFT
11010 && CONST_INT_P (XEXP (varop, 1))
11011 && (new_rtx = simplify_const_binary_operation
11012 (ASHIFT, int_result_mode,
11013 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11014 GEN_INT (count))) != 0
11015 && CONST_INT_P (new_rtx)
11016 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11017 INTVAL (new_rtx), int_result_mode,
11018 &complement_p))
11020 varop = XEXP (varop, 0);
11021 continue;
11024 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11025 signbit', and attempt to change the PLUS to an XOR and move it to
11026 the outer operation as is done above in the AND/IOR/XOR case
11027 leg for shift(logical). See details in logical handling above
11028 for reasoning in doing so. */
11029 if (code == LSHIFTRT
11030 && CONST_INT_P (XEXP (varop, 1))
11031 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11032 && (new_rtx = simplify_const_binary_operation
11033 (code, int_result_mode,
11034 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11035 GEN_INT (count))) != 0
11036 && CONST_INT_P (new_rtx)
11037 && merge_outer_ops (&outer_op, &outer_const, XOR,
11038 INTVAL (new_rtx), int_result_mode,
11039 &complement_p))
11041 varop = XEXP (varop, 0);
11042 continue;
11045 break;
11047 case MINUS:
11048 /* The following rules apply only to scalars. */
11049 if (shift_mode != shift_unit_mode)
11050 break;
11051 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11053 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11054 with C the size of VAROP - 1 and the shift is logical if
11055 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11056 we have a (gt X 0) operation. If the shift is arithmetic with
11057 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11058 we have a (neg (gt X 0)) operation. */
11060 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11061 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11062 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11063 && (code == LSHIFTRT || code == ASHIFTRT)
11064 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11065 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11066 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11068 count = 0;
11069 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11070 const0_rtx);
11072 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11073 varop = gen_rtx_NEG (int_varop_mode, varop);
11075 continue;
11077 break;
11079 case TRUNCATE:
11080 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11081 if the truncate does not affect the value. */
11082 if (code == LSHIFTRT
11083 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11084 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11085 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11086 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11087 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11089 rtx varop_inner = XEXP (varop, 0);
11091 varop_inner
11092 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11093 XEXP (varop_inner, 0),
11094 GEN_INT
11095 (count + INTVAL (XEXP (varop_inner, 1))));
11096 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11097 count = 0;
11098 continue;
11100 break;
11102 default:
11103 break;
11106 break;
11109 shift_mode = result_mode;
11110 if (shift_mode != mode)
11112 /* We only change the modes of scalar shifts. */
11113 int_mode = as_a <scalar_int_mode> (mode);
11114 int_result_mode = as_a <scalar_int_mode> (result_mode);
11115 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11116 int_mode, outer_op, outer_const);
11119 /* We have now finished analyzing the shift. The result should be
11120 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11121 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11122 to the result of the shift. OUTER_CONST is the relevant constant,
11123 but we must turn off all bits turned off in the shift. */
11125 if (outer_op == UNKNOWN
11126 && orig_code == code && orig_count == count
11127 && varop == orig_varop
11128 && shift_mode == GET_MODE (varop))
11129 return NULL_RTX;
11131 /* Make a SUBREG if necessary. If we can't make it, fail. */
11132 varop = gen_lowpart (shift_mode, varop);
11133 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11134 return NULL_RTX;
11136 /* If we have an outer operation and we just made a shift, it is
11137 possible that we could have simplified the shift were it not
11138 for the outer operation. So try to do the simplification
11139 recursively. */
11141 if (outer_op != UNKNOWN)
11142 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11143 else
11144 x = NULL_RTX;
11146 if (x == NULL_RTX)
11147 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11149 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11150 turn off all the bits that the shift would have turned off. */
11151 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11152 /* We only change the modes of scalar shifts. */
11153 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11154 x, GET_MODE_MASK (result_mode) >> orig_count);
11156 /* Do the remainder of the processing in RESULT_MODE. */
11157 x = gen_lowpart_or_truncate (result_mode, x);
11159 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11160 operation. */
11161 if (complement_p)
11162 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11164 if (outer_op != UNKNOWN)
11166 int_result_mode = as_a <scalar_int_mode> (result_mode);
11168 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11169 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11170 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11172 if (outer_op == AND)
11173 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11174 else if (outer_op == SET)
11176 /* This means that we have determined that the result is
11177 equivalent to a constant. This should be rare. */
11178 if (!side_effects_p (x))
11179 x = GEN_INT (outer_const);
11181 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11182 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11183 else
11184 x = simplify_gen_binary (outer_op, int_result_mode, x,
11185 GEN_INT (outer_const));
11188 return x;
11191 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11192 The result of the shift is RESULT_MODE. If we cannot simplify it,
11193 return X or, if it is NULL, synthesize the expression with
11194 simplify_gen_binary. Otherwise, return a simplified value.
11196 The shift is normally computed in the widest mode we find in VAROP, as
11197 long as it isn't a different number of words than RESULT_MODE. Exceptions
11198 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11200 static rtx
11201 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11202 rtx varop, int count)
11204 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11205 if (tem)
11206 return tem;
11208 if (!x)
11209 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11210 if (GET_MODE (x) != result_mode)
11211 x = gen_lowpart (result_mode, x);
11212 return x;
11216 /* A subroutine of recog_for_combine. See there for arguments and
11217 return value. */
11219 static int
11220 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11222 rtx pat = *pnewpat;
11223 rtx pat_without_clobbers;
11224 int insn_code_number;
11225 int num_clobbers_to_add = 0;
11226 int i;
11227 rtx notes = NULL_RTX;
11228 rtx old_notes, old_pat;
11229 int old_icode;
11231 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11232 we use to indicate that something didn't match. If we find such a
11233 thing, force rejection. */
11234 if (GET_CODE (pat) == PARALLEL)
11235 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11236 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11237 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11238 return -1;
11240 old_pat = PATTERN (insn);
11241 old_notes = REG_NOTES (insn);
11242 PATTERN (insn) = pat;
11243 REG_NOTES (insn) = NULL_RTX;
11245 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11246 if (dump_file && (dump_flags & TDF_DETAILS))
11248 if (insn_code_number < 0)
11249 fputs ("Failed to match this instruction:\n", dump_file);
11250 else
11251 fputs ("Successfully matched this instruction:\n", dump_file);
11252 print_rtl_single (dump_file, pat);
11255 /* If it isn't, there is the possibility that we previously had an insn
11256 that clobbered some register as a side effect, but the combined
11257 insn doesn't need to do that. So try once more without the clobbers
11258 unless this represents an ASM insn. */
11260 if (insn_code_number < 0 && ! check_asm_operands (pat)
11261 && GET_CODE (pat) == PARALLEL)
11263 int pos;
11265 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11266 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11268 if (i != pos)
11269 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11270 pos++;
11273 SUBST_INT (XVECLEN (pat, 0), pos);
11275 if (pos == 1)
11276 pat = XVECEXP (pat, 0, 0);
11278 PATTERN (insn) = pat;
11279 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11280 if (dump_file && (dump_flags & TDF_DETAILS))
11282 if (insn_code_number < 0)
11283 fputs ("Failed to match this instruction:\n", dump_file);
11284 else
11285 fputs ("Successfully matched this instruction:\n", dump_file);
11286 print_rtl_single (dump_file, pat);
11290 pat_without_clobbers = pat;
11292 PATTERN (insn) = old_pat;
11293 REG_NOTES (insn) = old_notes;
11295 /* Recognize all noop sets, these will be killed by followup pass. */
11296 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11297 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11299 /* If we had any clobbers to add, make a new pattern that contains
11300 them. Then check to make sure that all of them are dead. */
11301 if (num_clobbers_to_add)
11303 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11304 rtvec_alloc (GET_CODE (pat) == PARALLEL
11305 ? (XVECLEN (pat, 0)
11306 + num_clobbers_to_add)
11307 : num_clobbers_to_add + 1));
11309 if (GET_CODE (pat) == PARALLEL)
11310 for (i = 0; i < XVECLEN (pat, 0); i++)
11311 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11312 else
11313 XVECEXP (newpat, 0, 0) = pat;
11315 add_clobbers (newpat, insn_code_number);
11317 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11318 i < XVECLEN (newpat, 0); i++)
11320 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11321 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11322 return -1;
11323 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11325 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11326 notes = alloc_reg_note (REG_UNUSED,
11327 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11330 pat = newpat;
11333 if (insn_code_number >= 0
11334 && insn_code_number != NOOP_MOVE_INSN_CODE)
11336 old_pat = PATTERN (insn);
11337 old_notes = REG_NOTES (insn);
11338 old_icode = INSN_CODE (insn);
11339 PATTERN (insn) = pat;
11340 REG_NOTES (insn) = notes;
11341 INSN_CODE (insn) = insn_code_number;
11343 /* Allow targets to reject combined insn. */
11344 if (!targetm.legitimate_combined_insn (insn))
11346 if (dump_file && (dump_flags & TDF_DETAILS))
11347 fputs ("Instruction not appropriate for target.",
11348 dump_file);
11350 /* Callers expect recog_for_combine to strip
11351 clobbers from the pattern on failure. */
11352 pat = pat_without_clobbers;
11353 notes = NULL_RTX;
11355 insn_code_number = -1;
11358 PATTERN (insn) = old_pat;
11359 REG_NOTES (insn) = old_notes;
11360 INSN_CODE (insn) = old_icode;
11363 *pnewpat = pat;
11364 *pnotes = notes;
11366 return insn_code_number;
11369 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11370 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11371 Return whether anything was so changed. */
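/* For example, (zero_extend:SI (subreg:QI (reg:SI R) 0)) is rewritten as
   (and:SI (reg:SI R) (const_int 255)); a ZERO_EXTRACT of the most
   significant bits becomes just an LSHIFTRT, since the shift already
   clears the high bits and no AND mask is needed.  */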
11373 static bool
11374 change_zero_ext (rtx pat)
11376 bool changed = false;
11377 rtx *src = &SET_SRC (pat);
11379 subrtx_ptr_iterator::array_type array;
11380 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11382 rtx x = **iter;
11383 scalar_int_mode mode, inner_mode;
11384 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11385 continue;
11386 int size;
11388 if (GET_CODE (x) == ZERO_EXTRACT
11389 && CONST_INT_P (XEXP (x, 1))
11390 && CONST_INT_P (XEXP (x, 2))
11391 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11392 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11394 size = INTVAL (XEXP (x, 1));
11396 int start = INTVAL (XEXP (x, 2));
11397 if (BITS_BIG_ENDIAN)
11398 start = GET_MODE_PRECISION (inner_mode) - size - start;
11400 if (start)
11401 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11402 else
11403 x = XEXP (x, 0);
11404 if (mode != inner_mode)
11405 x = gen_lowpart_SUBREG (mode, x);
11407 else if (GET_CODE (x) == ZERO_EXTEND
11408 && GET_CODE (XEXP (x, 0)) == SUBREG
11409 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11410 && !paradoxical_subreg_p (XEXP (x, 0))
11411 && subreg_lowpart_p (XEXP (x, 0)))
11413 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11414 size = GET_MODE_PRECISION (inner_mode);
11415 x = SUBREG_REG (XEXP (x, 0));
11416 if (GET_MODE (x) != mode)
11417 x = gen_lowpart_SUBREG (mode, x);
11419 else if (GET_CODE (x) == ZERO_EXTEND
11420 && REG_P (XEXP (x, 0))
11421 && HARD_REGISTER_P (XEXP (x, 0))
11422 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11424 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11425 size = GET_MODE_PRECISION (inner_mode);
11426 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11428 else
11429 continue;
11431 if (!(GET_CODE (x) == LSHIFTRT
11432 && CONST_INT_P (XEXP (x, 1))
11433 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11435 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11436 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11439 SUBST (**iter, x);
11440 changed = true;
11443 if (changed)
11444 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11445 maybe_swap_commutative_operands (**iter);
11447 rtx *dst = &SET_DEST (pat);
11448 scalar_int_mode mode;
11449 if (GET_CODE (*dst) == ZERO_EXTRACT
11450 && REG_P (XEXP (*dst, 0))
11451 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11452 && CONST_INT_P (XEXP (*dst, 1))
11453 && CONST_INT_P (XEXP (*dst, 2)))
11455 rtx reg = XEXP (*dst, 0);
11456 int width = INTVAL (XEXP (*dst, 1));
11457 int offset = INTVAL (XEXP (*dst, 2));
11458 int reg_width = GET_MODE_PRECISION (mode);
11459 if (BITS_BIG_ENDIAN)
11460 offset = reg_width - width - offset;
11462 rtx x, y, z, w;
11463 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11464 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11465 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11466 if (offset)
11467 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11468 else
11469 y = SET_SRC (pat);
11470 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11471 w = gen_rtx_IOR (mode, x, z);
11472 SUBST (SET_DEST (pat), reg);
11473 SUBST (SET_SRC (pat), w);
11475 changed = true;
11478 return changed;
11481 /* Like recog, but we receive the address of a pointer to a new pattern.
11482 We try to match the rtx that the pointer points to.
11483 If that fails, we may try to modify or replace the pattern,
11484 storing the replacement into the same pointer object.
11486 Modifications include deletion or addition of CLOBBERs. If the
11487 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11488 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11489 (and undo if that fails).
11491 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11492 the CLOBBERs are placed.
11494 The value is the final insn code from the pattern ultimately matched,
11495 or -1. */
11497 static int
11498 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11500 rtx pat = *pnewpat;
11501 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11502 if (insn_code_number >= 0 || check_asm_operands (pat))
11503 return insn_code_number;
11505 void *marker = get_undo_marker ();
11506 bool changed = false;
11508 if (GET_CODE (pat) == SET)
11509 changed = change_zero_ext (pat);
11510 else if (GET_CODE (pat) == PARALLEL)
11512 int i;
11513 for (i = 0; i < XVECLEN (pat, 0); i++)
11515 rtx set = XVECEXP (pat, 0, i);
11516 if (GET_CODE (set) == SET)
11517 changed |= change_zero_ext (set);
11521 if (changed)
11523 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11525 if (insn_code_number < 0)
11526 undo_to_marker (marker);
11529 return insn_code_number;
11532 /* Like gen_lowpart_general but for use by combine. In combine it
11533 is not possible to create any new pseudoregs. However, it is
11534 safe to create invalid memory addresses, because combine will
11535 try to recognize them and all they will do is make the combine
11536 attempt fail.
11538 If for some reason this cannot do its job, an rtx
11539 (clobber (const_int 0)) is returned.
11540 An insn containing that will not be recognized. */
11542 static rtx
11543 gen_lowpart_for_combine (machine_mode omode, rtx x)
11545 machine_mode imode = GET_MODE (x);
11546 unsigned int osize = GET_MODE_SIZE (omode);
11547 unsigned int isize = GET_MODE_SIZE (imode);
11548 rtx result;
11550 if (omode == imode)
11551 return x;
11553 /* We can only support MODE being wider than a word if X is a
11554 constant integer or has a mode the same size. */
11555 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11556 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11557 goto fail;
11559 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11560 won't know what to do. So we will strip off the SUBREG here and
11561 process normally. */
11562 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11564 x = SUBREG_REG (x);
11566 /* For use in case we fall down into the address adjustments
11567 further below, we need to adjust the known mode and size of
11568 x; imode and isize, since we just adjusted x. */
11569 imode = GET_MODE (x);
11571 if (imode == omode)
11572 return x;
11574 isize = GET_MODE_SIZE (imode);
11577 result = gen_lowpart_common (omode, x);
11579 if (result)
11580 return result;
11582 if (MEM_P (x))
11584 int offset = 0;
11586 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11587 address. */
11588 if (MEM_VOLATILE_P (x)
11589 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11590 goto fail;
11592 /* If we want to refer to something bigger than the original memref,
11593 generate a paradoxical subreg instead. That will force a reload
11594 of the original memref X. */
11595 if (paradoxical_subreg_p (omode, imode))
11596 return gen_rtx_SUBREG (omode, x, 0);
11598 if (WORDS_BIG_ENDIAN)
11599 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11601 /* Adjust the address so that the address-after-the-data is
11602 unchanged. */
11603 if (BYTES_BIG_ENDIAN)
11604 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11606 return adjust_address_nv (x, omode, offset);
11609 /* If X is a comparison operator, rewrite it in a new mode. This
11610 probably won't match, but may allow further simplifications. */
11611 else if (COMPARISON_P (x))
11612 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11614 /* If we couldn't simplify X any other way, just enclose it in a
11615 SUBREG. Normally, this SUBREG won't match, but some patterns may
11616 include an explicit SUBREG or we may simplify it further in combine. */
11617 else
11619 rtx res;
11621 if (imode == VOIDmode)
11623 imode = int_mode_for_mode (omode).require ();
11624 x = gen_lowpart_common (imode, x);
11625 if (x == NULL)
11626 goto fail;
11628 res = lowpart_subreg (omode, x, imode);
11629 if (res)
11630 return res;
11633 fail:
11634 return gen_rtx_CLOBBER (omode, const0_rtx);
11637 /* Try to simplify a comparison between OP0 and a constant OP1,
11638 where CODE is the comparison code that will be tested, into a
11639 (CODE OP0 const0_rtx) form.
11641 The result is a possibly different comparison code to use.
11642 *POP1 may be updated. */
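/* For example, (ltu X (const_int 1)) becomes (eq X (const_int 0)) via the
   LTU and LEU canonicalizations below, and (gtu X (const_int 0)) becomes
   (ne X (const_int 0)).  */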
11644 static enum rtx_code
11645 simplify_compare_const (enum rtx_code code, machine_mode mode,
11646 rtx op0, rtx *pop1)
11648 scalar_int_mode int_mode;
11649 HOST_WIDE_INT const_op = INTVAL (*pop1);
11651 /* Get the constant we are comparing against and turn off all bits
11652 not on in our mode. */
11653 if (mode != VOIDmode)
11654 const_op = trunc_int_for_mode (const_op, mode);
11656 /* If we are comparing against a constant power of two and the value
11657 being compared can only have that single bit nonzero (e.g., it was
11658 `and'ed with that bit), we can replace this with a comparison
11659 with zero. */
11660 if (const_op
11661 && (code == EQ || code == NE || code == GE || code == GEU
11662 || code == LT || code == LTU)
11663 && is_a <scalar_int_mode> (mode, &int_mode)
11664 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11665 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11666 && (nonzero_bits (op0, int_mode)
11667 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11669 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11670 const_op = 0;
11673 /* Similarly, if we are comparing a value known to be either -1 or
11674 0 with -1, change it to the opposite comparison against zero. */
11675 if (const_op == -1
11676 && (code == EQ || code == NE || code == GT || code == LE
11677 || code == GEU || code == LTU)
11678 && is_a <scalar_int_mode> (mode, &int_mode)
11679 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11681 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11682 const_op = 0;
11685 /* Do some canonicalizations based on the comparison code. We prefer
11686 comparisons against zero and then prefer equality comparisons.
11687 If we can reduce the size of a constant, we will do that too. */
11688 switch (code)
11690 case LT:
11691 /* < C is equivalent to <= (C - 1) */
11692 if (const_op > 0)
11694 const_op -= 1;
11695 code = LE;
11696 /* ... fall through to LE case below. */
11697 gcc_fallthrough ();
11699 else
11700 break;
11702 case LE:
11703 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11704 if (const_op < 0)
11706 const_op += 1;
11707 code = LT;
11710 /* If we are doing a <= 0 comparison on a value known to have
11711 a zero sign bit, we can replace this with == 0. */
11712 else if (const_op == 0
11713 && is_a <scalar_int_mode> (mode, &int_mode)
11714 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11715 && (nonzero_bits (op0, int_mode)
11716 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11717 == 0)
11718 code = EQ;
11719 break;
11721 case GE:
11722 /* >= C is equivalent to > (C - 1). */
11723 if (const_op > 0)
11725 const_op -= 1;
11726 code = GT;
11727 /* ... fall through to GT below. */
11728 gcc_fallthrough ();
11730 else
11731 break;
11733 case GT:
11734 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11735 if (const_op < 0)
11737 const_op += 1;
11738 code = GE;
11741 /* If we are doing a > 0 comparison on a value known to have
11742 a zero sign bit, we can replace this with != 0. */
11743 else if (const_op == 0
11744 && is_a <scalar_int_mode> (mode, &int_mode)
11745 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11746 && (nonzero_bits (op0, int_mode)
11747 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11748 == 0)
11749 code = NE;
11750 break;
11752 case LTU:
11753 /* < C is equivalent to <= (C - 1). */
11754 if (const_op > 0)
11756 const_op -= 1;
11757 code = LEU;
11758 /* ... fall through ... */
11760 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11761 else if (is_a <scalar_int_mode> (mode, &int_mode)
11762 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11763 && ((unsigned HOST_WIDE_INT) const_op
11764 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11766 const_op = 0;
11767 code = GE;
11768 break;
11770 else
11771 break;
11773 case LEU:
11774 /* unsigned <= 0 is equivalent to == 0 */
11775 if (const_op == 0)
11776 code = EQ;
11777 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11778 else if (is_a <scalar_int_mode> (mode, &int_mode)
11779 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11780 && ((unsigned HOST_WIDE_INT) const_op
11781 == ((HOST_WIDE_INT_1U
11782 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11784 const_op = 0;
11785 code = GE;
11787 break;
11789 case GEU:
11790 /* >= C is equivalent to > (C - 1). */
11791 if (const_op > 1)
11793 const_op -= 1;
11794 code = GTU;
11795 /* ... fall through ... */
11798 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11799 else if (is_a <scalar_int_mode> (mode, &int_mode)
11800 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11801 && ((unsigned HOST_WIDE_INT) const_op
11802 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11804 const_op = 0;
11805 code = LT;
11806 break;
11808 else
11809 break;
11811 case GTU:
11812 /* unsigned > 0 is equivalent to != 0 */
11813 if (const_op == 0)
11814 code = NE;
11815 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11816 else if (is_a <scalar_int_mode> (mode, &int_mode)
11817 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11818 && ((unsigned HOST_WIDE_INT) const_op
11819 == (HOST_WIDE_INT_1U
11820 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11822 const_op = 0;
11823 code = LT;
11825 break;
11827 default:
11828 break;
11831 *pop1 = GEN_INT (const_op);
11832 return code;
11835 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11836 comparison code that will be tested.
11838 The result is a possibly different comparison code to use. *POP0 and
11839 *POP1 may be updated.
11841 It is possible that we might detect that a comparison is either always
11842 true or always false. However, we do not perform general constant
11843 folding in combine, so this knowledge isn't useful. Such tautologies
11844 should have been detected earlier. Hence we ignore all such cases. */
11846 static enum rtx_code
11847 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11849 rtx op0 = *pop0;
11850 rtx op1 = *pop1;
11851 rtx tem, tem1;
11852 int i;
11853 scalar_int_mode mode, inner_mode, tmode;
11854 opt_scalar_int_mode tmode_iter;
11856 /* Try a few ways of applying the same transformation to both operands. */
11857 while (1)
11859 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11860 so check specially. */
11861 if (!WORD_REGISTER_OPERATIONS
11862 && code != GTU && code != GEU && code != LTU && code != LEU
11863 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11864 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11865 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11866 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11867 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11868 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11869 && (is_a <scalar_int_mode>
11870 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11871 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11872 && CONST_INT_P (XEXP (op0, 1))
11873 && XEXP (op0, 1) == XEXP (op1, 1)
11874 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11875 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11876 && (INTVAL (XEXP (op0, 1))
11877 == (GET_MODE_PRECISION (mode)
11878 - GET_MODE_PRECISION (inner_mode))))
11880 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11881 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11884 /* If both operands are the same constant shift, see if we can ignore the
11885 shift. We can if the shift is a rotate or if the bits shifted out of
11886 this shift are known to be zero for both inputs and if the type of
11887 comparison is compatible with the shift. */
11888 if (GET_CODE (op0) == GET_CODE (op1)
11889 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11890 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11891 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11892 && (code != GT && code != LT && code != GE && code != LE))
11893 || (GET_CODE (op0) == ASHIFTRT
11894 && (code != GTU && code != LTU
11895 && code != GEU && code != LEU)))
11896 && CONST_INT_P (XEXP (op0, 1))
11897 && INTVAL (XEXP (op0, 1)) >= 0
11898 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11899 && XEXP (op0, 1) == XEXP (op1, 1))
11901 machine_mode mode = GET_MODE (op0);
11902 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11903 int shift_count = INTVAL (XEXP (op0, 1));
11905 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11906 mask &= (mask >> shift_count) << shift_count;
11907 else if (GET_CODE (op0) == ASHIFT)
11908 mask = (mask & (mask << shift_count)) >> shift_count;
11910 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11911 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11912 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11913 else
11914 break;
11917 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11918 SUBREGs are of the same mode, and, in both cases, the AND would
11919 be redundant if the comparison was done in the narrower mode,
11920 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11921 and the operand's possibly nonzero bits are 0xffffff01; in that case
11922 if we only care about QImode, we don't need the AND). This case
11923 occurs if the output mode of an scc insn is not SImode and
11924 STORE_FLAG_VALUE == 1 (e.g., the 386).
11926 Similarly, check for a case where the AND's are ZERO_EXTEND
11927 operations from some narrower mode even though a SUBREG is not
11928 present. */
11930 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11931 && CONST_INT_P (XEXP (op0, 1))
11932 && CONST_INT_P (XEXP (op1, 1)))
11934 rtx inner_op0 = XEXP (op0, 0);
11935 rtx inner_op1 = XEXP (op1, 0);
11936 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11937 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11938 int changed = 0;
11940 if (paradoxical_subreg_p (inner_op0)
11941 && GET_CODE (inner_op1) == SUBREG
11942 && (GET_MODE (SUBREG_REG (inner_op0))
11943 == GET_MODE (SUBREG_REG (inner_op1)))
11944 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11945 <= HOST_BITS_PER_WIDE_INT)
11946 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11947 GET_MODE (SUBREG_REG (inner_op0)))))
11948 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11949 GET_MODE (SUBREG_REG (inner_op1))))))
11951 op0 = SUBREG_REG (inner_op0);
11952 op1 = SUBREG_REG (inner_op1);
11954 /* The resulting comparison is always unsigned since we masked
11955 off the original sign bit. */
11956 code = unsigned_condition (code);
11958 changed = 1;
11961 else if (c0 == c1)
11962 FOR_EACH_MODE_UNTIL (tmode,
11963 as_a <scalar_int_mode> (GET_MODE (op0)))
11964 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11966 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11967 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11968 code = unsigned_condition (code);
11969 changed = 1;
11970 break;
11973 if (! changed)
11974 break;
11977 /* If both operands are NOT, we can strip off the outer operation
11978 and adjust the comparison code for swapped operands; similarly for
11979 NEG, except that this must be an equality comparison. */
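/* E.g. (lt (not X) (not Y)) becomes (gt X Y), since ~X < ~Y exactly
   when Y < X.  */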
11980 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11981 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11982 && (code == EQ || code == NE)))
11983 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11985 else
11986 break;
11989 /* If the first operand is a constant, swap the operands and adjust the
11990 comparison code appropriately, but don't do this if the second operand
11991 is already a constant integer. */
11992 if (swap_commutative_operands_p (op0, op1))
11994 std::swap (op0, op1);
11995 code = swap_condition (code);
11998 /* We now enter a loop during which we will try to simplify the comparison.
11999 For the most part, we only are concerned with comparisons with zero,
12000 but some things may really be comparisons with zero but not start
12001 out looking that way. */
12003 while (CONST_INT_P (op1))
12005 machine_mode raw_mode = GET_MODE (op0);
12006 scalar_int_mode int_mode;
12007 int equality_comparison_p;
12008 int sign_bit_comparison_p;
12009 int unsigned_comparison_p;
12010 HOST_WIDE_INT const_op;
12012 /* We only want to handle integral modes. This catches VOIDmode,
12013 CCmode, and the floating-point modes. An exception is that we
12014 can handle VOIDmode if OP0 is a COMPARE or a comparison
12015 operation. */
12017 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12018 && ! (raw_mode == VOIDmode
12019 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12020 break;
12022 /* Try to simplify the compare to constant, possibly changing the
12023 comparison op, and/or changing op1 to zero. */
12024 code = simplify_compare_const (code, raw_mode, op0, &op1);
12025 const_op = INTVAL (op1);
12027 /* Compute some predicates to simplify code below. */
12029 equality_comparison_p = (code == EQ || code == NE);
12030 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12031 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12032 || code == GEU);
12034 /* If this is a sign bit comparison and we can do arithmetic in
12035 MODE, say that we will only be needing the sign bit of OP0. */
12036 if (sign_bit_comparison_p
12037 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12038 && HWI_COMPUTABLE_MODE_P (int_mode))
12039 op0 = force_to_mode (op0, int_mode,
12040 HOST_WIDE_INT_1U
12041 << (GET_MODE_PRECISION (int_mode) - 1), 0);
12044 if (COMPARISON_P (op0))
12046 /* We can't do anything if OP0 is a condition code value, rather
12047 than an actual data value. */
12048 if (const_op != 0
12049 || CC0_P (XEXP (op0, 0))
12050 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12051 break;
12053 /* Get the two operands being compared. */
12054 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12055 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12056 else
12057 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12059 /* Check for the cases where we simply want the result of the
12060 earlier test or the opposite of that result. */
12061 if (code == NE || code == EQ
12062 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12063 && (code == LT || code == GE)))
12065 enum rtx_code new_code;
12066 if (code == LT || code == NE)
12067 new_code = GET_CODE (op0);
12068 else
12069 new_code = reversed_comparison_code (op0, NULL);
12071 if (new_code != UNKNOWN)
12073 code = new_code;
12074 op0 = tem;
12075 op1 = tem1;
12076 continue;
12079 break;
12082 if (raw_mode == VOIDmode)
12083 break;
12084 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12086 /* Now try cases based on the opcode of OP0. If none of the cases
12087 does a "continue", we exit this loop immediately after the
12088 switch. */
12090 unsigned int mode_width = GET_MODE_PRECISION (mode);
12091 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12092 switch (GET_CODE (op0))
12094 case ZERO_EXTRACT:
12095 /* If we are extracting a single bit from a variable position in
12096 a constant that has only a single bit set and are comparing it
12097 with zero, we can convert this into an equality comparison
12098 between the position and the location of the single bit. */
12099 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12100 have already reduced the shift count modulo the word size. */
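/* For instance, with (zero_extract (const_int 4) (const_int 1) POS)
   compared against zero: 4 has only bit 2 set, so on a !BITS_BIG_ENDIAN
   target the extracted bit is nonzero exactly when POS is 2, and
   (eq ... 0) becomes (ne POS 2) while (ne ... 0) becomes (eq POS 2).  */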
12101 if (!SHIFT_COUNT_TRUNCATED
12102 && CONST_INT_P (XEXP (op0, 0))
12103 && XEXP (op0, 1) == const1_rtx
12104 && equality_comparison_p && const_op == 0
12105 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12107 if (BITS_BIG_ENDIAN)
12108 i = BITS_PER_WORD - 1 - i;
12110 op0 = XEXP (op0, 2);
12111 op1 = GEN_INT (i);
12112 const_op = i;
12114 /* Result is nonzero iff shift count is equal to I. */
12115 code = reverse_condition (code);
12116 continue;
12119 /* fall through */
12121 case SIGN_EXTRACT:
12122 tem = expand_compound_operation (op0);
12123 if (tem != op0)
12125 op0 = tem;
12126 continue;
12128 break;
12130 case NOT:
12131 /* If testing for equality, we can take the NOT of the constant. */
12132 if (equality_comparison_p
12133 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12135 op0 = XEXP (op0, 0);
12136 op1 = tem;
12137 continue;
12140 /* If just looking at the sign bit, reverse the sense of the
12141 comparison. */
12142 if (sign_bit_comparison_p)
12144 op0 = XEXP (op0, 0);
12145 code = (code == GE ? LT : GE);
12146 continue;
12148 break;
12150 case NEG:
12151 /* If testing for equality, we can take the NEG of the constant. */
12152 if (equality_comparison_p
12153 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12155 op0 = XEXP (op0, 0);
12156 op1 = tem;
12157 continue;
12160 /* The remaining cases only apply to comparisons with zero. */
12161 if (const_op != 0)
12162 break;
12164 /* When X is ABS or is known positive,
12165 (neg X) is < 0 if and only if X != 0. */
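/* For instance, if the sign bit of X is known to be clear, X is
   nonnegative, so (neg X) is negative exactly when X is nonzero:
   X == 5 gives -5 < 0, while X == 0 gives 0, which is not less
   than zero.  */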
12167 if (sign_bit_comparison_p
12168 && (GET_CODE (XEXP (op0, 0)) == ABS
12169 || (mode_width <= HOST_BITS_PER_WIDE_INT
12170 && (nonzero_bits (XEXP (op0, 0), mode)
12171 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12172 == 0)))
12174 op0 = XEXP (op0, 0);
12175 code = (code == LT ? NE : EQ);
12176 continue;
12179 /* If we have NEG of something whose two high-order bits are the
12180 same, we know that "(-a) < 0" is equivalent to "a > 0". */
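/* For instance, a == 5 gives -a == -5 < 0 with a > 0, and a == -5 gives
   -a == 5 >= 0 with a <= 0.  The only troublesome value is the most
   negative one, whose negation wraps to itself; it has a single sign bit
   copy, so the check below excludes it.  */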
12181 if (num_sign_bit_copies (op0, mode) >= 2)
12183 op0 = XEXP (op0, 0);
12184 code = swap_condition (code);
12185 continue;
12187 break;
12189 case ROTATE:
12190 /* If we are testing equality and our count is a constant, we
12191 can perform the inverse operation on our RHS. */
12192 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12193 && (tem = simplify_binary_operation (ROTATERT, mode,
12194 op1, XEXP (op0, 1))) != 0)
12196 op0 = XEXP (op0, 0);
12197 op1 = tem;
12198 continue;
12201 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12202 a particular bit. Convert it to an AND of a constant of that
12203 bit. This will be converted into a ZERO_EXTRACT. */
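/* For instance, in QImode (rotate X 3) < 0 tests the sign bit of the
   rotated value, which is bit 8 - 1 - 3 == 4 of X, so the test becomes
   (ne (and X (const_int 16)) 0).  */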
12204 if (const_op == 0 && sign_bit_comparison_p
12205 && CONST_INT_P (XEXP (op0, 1))
12206 && mode_width <= HOST_BITS_PER_WIDE_INT)
12208 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12209 (HOST_WIDE_INT_1U
12210 << (mode_width - 1
12211 - INTVAL (XEXP (op0, 1)))));
12212 code = (code == LT ? NE : EQ);
12213 continue;
12216 /* Fall through. */
12218 case ABS:
12219 /* ABS is ignorable inside an equality comparison with zero. */
12220 if (const_op == 0 && equality_comparison_p)
12222 op0 = XEXP (op0, 0);
12223 continue;
12225 break;
12227 case SIGN_EXTEND:
12228 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12229 (compare FOO CONST) if CONST fits in FOO's mode and we
12230 are either testing inequality or have an unsigned
12231 comparison with ZERO_EXTEND or a signed comparison with
12232 SIGN_EXTEND. But don't do it if we don't have a compare
12233 insn of the given mode, since we'd have to revert it
12234 later on, and then we wouldn't know whether to sign- or
12235 zero-extend. */
12236 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12237 && ! unsigned_comparison_p
12238 && HWI_COMPUTABLE_MODE_P (mode)
12239 && trunc_int_for_mode (const_op, mode) == const_op
12240 && have_insn_for (COMPARE, mode))
12242 op0 = XEXP (op0, 0);
12243 continue;
12245 break;
12247 case SUBREG:
12248 /* Check for the case where we are comparing A - C1 with C2, that is
12250 (subreg:MODE (plus (A) (-C1))) op (C2)
12252 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12253 comparison in the wider mode. One of the following two conditions
12254 must be true in order for this to be valid:
12256 1. The mode extension results in the same bit pattern being added
12257 on both sides and the comparison is equality or unsigned. As
12258 C2 has been truncated to fit in MODE, the pattern can only be
12259 all 0s or all 1s.
12261 2. The mode extension results in the sign bit being copied on
12262 each side.
12264 The difficulty here is that we have predicates for A but not for
12265 (A - C1) so we need to check that C1 is within proper bounds so
12266 as to perturb A as little as possible. */
12268 if (mode_width <= HOST_BITS_PER_WIDE_INT
12269 && subreg_lowpart_p (op0)
12270 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12271 &inner_mode)
12272 && GET_MODE_PRECISION (inner_mode) > mode_width
12273 && GET_CODE (SUBREG_REG (op0)) == PLUS
12274 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12276 rtx a = XEXP (SUBREG_REG (op0), 0);
12277 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12279 if ((c1 > 0
12280 && (unsigned HOST_WIDE_INT) c1
12281 < HOST_WIDE_INT_1U << (mode_width - 1)
12282 && (equality_comparison_p || unsigned_comparison_p)
12283 /* (A - C1) zero-extends if it is positive and sign-extends
12284 if it is negative, C2 both zero- and sign-extends. */
12285 && ((0 == (nonzero_bits (a, inner_mode)
12286 & ~GET_MODE_MASK (mode))
12287 && const_op >= 0)
12288 /* (A - C1) sign-extends if it is positive and 1-extends
12289 if it is negative, C2 both sign- and 1-extends. */
12290 || (num_sign_bit_copies (a, inner_mode)
12291 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12292 - mode_width)
12293 && const_op < 0)))
12294 || ((unsigned HOST_WIDE_INT) c1
12295 < HOST_WIDE_INT_1U << (mode_width - 2)
12296 /* (A - C1) always sign-extends, like C2. */
12297 && num_sign_bit_copies (a, inner_mode)
12298 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12299 - (mode_width - 1))))
12301 op0 = SUBREG_REG (op0);
12302 continue;
12306 /* If the inner mode is narrower and we are extracting the low part,
12307 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12308 if (paradoxical_subreg_p (op0))
12310 else if (subreg_lowpart_p (op0)
12311 && GET_MODE_CLASS (mode) == MODE_INT
12312 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12313 && (code == NE || code == EQ)
12314 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12315 && !paradoxical_subreg_p (op0)
12316 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12317 & ~GET_MODE_MASK (mode)) == 0)
12319 /* Remove outer subregs that don't do anything. */
12320 tem = gen_lowpart (inner_mode, op1);
12322 if ((nonzero_bits (tem, inner_mode)
12323 & ~GET_MODE_MASK (mode)) == 0)
12325 op0 = SUBREG_REG (op0);
12326 op1 = tem;
12327 continue;
12329 break;
12331 else
12332 break;
12334 /* FALLTHROUGH */
12336 case ZERO_EXTEND:
12337 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12338 && (unsigned_comparison_p || equality_comparison_p)
12339 && HWI_COMPUTABLE_MODE_P (mode)
12340 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12341 && const_op >= 0
12342 && have_insn_for (COMPARE, mode))
12344 op0 = XEXP (op0, 0);
12345 continue;
12347 break;
12349 case PLUS:
12350 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12351 this for equality comparisons due to pathological cases involving
12352 overflows. */
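/* For instance, (eq (plus X (const_int 5)) (const_int 12)) becomes
   (eq X (const_int 7)).  An ordered comparison could not be rewritten
   this way, since X + 5 may wrap around while X itself does not.  */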
12353 if (equality_comparison_p
12354 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12355 op1, XEXP (op0, 1))))
12357 op0 = XEXP (op0, 0);
12358 op1 = tem;
12359 continue;
12362 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
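/* E.g. X == 0 gives (abs X) - 1 == -1, which is negative, while X == 3
   gives 2, which is not; so the sign-bit test turns into an equality
   test of X against zero.  */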
12363 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12364 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12366 op0 = XEXP (XEXP (op0, 0), 0);
12367 code = (code == LT ? EQ : NE);
12368 continue;
12370 break;
12372 case MINUS:
12373 /* We used to optimize signed comparisons against zero, but that
12374 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12375 arrive here as equality comparisons, or (GEU, LTU) are
12376 optimized away. No need to special-case them. */
12378 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12379 (eq B (minus A C)), whichever simplifies. We can only do
12380 this for equality comparisons due to pathological cases involving
12381 overflows. */
12382 if (equality_comparison_p
12383 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12384 XEXP (op0, 1), op1)))
12386 op0 = XEXP (op0, 0);
12387 op1 = tem;
12388 continue;
12391 if (equality_comparison_p
12392 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12393 XEXP (op0, 0), op1)))
12395 op0 = XEXP (op0, 1);
12396 op1 = tem;
12397 continue;
12400 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12401 of bits in X minus 1, is one iff X > 0. */
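/* E.g. in QImode with C == 7: X == 5 gives (5 >> 7) - 5 == -5, sign bit
   set, and 5 > 0; X == -5 gives (-5 >> 7) - (-5) == -1 + 5 == 4, sign bit
   clear; X == 0 gives 0.  So (LT ... 0) becomes (GT X 0) and (GE ... 0)
   becomes (LE X 0).  */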
12402 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12403 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12404 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12405 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12407 op0 = XEXP (op0, 1);
12408 code = (code == GE ? LE : GT);
12409 continue;
12411 break;
12413 case XOR:
12414 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12415 if C is zero or B is a constant. */
12416 if (equality_comparison_p
12417 && 0 != (tem = simplify_binary_operation (XOR, mode,
12418 XEXP (op0, 1), op1)))
12420 op0 = XEXP (op0, 0);
12421 op1 = tem;
12422 continue;
12424 break;
12427 case IOR:
12428 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12429 iff X <= 0. */
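/* E.g. X == 3 gives (2 | 3) == 3, sign bit clear; X == 0 gives
   (-1 | 0) == -1, sign bit set; X == -4 gives a negative result as well.
   So (LT ... 0) becomes (LE X 0) and (GE ... 0) becomes (GT X 0).  */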
12430 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12431 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12432 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12434 op0 = XEXP (op0, 1);
12435 code = (code == GE ? GT : LE);
12436 continue;
12438 break;
12440 case AND:
12441 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12442 will be converted to a ZERO_EXTRACT later. */
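/* For instance, testing whether bit N of Y is set: (and (ashift 1 N) Y)
   compared against zero is the same as (and (lshiftrt Y N) 1) compared
   against zero, and the latter form is what later gets turned into a
   ZERO_EXTRACT.  */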
12443 if (const_op == 0 && equality_comparison_p
12444 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12445 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12447 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12448 XEXP (XEXP (op0, 0), 1));
12449 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12450 continue;
12453 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12454 zero and X is a comparison and C1 and C2 describe only bits set
12455 in STORE_FLAG_VALUE, we can compare with X. */
12456 if (const_op == 0 && equality_comparison_p
12457 && mode_width <= HOST_BITS_PER_WIDE_INT
12458 && CONST_INT_P (XEXP (op0, 1))
12459 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12460 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12461 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12462 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12464 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12465 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12466 if ((~STORE_FLAG_VALUE & mask) == 0
12467 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12468 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12469 && COMPARISON_P (tem))))
12471 op0 = XEXP (XEXP (op0, 0), 0);
12472 continue;
12476 /* If we are doing an equality comparison of an AND of a bit equal
12477 to the sign bit, replace this with a LT or GE comparison of
12478 the underlying value. */
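/* For instance, in SImode (eq (and X (const_int 0x80000000)) 0) asks
   whether the sign bit of X is clear, i.e. (ge X 0), and the NE form
   becomes (lt X 0).  */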
12479 if (equality_comparison_p
12480 && const_op == 0
12481 && CONST_INT_P (XEXP (op0, 1))
12482 && mode_width <= HOST_BITS_PER_WIDE_INT
12483 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12484 == HOST_WIDE_INT_1U << (mode_width - 1)))
12486 op0 = XEXP (op0, 0);
12487 code = (code == EQ ? GE : LT);
12488 continue;
12491 /* If this AND operation is really a ZERO_EXTEND from a narrower
12492 mode, the constant fits within that mode, and this is either an
12493 equality or unsigned comparison, try to do this comparison in
12494 the narrower mode.
12496 Note that in:
12498 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12499 -> (ne:DI (reg:SI 4) (const_int 0))
12501 unless TRULY_NOOP_TRUNCATION allows it or the register is
12502 known to hold a value of the required mode, the
12503 transformation is invalid. */
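/* For instance, (and:SI X (const_int 255)) compared against 17:
   255 + 1 is a power of two whose log2 is 8, 17 >> 8 is zero, and an
   8-bit integer mode exists, so the low byte of X can be compared
   against 17 in QImode instead.  */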
12504 if ((equality_comparison_p || unsigned_comparison_p)
12505 && CONST_INT_P (XEXP (op0, 1))
12506 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12507 & GET_MODE_MASK (mode))
12508 + 1)) >= 0
12509 && const_op >> i == 0
12510 && int_mode_for_size (i, 1).exists (&tmode))
12512 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12513 continue;
12516 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12517 fits in both M1 and M2 and the SUBREG is either paradoxical
12518 or represents the low part, permute the SUBREG and the AND
12519 and try again. */
12520 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12521 && CONST_INT_P (XEXP (op0, 1)))
12523 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12524 /* Require an integral mode, to avoid creating something like
12525 (AND:SF ...). */
12526 if ((is_a <scalar_int_mode>
12527 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12528 /* It is unsafe to commute the AND into the SUBREG if the
12529 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12530 not defined. As originally written the upper bits
12531 have a defined value due to the AND operation.
12532 However, if we commute the AND inside the SUBREG then
12533 they no longer have defined values and the meaning of
12534 the code has been changed.
12535 Also C1 should not change value in the smaller mode,
12536 see PR67028 (a positive C1 can become negative in the
12537 smaller mode, so that the AND does no longer mask the
12538 upper bits). */
12539 && ((WORD_REGISTER_OPERATIONS
12540 && mode_width > GET_MODE_PRECISION (tmode)
12541 && mode_width <= BITS_PER_WORD
12542 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12543 || (mode_width <= GET_MODE_PRECISION (tmode)
12544 && subreg_lowpart_p (XEXP (op0, 0))))
12545 && mode_width <= HOST_BITS_PER_WIDE_INT
12546 && HWI_COMPUTABLE_MODE_P (tmode)
12547 && (c1 & ~mask) == 0
12548 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12549 && c1 != mask
12550 && c1 != GET_MODE_MASK (tmode))
12552 op0 = simplify_gen_binary (AND, tmode,
12553 SUBREG_REG (XEXP (op0, 0)),
12554 gen_int_mode (c1, tmode));
12555 op0 = gen_lowpart (mode, op0);
12556 continue;
12560 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12561 if (const_op == 0 && equality_comparison_p
12562 && XEXP (op0, 1) == const1_rtx
12563 && GET_CODE (XEXP (op0, 0)) == NOT)
12565 op0 = simplify_and_const_int (NULL_RTX, mode,
12566 XEXP (XEXP (op0, 0), 0), 1);
12567 code = (code == NE ? EQ : NE);
12568 continue;
12571 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12572 (eq (and (lshiftrt X) 1) 0).
12573 Also handle the case where (not X) is expressed using xor. */
12574 if (const_op == 0 && equality_comparison_p
12575 && XEXP (op0, 1) == const1_rtx
12576 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12578 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12579 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12581 if (GET_CODE (shift_op) == NOT
12582 || (GET_CODE (shift_op) == XOR
12583 && CONST_INT_P (XEXP (shift_op, 1))
12584 && CONST_INT_P (shift_count)
12585 && HWI_COMPUTABLE_MODE_P (mode)
12586 && (UINTVAL (XEXP (shift_op, 1))
12587 == HOST_WIDE_INT_1U
12588 << INTVAL (shift_count))))
12591 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12592 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12593 code = (code == NE ? EQ : NE);
12594 continue;
12597 break;
12599 case ASHIFT:
12600 /* If we have (compare (ashift FOO N) (const_int C)) and
12601 the high order N bits of FOO (N+1 if an inequality comparison)
12602 are known to be zero, we can do this by comparing FOO with C
12603 shifted right N bits so long as the low-order N bits of C are
12604 zero. */
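/* For instance, (eq (ashift X 4) (const_int 0x50)) with the high four
   bits of X known to be zero: the low four bits of 0x50 are zero, so
   this is the same as (eq X (const_int 5)), i.e. 0x50 >> 4.  */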
12605 if (CONST_INT_P (XEXP (op0, 1))
12606 && INTVAL (XEXP (op0, 1)) >= 0
12607 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12608 < HOST_BITS_PER_WIDE_INT)
12609 && (((unsigned HOST_WIDE_INT) const_op
12610 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12611 - 1)) == 0)
12612 && mode_width <= HOST_BITS_PER_WIDE_INT
12613 && (nonzero_bits (XEXP (op0, 0), mode)
12614 & ~(mask >> (INTVAL (XEXP (op0, 1))
12615 + ! equality_comparison_p))) == 0)
12617 /* We must perform a logical shift, not an arithmetic one,
12618 as we want the top N bits of C to be zero. */
12619 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12621 temp >>= INTVAL (XEXP (op0, 1));
12622 op1 = gen_int_mode (temp, mode);
12623 op0 = XEXP (op0, 0);
12624 continue;
12627 /* If we are doing a sign bit comparison, it means we are testing
12628 a particular bit. Convert it to the appropriate AND. */
12629 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12630 && mode_width <= HOST_BITS_PER_WIDE_INT)
12632 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12633 (HOST_WIDE_INT_1U
12634 << (mode_width - 1
12635 - INTVAL (XEXP (op0, 1)))));
12636 code = (code == LT ? NE : EQ);
12637 continue;
12640 /* If this is an equality comparison with zero and we are shifting
12641 the low bit to the sign bit, we can convert this to an AND of the
12642 low-order bit. */
12643 if (const_op == 0 && equality_comparison_p
12644 && CONST_INT_P (XEXP (op0, 1))
12645 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12647 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12648 continue;
12650 break;
12652 case ASHIFTRT:
12653 /* If this is an equality comparison with zero, we can do this
12654 as a logical shift, which might be much simpler. */
12655 if (equality_comparison_p && const_op == 0
12656 && CONST_INT_P (XEXP (op0, 1)))
12658 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12659 XEXP (op0, 0),
12660 INTVAL (XEXP (op0, 1)));
12661 continue;
12664 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12665 do the comparison in a narrower mode. */
12666 if (! unsigned_comparison_p
12667 && CONST_INT_P (XEXP (op0, 1))
12668 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12669 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12670 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12671 .exists (&tmode))
12672 && (((unsigned HOST_WIDE_INT) const_op
12673 + (GET_MODE_MASK (tmode) >> 1) + 1)
12674 <= GET_MODE_MASK (tmode)))
12676 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12677 continue;
12680 /* Likewise if OP0 is a PLUS of a sign extension with a
12681 constant, which is usually represented with the PLUS
12682 between the shifts. */
12683 if (! unsigned_comparison_p
12684 && CONST_INT_P (XEXP (op0, 1))
12685 && GET_CODE (XEXP (op0, 0)) == PLUS
12686 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12687 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12688 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12689 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12690 .exists (&tmode))
12691 && (((unsigned HOST_WIDE_INT) const_op
12692 + (GET_MODE_MASK (tmode) >> 1) + 1)
12693 <= GET_MODE_MASK (tmode)))
12695 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12696 rtx add_const = XEXP (XEXP (op0, 0), 1);
12697 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12698 add_const, XEXP (op0, 1));
12700 op0 = simplify_gen_binary (PLUS, tmode,
12701 gen_lowpart (tmode, inner),
12702 new_const);
12703 continue;
12706 /* FALLTHROUGH */
12707 case LSHIFTRT:
12708 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12709 the low order N bits of FOO are known to be zero, we can do this
12710 by comparing FOO with C shifted left N bits so long as no
12711 overflow occurs. Even if the low order N bits of FOO aren't known
12712 to be zero, if the comparison is >= or < we can use the same
12713 optimization and for > or <= by setting all the low
12714 order N bits in the comparison constant. */
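/* For instance, (gtu (lshiftrt X 3) (const_int 5)) when the low three
   bits of X are not known to be zero: X >> 3 exceeds 5 exactly when
   X >= 48, so the constant becomes (5 << 3) | 7 == 47 and the test is
   (gtu X 47).  */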
12715 if (CONST_INT_P (XEXP (op0, 1))
12716 && INTVAL (XEXP (op0, 1)) > 0
12717 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12718 && mode_width <= HOST_BITS_PER_WIDE_INT
12719 && (((unsigned HOST_WIDE_INT) const_op
12720 + (GET_CODE (op0) != LSHIFTRT
12721 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12722 + 1)
12723 : 0))
12724 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12726 unsigned HOST_WIDE_INT low_bits
12727 = (nonzero_bits (XEXP (op0, 0), mode)
12728 & ((HOST_WIDE_INT_1U
12729 << INTVAL (XEXP (op0, 1))) - 1));
12730 if (low_bits == 0 || !equality_comparison_p)
12732 /* If the shift was logical, then we must make the condition
12733 unsigned. */
12734 if (GET_CODE (op0) == LSHIFTRT)
12735 code = unsigned_condition (code);
12737 const_op = (unsigned HOST_WIDE_INT) const_op
12738 << INTVAL (XEXP (op0, 1));
12739 if (low_bits != 0
12740 && (code == GT || code == GTU
12741 || code == LE || code == LEU))
12742 const_op
12743 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12744 op1 = GEN_INT (const_op);
12745 op0 = XEXP (op0, 0);
12746 continue;
12750 /* If we are using this shift to extract just the sign bit, we
12751 can replace this with an LT or GE comparison. */
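/* For instance, in SImode (ne (lshiftrt X 31) 0) is true exactly when
   the sign bit of X is set, so it becomes (lt X 0), and the EQ form
   becomes (ge X 0).  */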
12752 if (const_op == 0
12753 && (equality_comparison_p || sign_bit_comparison_p)
12754 && CONST_INT_P (XEXP (op0, 1))
12755 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12757 op0 = XEXP (op0, 0);
12758 code = (code == NE || code == GT ? LT : GE);
12759 continue;
12761 break;
12763 default:
12764 break;
12767 break;
12770 /* Now make any compound operations involved in this comparison. Then,
12771 check for an outermost SUBREG on OP0 that is not doing anything or is
12772 paradoxical. The latter transformation must only be performed when
12773 it is known that the "extra" bits will be the same in op0 and op1 or
12774 that they don't matter. There are three cases to consider:
12776 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12777 care bits and we can assume they have any convenient value. So
12778 making the transformation is safe.
12780 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12781 In this case the upper bits of op0 are undefined. We should not make
12782 the simplification in that case as we do not know the contents of
12783 those bits.
12785 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12786 In that case we know those bits are zeros or ones. We must also be
12787 sure that they are the same as the upper bits of op1.
12789 We can never remove a SUBREG for a non-equality comparison because
12790 the sign bit is in a different place in the underlying object. */
12792 rtx_code op0_mco_code = SET;
12793 if (op1 == const0_rtx)
12794 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12796 op0 = make_compound_operation (op0, op0_mco_code);
12797 op1 = make_compound_operation (op1, SET);
12799 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12800 && is_int_mode (GET_MODE (op0), &mode)
12801 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12802 && (code == NE || code == EQ))
12804 if (paradoxical_subreg_p (op0))
12806 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12807 implemented. */
12808 if (REG_P (SUBREG_REG (op0)))
12810 op0 = SUBREG_REG (op0);
12811 op1 = gen_lowpart (inner_mode, op1);
12814 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12815 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12816 & ~GET_MODE_MASK (mode)) == 0)
12818 tem = gen_lowpart (inner_mode, op1);
12820 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12821 op0 = SUBREG_REG (op0), op1 = tem;
12825 /* We now do the opposite procedure: Some machines don't have compare
12826 insns in all modes. If OP0's mode is an integer mode smaller than a
12827 word and we can't do a compare in that mode, see if there is a larger
12828 mode for which we can do the compare. There are a number of cases in
12829 which we can use the wider mode. */
12831 if (is_int_mode (GET_MODE (op0), &mode)
12832 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12833 && ! have_insn_for (COMPARE, mode))
12834 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12836 tmode = tmode_iter.require ();
12837 if (!HWI_COMPUTABLE_MODE_P (tmode))
12838 break;
12839 if (have_insn_for (COMPARE, tmode))
12841 int zero_extended;
12843 /* If this is a test for negative, we can make an explicit
12844 test of the sign bit. Test this first so we can use
12845 a paradoxical subreg to extend OP0. */
12847 if (op1 == const0_rtx && (code == LT || code == GE)
12848 && HWI_COMPUTABLE_MODE_P (mode))
12850 unsigned HOST_WIDE_INT sign
12851 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12852 op0 = simplify_gen_binary (AND, tmode,
12853 gen_lowpart (tmode, op0),
12854 gen_int_mode (sign, tmode));
12855 code = (code == LT) ? NE : EQ;
12856 break;
12859 /* If the only nonzero bits in OP0 and OP1 are those in the
12860 narrower mode and this is an equality or unsigned comparison,
12861 we can use the wider mode. Similarly for sign-extended
12862 values, in which case it is true for all comparisons. */
12863 zero_extended = ((code == EQ || code == NE
12864 || code == GEU || code == GTU
12865 || code == LEU || code == LTU)
12866 && (nonzero_bits (op0, tmode)
12867 & ~GET_MODE_MASK (mode)) == 0
12868 && ((CONST_INT_P (op1)
12869 || (nonzero_bits (op1, tmode)
12870 & ~GET_MODE_MASK (mode)) == 0)));
12872 if (zero_extended
12873 || ((num_sign_bit_copies (op0, tmode)
12874 > (unsigned int) (GET_MODE_PRECISION (tmode)
12875 - GET_MODE_PRECISION (mode)))
12876 && (num_sign_bit_copies (op1, tmode)
12877 > (unsigned int) (GET_MODE_PRECISION (tmode)
12878 - GET_MODE_PRECISION (mode)))))
12880 /* If OP0 is an AND and we don't have an AND in MODE either,
12881 make a new AND in the proper mode. */
12882 if (GET_CODE (op0) == AND
12883 && !have_insn_for (AND, mode))
12884 op0 = simplify_gen_binary (AND, tmode,
12885 gen_lowpart (tmode,
12886 XEXP (op0, 0)),
12887 gen_lowpart (tmode,
12888 XEXP (op0, 1)));
12889 else
12891 if (zero_extended)
12893 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12894 op0, mode);
12895 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12896 op1, mode);
12898 else
12900 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12901 op0, mode);
12902 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12903 op1, mode);
12905 break;
12911 /* We may have changed the comparison operands. Re-canonicalize. */
12912 if (swap_commutative_operands_p (op0, op1))
12914 std::swap (op0, op1);
12915 code = swap_condition (code);
12918 /* If this machine only supports a subset of valid comparisons, see if we
12919 can convert an unsupported one into a supported one. */
12920 target_canonicalize_comparison (&code, &op0, &op1, 0);
12922 *pop0 = op0;
12923 *pop1 = op1;
12925 return code;
12928 /* Utility function for record_value_for_reg. Count number of
12929 rtxs in X. */
12930 static int
12931 count_rtxs (rtx x)
12933 enum rtx_code code = GET_CODE (x);
12934 const char *fmt;
12935 int i, j, ret = 1;
12937 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12938 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12940 rtx x0 = XEXP (x, 0);
12941 rtx x1 = XEXP (x, 1);
12943 if (x0 == x1)
12944 return 1 + 2 * count_rtxs (x0);
12946 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12947 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12948 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12949 return 2 + 2 * count_rtxs (x0)
12950 + count_rtxs (x == XEXP (x1, 0)
12951 ? XEXP (x1, 1) : XEXP (x1, 0));
12953 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12954 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12955 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12956 return 2 + 2 * count_rtxs (x1)
12957 + count_rtxs (x == XEXP (x0, 0)
12958 ? XEXP (x0, 1) : XEXP (x0, 0));
12961 fmt = GET_RTX_FORMAT (code);
12962 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12963 if (fmt[i] == 'e')
12964 ret += count_rtxs (XEXP (x, i));
12965 else if (fmt[i] == 'E')
12966 for (j = 0; j < XVECLEN (x, i); j++)
12967 ret += count_rtxs (XVECEXP (x, i, j));
12969 return ret;
12972 /* Utility function for following routine. Called when X is part of a value
12973 being stored into last_set_value. Sets last_set_table_tick
12974 for each register mentioned. Similar to mention_regs in cse.c */
12976 static void
12977 update_table_tick (rtx x)
12979 enum rtx_code code = GET_CODE (x);
12980 const char *fmt = GET_RTX_FORMAT (code);
12981 int i, j;
12983 if (code == REG)
12985 unsigned int regno = REGNO (x);
12986 unsigned int endregno = END_REGNO (x);
12987 unsigned int r;
12989 for (r = regno; r < endregno; r++)
12991 reg_stat_type *rsp = &reg_stat[r];
12992 rsp->last_set_table_tick = label_tick;
12995 return;
12998 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12999 if (fmt[i] == 'e')
13001 /* Check for identical subexpressions. If x contains
13002 identical subexpressions we only have to traverse one of
13003 them. */
13004 if (i == 0 && ARITHMETIC_P (x))
13006 /* Note that at this point x1 has already been
13007 processed. */
13008 rtx x0 = XEXP (x, 0);
13009 rtx x1 = XEXP (x, 1);
13011 /* If x0 and x1 are identical then there is no need to
13012 process x0. */
13013 if (x0 == x1)
13014 break;
13016 /* If x0 is identical to a subexpression of x1 then while
13017 processing x1, x0 has already been processed. Thus we
13018 are done with x. */
13019 if (ARITHMETIC_P (x1)
13020 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13021 break;
13023 /* If x1 is identical to a subexpression of x0 then we
13024 still have to process the rest of x0. */
13025 if (ARITHMETIC_P (x0)
13026 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13028 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13029 break;
13033 update_table_tick (XEXP (x, i));
13035 else if (fmt[i] == 'E')
13036 for (j = 0; j < XVECLEN (x, i); j++)
13037 update_table_tick (XVECEXP (x, i, j));
13040 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13041 are saying that the register is clobbered and we no longer know its
13042 value. If INSN is zero, don't update reg_stat[].last_set; this is
13043 only permitted with VALUE also zero and is used to invalidate the
13044 register. */
13046 static void
13047 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13049 unsigned int regno = REGNO (reg);
13050 unsigned int endregno = END_REGNO (reg);
13051 unsigned int i;
13052 reg_stat_type *rsp;
13054 /* If VALUE contains REG and we have a previous value for REG, substitute
13055 the previous value. */
13056 if (value && insn && reg_overlap_mentioned_p (reg, value))
13058 rtx tem;
13060 /* Set things up so get_last_value is allowed to see anything set up to
13061 our insn. */
13062 subst_low_luid = DF_INSN_LUID (insn);
13063 tem = get_last_value (reg);
13065 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13066 it isn't going to be useful and will take a lot of time to process,
13067 so just use the CLOBBER. */
13069 if (tem)
13071 if (ARITHMETIC_P (tem)
13072 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13073 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13074 tem = XEXP (tem, 0);
13075 else if (count_occurrences (value, reg, 1) >= 2)
13077 /* If there are two or more occurrences of REG in VALUE,
13078 prevent the value from growing too much. */
13079 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13080 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13083 value = replace_rtx (copy_rtx (value), reg, tem);
13087 /* For each register modified, show we don't know its value, that
13088 we don't know about its bitwise content, that its value has been
13089 updated, and that we don't know the location of the death of the
13090 register. */
13091 for (i = regno; i < endregno; i++)
13093 rsp = &reg_stat[i];
13095 if (insn)
13096 rsp->last_set = insn;
13098 rsp->last_set_value = 0;
13099 rsp->last_set_mode = VOIDmode;
13100 rsp->last_set_nonzero_bits = 0;
13101 rsp->last_set_sign_bit_copies = 0;
13102 rsp->last_death = 0;
13103 rsp->truncated_to_mode = VOIDmode;
13106 /* Mark registers that are being referenced in this value. */
13107 if (value)
13108 update_table_tick (value);
13110 /* Now update the status of each register being set.
13111 If someone is using this register in this block, set this register
13112 to invalid since we will get confused between the two lives in this
13113 basic block. This makes using this register always invalid. In cse, we
13114 scan the table to invalidate all entries using this register, but this
13115 is too much work for us. */
13117 for (i = regno; i < endregno; i++)
13119 rsp = &reg_stat[i];
13120 rsp->last_set_label = label_tick;
13121 if (!insn
13122 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13123 rsp->last_set_invalid = 1;
13124 else
13125 rsp->last_set_invalid = 0;
13128 /* The value being assigned might refer to X (like in "x++;"). In that
13129 case, we must replace it with (clobber (const_int 0)) to prevent
13130 infinite loops. */
13131 rsp = &reg_stat[regno];
13132 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13134 value = copy_rtx (value);
13135 if (!get_last_value_validate (&value, insn, label_tick, 1))
13136 value = 0;
13139 /* For the main register being modified, update the value, the mode, the
13140 nonzero bits, and the number of sign bit copies. */
13142 rsp->last_set_value = value;
13144 if (value)
13146 machine_mode mode = GET_MODE (reg);
13147 subst_low_luid = DF_INSN_LUID (insn);
13148 rsp->last_set_mode = mode;
13149 if (GET_MODE_CLASS (mode) == MODE_INT
13150 && HWI_COMPUTABLE_MODE_P (mode))
13151 mode = nonzero_bits_mode;
13152 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13153 rsp->last_set_sign_bit_copies
13154 = num_sign_bit_copies (value, GET_MODE (reg));
13158 /* Called via note_stores from record_dead_and_set_regs to handle one
13159 SET or CLOBBER in an insn. DATA is the instruction in which the
13160 set is occurring. */
13162 static void
13163 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13165 rtx_insn *record_dead_insn = (rtx_insn *) data;
13167 if (GET_CODE (dest) == SUBREG)
13168 dest = SUBREG_REG (dest);
13170 if (!record_dead_insn)
13172 if (REG_P (dest))
13173 record_value_for_reg (dest, NULL, NULL_RTX);
13174 return;
13177 if (REG_P (dest))
13179 /* If we are setting the whole register, we know its value. Otherwise
13180 show that we don't know the value. We can handle SUBREG in
13181 some cases. */
13182 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13183 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13184 else if (GET_CODE (setter) == SET
13185 && GET_CODE (SET_DEST (setter)) == SUBREG
13186 && SUBREG_REG (SET_DEST (setter)) == dest
13187 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13188 && subreg_lowpart_p (SET_DEST (setter)))
13189 record_value_for_reg (dest, record_dead_insn,
13190 gen_lowpart (GET_MODE (dest),
13191 SET_SRC (setter)));
13192 else
13193 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13195 else if (MEM_P (dest)
13196 /* Ignore pushes, they clobber nothing. */
13197 && ! push_operand (dest, GET_MODE (dest)))
13198 mem_last_set = DF_INSN_LUID (record_dead_insn);
13201 /* Update the records of when each REG was most recently set or killed
13202 for the things done by INSN. This is the last thing done in processing
13203 INSN in the combiner loop.
13205 We update reg_stat[], in particular fields last_set, last_set_value,
13206 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13207 last_death, and also the similar information mem_last_set (which insn
13208 most recently modified memory) and last_call_luid (which insn was the
13209 most recent subroutine call). */
13211 static void
13212 record_dead_and_set_regs (rtx_insn *insn)
13214 rtx link;
13215 unsigned int i;
13217 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13219 if (REG_NOTE_KIND (link) == REG_DEAD
13220 && REG_P (XEXP (link, 0)))
13222 unsigned int regno = REGNO (XEXP (link, 0));
13223 unsigned int endregno = END_REGNO (XEXP (link, 0));
13225 for (i = regno; i < endregno; i++)
13227 reg_stat_type *rsp;
13229 rsp = &reg_stat[i];
13230 rsp->last_death = insn;
13233 else if (REG_NOTE_KIND (link) == REG_INC)
13234 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13237 if (CALL_P (insn))
13239 hard_reg_set_iterator hrsi;
13240 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13242 reg_stat_type *rsp;
13244 rsp = &reg_stat[i];
13245 rsp->last_set_invalid = 1;
13246 rsp->last_set = insn;
13247 rsp->last_set_value = 0;
13248 rsp->last_set_mode = VOIDmode;
13249 rsp->last_set_nonzero_bits = 0;
13250 rsp->last_set_sign_bit_copies = 0;
13251 rsp->last_death = 0;
13252 rsp->truncated_to_mode = VOIDmode;
13255 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13257 /* We can't combine into a call pattern. Remember, though, that
13258 the return value register is set at this LUID. We could
13259 still replace a register with the return value from the
13260 wrong subroutine call! */
13261 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13263 else
13264 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13267 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13268 register present in the SUBREG, so for each such SUBREG go back and
13269 adjust nonzero and sign bit information of the registers that are
13270 known to have some zero/sign bits set.
13272 This is needed because when combine blows the SUBREGs away, the
13273 information on zero/sign bits is lost and further combines can be
13274 missed because of that. */
13276 static void
13277 record_promoted_value (rtx_insn *insn, rtx subreg)
13279 struct insn_link *links;
13280 rtx set;
13281 unsigned int regno = REGNO (SUBREG_REG (subreg));
13282 machine_mode mode = GET_MODE (subreg);
13284 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13285 return;
13287 for (links = LOG_LINKS (insn); links;)
13289 reg_stat_type *rsp;
13291 insn = links->insn;
13292 set = single_set (insn);
13294 if (! set || !REG_P (SET_DEST (set))
13295 || REGNO (SET_DEST (set)) != regno
13296 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13298 links = links->next;
13299 continue;
13302 rsp = &reg_stat[regno];
13303 if (rsp->last_set == insn)
13305 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13306 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13309 if (REG_P (SET_SRC (set)))
13311 regno = REGNO (SET_SRC (set));
13312 links = LOG_LINKS (insn);
13314 else
13315 break;
13319 /* Check if X, a register, is known to contain a value already
13320 truncated to MODE. In this case we can use a subreg to refer to
13321 the truncated value even though in the generic case we would need
13322 an explicit truncation. */
13324 static bool
13325 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13327 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13328 machine_mode truncated = rsp->truncated_to_mode;
13330 if (truncated == 0
13331 || rsp->truncation_label < label_tick_ebb_start)
13332 return false;
13333 if (!partial_subreg_p (mode, truncated))
13334 return true;
13335 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13336 return true;
13337 return false;
13340 /* If X is a hard reg or a subreg record the mode that the register is
13341 accessed in. For non-TRULY_NOOP_TRUNCATION targets we might be able
13342 to turn a truncate into a subreg using this information. Return true
13343 if traversing X is complete. */
13345 static bool
13346 record_truncated_value (rtx x)
13348 machine_mode truncated_mode;
13349 reg_stat_type *rsp;
13351 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13353 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13354 truncated_mode = GET_MODE (x);
13356 if (!partial_subreg_p (truncated_mode, original_mode))
13357 return true;
13359 truncated_mode = GET_MODE (x);
13360 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13361 return true;
13363 x = SUBREG_REG (x);
13365 /* ??? For hard-regs we now record everything. We might be able to
13366 optimize this using last_set_mode. */
13367 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13368 truncated_mode = GET_MODE (x);
13369 else
13370 return false;
13372 rsp = &reg_stat[REGNO (x)];
13373 if (rsp->truncated_to_mode == 0
13374 || rsp->truncation_label < label_tick_ebb_start
13375 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13377 rsp->truncated_to_mode = truncated_mode;
13378 rsp->truncation_label = label_tick;
13381 return true;
13384 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13385 the modes they are used in. This can help turning TRUNCATEs into
13386 SUBREGs. */
13388 static void
13389 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13391 subrtx_var_iterator::array_type array;
13392 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13393 if (record_truncated_value (*iter))
13394 iter.skip_subrtxes ();
13397 /* Scan X for promoted SUBREGs. For each one found,
13398 note what it implies to the registers used in it. */
13400 static void
13401 check_promoted_subreg (rtx_insn *insn, rtx x)
13403 if (GET_CODE (x) == SUBREG
13404 && SUBREG_PROMOTED_VAR_P (x)
13405 && REG_P (SUBREG_REG (x)))
13406 record_promoted_value (insn, x);
13407 else
13409 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13410 int i, j;
13412 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13413 switch (format[i])
13415 case 'e':
13416 check_promoted_subreg (insn, XEXP (x, i));
13417 break;
13418 case 'V':
13419 case 'E':
13420 if (XVEC (x, i) != 0)
13421 for (j = 0; j < XVECLEN (x, i); j++)
13422 check_promoted_subreg (insn, XVECEXP (x, i, j));
13423 break;
13428 /* Verify that all the registers and memory references mentioned in *LOC are
13429 still valid. *LOC was part of a value set in INSN when label_tick was
13430 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13431 the invalid references with (clobber (const_int 0)) and return 1. This
13432 replacement is useful because we often can get useful information about
13433 the form of a value (e.g., if it was produced by a shift that always
13434 produces -1 or 0) even though we don't know exactly what registers it
13435 was produced from. */
13437 static int
13438 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13440 rtx x = *loc;
13441 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13442 int len = GET_RTX_LENGTH (GET_CODE (x));
13443 int i, j;
13445 if (REG_P (x))
13447 unsigned int regno = REGNO (x);
13448 unsigned int endregno = END_REGNO (x);
13449 unsigned int j;
13451 for (j = regno; j < endregno; j++)
13453 reg_stat_type *rsp = &reg_stat[j];
13454 if (rsp->last_set_invalid
13455 /* If this is a pseudo-register that was only set once and not
13456 live at the beginning of the function, it is always valid. */
13457 || (! (regno >= FIRST_PSEUDO_REGISTER
13458 && regno < reg_n_sets_max
13459 && REG_N_SETS (regno) == 1
13460 && (!REGNO_REG_SET_P
13461 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13462 regno)))
13463 && rsp->last_set_label > tick))
13465 if (replace)
13466 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13467 return replace;
13471 return 1;
13473 /* If this is a memory reference, make sure that there were no stores after
13474 it that might have clobbered the value. We don't have alias info, so we
13475 assume any store invalidates it. Moreover, we only have local UIDs, so
13476 we also assume that there were stores in the intervening basic blocks. */
13477 else if (MEM_P (x) && !MEM_READONLY_P (x)
13478 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13480 if (replace)
13481 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13482 return replace;
13485 for (i = 0; i < len; i++)
13487 if (fmt[i] == 'e')
13489 /* Check for identical subexpressions. If x contains
13490 identical subexpressions we only have to traverse one of
13491 them. */
13492 if (i == 1 && ARITHMETIC_P (x))
13494 /* Note that at this point x0 has already been checked
13495 and found valid. */
13496 rtx x0 = XEXP (x, 0);
13497 rtx x1 = XEXP (x, 1);
13499 /* If x0 and x1 are identical then x is also valid. */
13500 if (x0 == x1)
13501 return 1;
13503 /* If x1 is identical to a subexpression of x0 then
13504 while checking x0, x1 has already been checked. Thus
13505 it is valid and so is x. */
13506 if (ARITHMETIC_P (x0)
13507 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13508 return 1;
13510 /* If x0 is identical to a subexpression of x1 then x is
13511 valid iff the rest of x1 is valid. */
13512 if (ARITHMETIC_P (x1)
13513 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13514 return
13515 get_last_value_validate (&XEXP (x1,
13516 x0 == XEXP (x1, 0) ? 1 : 0),
13517 insn, tick, replace);
13520 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13521 replace) == 0)
13522 return 0;
13524 else if (fmt[i] == 'E')
13525 for (j = 0; j < XVECLEN (x, i); j++)
13526 if (get_last_value_validate (&XVECEXP (x, i, j),
13527 insn, tick, replace) == 0)
13528 return 0;
13531 /* If we haven't found a reason for it to be invalid, it is valid. */
13532 return 1;
13535 /* Get the last value assigned to X, if known. Some registers
13536 in the value may be replaced with (clobber (const_int 0)) if their value
13537 is no longer known reliably. */
13539 static rtx
13540 get_last_value (const_rtx x)
13542 unsigned int regno;
13543 rtx value;
13544 reg_stat_type *rsp;
13546 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13547 then convert it to the desired mode. If this is a paradoxical SUBREG,
13548 we cannot predict what values the "extra" bits might have. */
13549 if (GET_CODE (x) == SUBREG
13550 && subreg_lowpart_p (x)
13551 && !paradoxical_subreg_p (x)
13552 && (value = get_last_value (SUBREG_REG (x))) != 0)
13553 return gen_lowpart (GET_MODE (x), value);
13555 if (!REG_P (x))
13556 return 0;
13558 regno = REGNO (x);
13559 rsp = &reg_stat[regno];
13560 value = rsp->last_set_value;
13562 /* If we don't have a value, or if it isn't for this basic block and
13563 it's either a hard register, set more than once, or it's
13564 at the beginning of the function, return 0.
13566 Because if it's not live at the beginning of the function then the reg
13567 is always set before being used (is never used without being set).
13568 And, if it's set only once, and it's always set before use, then all
13569 uses must have the same last value, even if it's not from this basic
13570 block. */
13572 if (value == 0
13573 || (rsp->last_set_label < label_tick_ebb_start
13574 && (regno < FIRST_PSEUDO_REGISTER
13575 || regno >= reg_n_sets_max
13576 || REG_N_SETS (regno) != 1
13577 || REGNO_REG_SET_P
13578 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13579 return 0;
13581 /* If the value was set in a later insn than the ones we are processing,
13582 we can't use it even if the register was only set once. */
13583 if (rsp->last_set_label == label_tick
13584 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13585 return 0;
13587 /* If fewer bits were set than what we are asked for now, we cannot use
13588 the value. */
13589 if (GET_MODE_PRECISION (rsp->last_set_mode)
13590 < GET_MODE_PRECISION (GET_MODE (x)))
13591 return 0;
13593 /* If the value has all its registers valid, return it. */
13594 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13595 return value;
13597 /* Otherwise, make a copy and replace any invalid register with
13598 (clobber (const_int 0)). If that fails for some reason, return 0. */
13600 value = copy_rtx (value);
13601 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13602 return value;
13604 return 0;
13607 /* Return nonzero if expression X refers to a REG or to memory
13608 that is set in an instruction more recent than FROM_LUID. */
13610 static int
13611 use_crosses_set_p (const_rtx x, int from_luid)
13613 const char *fmt;
13614 int i;
13615 enum rtx_code code = GET_CODE (x);
13617 if (code == REG)
13619 unsigned int regno = REGNO (x);
13620 unsigned endreg = END_REGNO (x);
13622 #ifdef PUSH_ROUNDING
13623 /* Don't allow uses of the stack pointer to be moved,
13624 because we don't know whether the move crosses a push insn. */
13625 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13626 return 1;
13627 #endif
13628 for (; regno < endreg; regno++)
13630 reg_stat_type *rsp = &reg_stat[regno];
13631 if (rsp->last_set
13632 && rsp->last_set_label == label_tick
13633 && DF_INSN_LUID (rsp->last_set) > from_luid)
13634 return 1;
13636 return 0;
13639 if (code == MEM && mem_last_set > from_luid)
13640 return 1;
13642 fmt = GET_RTX_FORMAT (code);
13644 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13646 if (fmt[i] == 'E')
13648 int j;
13649 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13650 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13651 return 1;
13653 else if (fmt[i] == 'e'
13654 && use_crosses_set_p (XEXP (x, i), from_luid))
13655 return 1;
13657 return 0;
13660 /* Define three variables used for communication between the following
13661 routines. */
13663 static unsigned int reg_dead_regno, reg_dead_endregno;
13664 static int reg_dead_flag;
13666 /* Function called via note_stores from reg_dead_at_p.
13668 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13669 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13671 static void
13672 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13674 unsigned int regno, endregno;
13676 if (!REG_P (dest))
13677 return;
13679 regno = REGNO (dest);
13680 endregno = END_REGNO (dest);
13681 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13682 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13685 /* Return nonzero if REG is known to be dead at INSN.
13687 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13688 referencing REG, it is dead. If we hit a SET referencing REG, it is
13689 live. Otherwise, see if it is live or dead at the start of the basic
13690 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13691 must be assumed to be always live. */
13693 static int
13694 reg_dead_at_p (rtx reg, rtx_insn *insn)
13696 basic_block block;
13697 unsigned int i;
13699 /* Set variables for reg_dead_at_p_1. */
13700 reg_dead_regno = REGNO (reg);
13701 reg_dead_endregno = END_REGNO (reg);
13703 reg_dead_flag = 0;
13705 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13706 we allow the machine description to decide whether use-and-clobber
13707 patterns are OK. */
13708 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13710 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13711 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13712 return 0;
13715 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13716 beginning of basic block. */
13717 block = BLOCK_FOR_INSN (insn);
13718 for (;;)
13720 if (INSN_P (insn))
13722 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13723 return 1;
13725 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13726 if (reg_dead_flag)
13727 return reg_dead_flag == 1 ? 1 : 0;
13729 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13730 return 1;
13733 if (insn == BB_HEAD (block))
13734 break;
13736 insn = PREV_INSN (insn);
13739 /* Look at live-in sets for the basic block that we were in. */
13740 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13741 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13742 return 0;
13744 return 1;
13747 /* Note hard registers in X that are used. */
13749 static void
13750 mark_used_regs_combine (rtx x)
13752 RTX_CODE code = GET_CODE (x);
13753 unsigned int regno;
13754 int i;
13756 switch (code)
13758 case LABEL_REF:
13759 case SYMBOL_REF:
13760 case CONST:
13761 CASE_CONST_ANY:
13762 case PC:
13763 case ADDR_VEC:
13764 case ADDR_DIFF_VEC:
13765 case ASM_INPUT:
13766 /* CC0 must die in the insn after it is set, so we don't need to take
13767 special note of it here. */
13768 case CC0:
13769 return;
13771 case CLOBBER:
13772 /* If we are clobbering a MEM, mark any hard registers inside the
13773 address as used. */
13774 if (MEM_P (XEXP (x, 0)))
13775 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13776 return;
13778 case REG:
13779 regno = REGNO (x);
13780 /* A hard reg in a wide mode may really be multiple registers.
13781 If so, mark all of them just like the first. */
13782 if (regno < FIRST_PSEUDO_REGISTER)
13784 /* None of this applies to the stack, frame or arg pointers. */
13785 if (regno == STACK_POINTER_REGNUM
13786 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13787 && regno == HARD_FRAME_POINTER_REGNUM)
13788 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13789 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13790 || regno == FRAME_POINTER_REGNUM)
13791 return;
13793 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13795 return;
13797 case SET:
13799 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13800 the address. */
13801 rtx testreg = SET_DEST (x);
13803 while (GET_CODE (testreg) == SUBREG
13804 || GET_CODE (testreg) == ZERO_EXTRACT
13805 || GET_CODE (testreg) == STRICT_LOW_PART)
13806 testreg = XEXP (testreg, 0);
13808 if (MEM_P (testreg))
13809 mark_used_regs_combine (XEXP (testreg, 0));
13811 mark_used_regs_combine (SET_SRC (x));
13813 return;
13815 default:
13816 break;
13819 /* Recursively scan the operands of this expression. */
13822 const char *fmt = GET_RTX_FORMAT (code);
13824 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13826 if (fmt[i] == 'e')
13827 mark_used_regs_combine (XEXP (x, i));
13828 else if (fmt[i] == 'E')
13830 int j;
13832 for (j = 0; j < XVECLEN (x, i); j++)
13833 mark_used_regs_combine (XVECEXP (x, i, j));
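/* Illustrative sketch only (hypothetical types, not GCC API): the
   fallthrough case above is the canonical rtx walk -- consult the
   per-code format string, recurse into 'e' operands and into every
   element of 'E' vectors, and record registers at the leaves.  A
   self-contained model of that walk:  */
#if 0
#include <string.h>

/* Hypothetical expression node; FMT plays the role of GET_RTX_FORMAT,
   with 'e' meaning a sub-expression and 'E' a vector of them.  */
struct toy_expr
{
  const char *fmt;		/* e.g. "ee" for a binary operator */
  struct toy_expr *op[4];	/* 'e' operands (at most 4 in this toy) */
  struct toy_expr **vec;	/* 'E' operand: NULL-terminated vector */
  int regno;			/* >= 0 if this node is a register leaf */
};

static void
toy_mark_used (struct toy_expr *x, unsigned char *used)
{
  if (x->regno >= 0)
    {
      used[x->regno] = 1;	/* Leaf: note that the register is used.  */
      return;
    }
  for (size_t i = 0; i < strlen (x->fmt); i++)
    if (x->fmt[i] == 'e')
      toy_mark_used (x->op[i], used);
    else if (x->fmt[i] == 'E')
      for (struct toy_expr **p = x->vec; *p; p++)
	toy_mark_used (*p, used);
}
#endif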
13839 /* Remove register number REGNO from the dead registers list of INSN.
13841 Return the note used to record the death, if there was one. */
13843 static rtx
13844 remove_death (unsigned int regno, rtx_insn *insn)
13846 rtx note = find_regno_note (insn, REG_DEAD, regno);
13848 if (note)
13849 remove_note (insn, note);
13851 return note;
13854 /* For each register (hardware or pseudo) used within expression X, if its
13855 death is in an instruction with luid between FROM_LUID (inclusive) and
13856 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13857 list headed by PNOTES.
13859 That said, don't move registers killed by maybe_kill_insn.
13861 This is done when X is being merged by combination into TO_INSN. These
13862 notes will then be distributed as needed. */
13864 static void
13865 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13866 rtx *pnotes)
13868 const char *fmt;
13869 int len, i;
13870 enum rtx_code code = GET_CODE (x);
13872 if (code == REG)
13874 unsigned int regno = REGNO (x);
13875 rtx_insn *where_dead = reg_stat[regno].last_death;
13877 /* Don't move the register if it gets killed in between from and to. */
13878 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13879 && ! reg_referenced_p (x, maybe_kill_insn))
13880 return;
13882 if (where_dead
13883 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13884 && DF_INSN_LUID (where_dead) >= from_luid
13885 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13887 rtx note = remove_death (regno, where_dead);
13889 /* It is possible for the call above to return 0. This can occur
13890 when last_death points to I2 or I1 that we combined with.
13891 In that case make a new note.
13893 We must also check for the case where X is a hard register
13894 and NOTE is a death note for a range of hard registers
13895 including X. In that case, we must put REG_DEAD notes for
13896 the remaining registers in place of NOTE. */
13898 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13899 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13901 unsigned int deadregno = REGNO (XEXP (note, 0));
13902 unsigned int deadend = END_REGNO (XEXP (note, 0));
13903 unsigned int ourend = END_REGNO (x);
13904 unsigned int i;
13906 for (i = deadregno; i < deadend; i++)
13907 if (i < regno || i >= ourend)
13908 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13911 /* If we didn't find any note, or if we found a REG_DEAD note that
13912 covers only part of the given reg, and we have a multi-reg hard
13913 register, then to be safe we must check for REG_DEAD notes
13914 for each register other than the first. They could have
13915 their own REG_DEAD notes lying around. */
13916 else if ((note == 0
13917 || (note != 0
13918 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13919 GET_MODE (x))))
13920 && regno < FIRST_PSEUDO_REGISTER
13921 && REG_NREGS (x) > 1)
13923 unsigned int ourend = END_REGNO (x);
13924 unsigned int i, offset;
13925 rtx oldnotes = 0;
13927 if (note)
13928 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13929 else
13930 offset = 1;
13932 for (i = regno + offset; i < ourend; i++)
13933 move_deaths (regno_reg_rtx[i],
13934 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13937 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13939 XEXP (note, 1) = *pnotes;
13940 *pnotes = note;
13942 else
13943 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13946 return;
13949 else if (GET_CODE (x) == SET)
13951 rtx dest = SET_DEST (x);
13953 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13955 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13956 that accesses one word of a multi-word item, some
13957 piece of every register in the expression is used by
13958 this insn, so remove any old death. */
13959 /* ??? So why do we test for equality of the sizes? */
13961 if (GET_CODE (dest) == ZERO_EXTRACT
13962 || GET_CODE (dest) == STRICT_LOW_PART
13963 || (GET_CODE (dest) == SUBREG
13964 && (((GET_MODE_SIZE (GET_MODE (dest))
13965 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13966 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13967 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13969 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13970 return;
13973 /* If this is some other SUBREG, we know it replaces the entire
13974 value, so use that as the destination. */
13975 if (GET_CODE (dest) == SUBREG)
13976 dest = SUBREG_REG (dest);
13978 /* If this is a MEM, adjust deaths of anything used in the address.
13979 For a REG (the only other possibility), the entire value is
13980 being replaced so the old value is not used in this insn. */
13982 if (MEM_P (dest))
13983 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13984 to_insn, pnotes);
13985 return;
13988 else if (GET_CODE (x) == CLOBBER)
13989 return;
13991 len = GET_RTX_LENGTH (code);
13992 fmt = GET_RTX_FORMAT (code);
13994 for (i = 0; i < len; i++)
13996 if (fmt[i] == 'E')
13998 int j;
13999 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14000 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14001 to_insn, pnotes);
14003 else if (fmt[i] == 'e')
14004 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
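/* Illustrative sketch only (hypothetical types, not GCC API): for a
   plain single-register REG, the heart of move_deaths is "unlink the
   REG_DEAD note from the insn where the register used to die, or
   allocate a fresh one, and push it onto the list headed by PNOTES for
   later distribution".  A minimal model of that splice:  */
#if 0
#include <stdlib.h>

/* Hypothetical REG_DEAD note record.  */
struct toy_death_note
{
  int regno;
  struct toy_death_note *next;
};

/* Detach the note for REGNO from *FROM (the old insn's note list) and
   push it onto *PNOTES, allocating a new note if none was found.  */
static void
toy_move_death (int regno, struct toy_death_note **from,
		struct toy_death_note **pnotes)
{
  struct toy_death_note **p, *note = NULL;

  for (p = from; *p; p = &(*p)->next)
    if ((*p)->regno == regno)
      {
	note = *p;
	*p = note->next;	/* the remove_death step: unlink it */
	break;
      }

  if (note == NULL)
    {
      note = (struct toy_death_note *) malloc (sizeof *note);
      if (note == NULL)
	return;			/* out of memory: nothing to record */
      note->regno = regno;	/* the alloc_reg_note step */
    }
  note->next = *pnotes;		/* head of the caller's PNOTES list */
  *pnotes = note;
}
#endif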
14008 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14009 pattern of an insn. X must be a REG. */
14011 static int
14012 reg_bitfield_target_p (rtx x, rtx body)
14014 int i;
14016 if (GET_CODE (body) == SET)
14018 rtx dest = SET_DEST (body);
14019 rtx target;
14020 unsigned int regno, tregno, endregno, endtregno;
14022 if (GET_CODE (dest) == ZERO_EXTRACT)
14023 target = XEXP (dest, 0);
14024 else if (GET_CODE (dest) == STRICT_LOW_PART)
14025 target = SUBREG_REG (XEXP (dest, 0));
14026 else
14027 return 0;
14029 if (GET_CODE (target) == SUBREG)
14030 target = SUBREG_REG (target);
14032 if (!REG_P (target))
14033 return 0;
14035 tregno = REGNO (target), regno = REGNO (x);
14036 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14037 return target == x;
14039 endtregno = end_hard_regno (GET_MODE (target), tregno);
14040 endregno = end_hard_regno (GET_MODE (x), regno);
14042 return endregno > tregno && regno < endtregno;
14045 else if (GET_CODE (body) == PARALLEL)
14046 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14047 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14048 return 1;
14050 return 0;
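/* Illustrative sketch only: for hard registers the test above is the
   usual half-open interval overlap check.  For example, on a target
   where a DImode value occupies hard regs 2 and 3 (the range [2, 4)),
   it overlaps a word-sized bit-field target in hard reg 3 ([3, 4)), so
   reg_bitfield_target_p returns 1 for it.  A self-contained statement
   of the test (hypothetical name):  */
#if 0
#include <stdbool.h>

/* [REGNO, ENDREGNO) and [TREGNO, ENDTREGNO) overlap iff each range
   starts before the other one ends.  */
static bool
toy_hard_reg_ranges_overlap (unsigned int regno, unsigned int endregno,
			     unsigned int tregno, unsigned int endtregno)
{
  return endregno > tregno && regno < endtregno;
}
#endif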
14053 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14054 as appropriate. I3 and I2 are the insns resulting from the combination
14055 insns including FROM (I2 may be zero).
14057 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14058 not need REG_DEAD notes because they are being substituted for. This
14059 saves searching in the most common cases.
14061 Each note in the list is either ignored or placed on some insns, depending
14062 on the type of note. */
14064 static void
14065 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14066 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14068 rtx note, next_note;
14069 rtx tem_note;
14070 rtx_insn *tem_insn;
14072 for (note = notes; note; note = next_note)
14074 rtx_insn *place = 0, *place2 = 0;
14076 next_note = XEXP (note, 1);
14077 switch (REG_NOTE_KIND (note))
14079 case REG_BR_PROB:
14080 case REG_BR_PRED:
14081 /* Doesn't matter much where we put this, as long as it's somewhere.
14082 It is preferable to keep these notes on branches, which is most
14083 likely to be i3. */
14084 place = i3;
14085 break;
14087 case REG_NON_LOCAL_GOTO:
14088 if (JUMP_P (i3))
14089 place = i3;
14090 else
14092 gcc_assert (i2 && JUMP_P (i2));
14093 place = i2;
14095 break;
14097 case REG_EH_REGION:
14098 /* These notes must remain with the call or trapping instruction. */
14099 if (CALL_P (i3))
14100 place = i3;
14101 else if (i2 && CALL_P (i2))
14102 place = i2;
14103 else
14105 gcc_assert (cfun->can_throw_non_call_exceptions);
14106 if (may_trap_p (i3))
14107 place = i3;
14108 else if (i2 && may_trap_p (i2))
14109 place = i2;
14110 /* ??? Otherwise assume we've combined things such that we
14111 can now prove that the instructions can't trap. Drop the
14112 note in this case. */
14114 break;
14116 case REG_ARGS_SIZE:
14117 /* ??? How to distribute this between i3 and i1? Assume i3 contains the
14118 entire adjustment. Assert that i3 contains at least some adjustment. */
14119 if (!noop_move_p (i3))
14121 int old_size, args_size = INTVAL (XEXP (note, 0));
14122 /* fixup_args_size_notes looks at REG_NORETURN note,
14123 so ensure the note is placed there first. */
14124 if (CALL_P (i3))
14126 rtx *np;
14127 for (np = &next_note; *np; np = &XEXP (*np, 1))
14128 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14130 rtx n = *np;
14131 *np = XEXP (n, 1);
14132 XEXP (n, 1) = REG_NOTES (i3);
14133 REG_NOTES (i3) = n;
14134 break;
14137 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14138 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14139 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14140 gcc_assert (old_size != args_size
14141 || (CALL_P (i3)
14142 && !ACCUMULATE_OUTGOING_ARGS
14143 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14145 break;
14147 case REG_NORETURN:
14148 case REG_SETJMP:
14149 case REG_TM:
14150 case REG_CALL_DECL:
14151 /* These notes must remain with the call. It should not be
14152 possible for both I2 and I3 to be a call. */
14153 if (CALL_P (i3))
14154 place = i3;
14155 else
14157 gcc_assert (i2 && CALL_P (i2));
14158 place = i2;
14160 break;
14162 case REG_UNUSED:
14163 /* Any clobbers for i3 may still exist, and so we must process
14164 REG_UNUSED notes from that insn.
14166 Any clobbers from i2 or i1 can only exist if they were added by
14167 recog_for_combine. In that case, recog_for_combine created the
14168 necessary REG_UNUSED notes. Trying to keep any original
14169 REG_UNUSED notes from these insns can cause incorrect output
14170 if they are for the same register as the original i3 dest.
14171 In that case, we will notice that the register is set in i3,
14172 and then add a REG_UNUSED note for the destination of i3, which
14173 is wrong. However, it is possible to have REG_UNUSED notes from
14174 i2 or i1 for registers that were both used and clobbered, so
14175 we keep notes from i2 or i1 if they will turn into REG_DEAD
14176 notes. */
14178 /* If this register is set or clobbered in I3, put the note there
14179 unless there is one already. */
14180 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14182 if (from_insn != i3)
14183 break;
14185 if (! (REG_P (XEXP (note, 0))
14186 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14187 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14188 place = i3;
14190 /* Otherwise, if this register is used by I3, then this register
14191 now dies here, so we must put a REG_DEAD note here unless there
14192 is one already. */
14193 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14194 && ! (REG_P (XEXP (note, 0))
14195 ? find_regno_note (i3, REG_DEAD,
14196 REGNO (XEXP (note, 0)))
14197 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14199 PUT_REG_NOTE_KIND (note, REG_DEAD);
14200 place = i3;
14202 break;
14204 case REG_EQUAL:
14205 case REG_EQUIV:
14206 case REG_NOALIAS:
14207 /* These notes say something about results of an insn. We can
14208 only support them if they used to be on I3 in which case they
14209 remain on I3. Otherwise they are ignored.
14211 If the note refers to an expression that is not a constant, we
14212 must also ignore the note since we cannot tell whether the
14213 equivalence is still true. It might be possible to do
14214 slightly better than this (we only have a problem if I2DEST
14215 or I1DEST is present in the expression), but it doesn't
14216 seem worth the trouble. */
14218 if (from_insn == i3
14219 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14220 place = i3;
14221 break;
14223 case REG_INC:
14224 /* These notes say something about how a register is used. They must
14225 be present on any use of the register in I2 or I3. */
14226 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14227 place = i3;
14229 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14231 if (place)
14232 place2 = i2;
14233 else
14234 place = i2;
14236 break;
14238 case REG_LABEL_TARGET:
14239 case REG_LABEL_OPERAND:
14240 /* This can show up in several ways -- either directly in the
14241 pattern, or hidden off in the constant pool with (or without?)
14242 a REG_EQUAL note. */
14243 /* ??? Ignore the without-reg_equal-note problem for now. */
14244 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14245 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14246 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14247 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14248 place = i3;
14250 if (i2
14251 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14252 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14253 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14254 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14256 if (place)
14257 place2 = i2;
14258 else
14259 place = i2;
14262 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14263 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14264 there. */
14265 if (place && JUMP_P (place)
14266 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14267 && (JUMP_LABEL (place) == NULL
14268 || JUMP_LABEL (place) == XEXP (note, 0)))
14270 rtx label = JUMP_LABEL (place);
14272 if (!label)
14273 JUMP_LABEL (place) = XEXP (note, 0);
14274 else if (LABEL_P (label))
14275 LABEL_NUSES (label)--;
14278 if (place2 && JUMP_P (place2)
14279 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14280 && (JUMP_LABEL (place2) == NULL
14281 || JUMP_LABEL (place2) == XEXP (note, 0)))
14283 rtx label = JUMP_LABEL (place2);
14285 if (!label)
14286 JUMP_LABEL (place2) = XEXP (note, 0);
14287 else if (LABEL_P (label))
14288 LABEL_NUSES (label)--;
14289 place2 = 0;
14291 break;
14293 case REG_NONNEG:
14294 /* This note says something about the value of a register prior
14295 to the execution of an insn. It is too much trouble to see
14296 if the note is still correct in all situations. It is better
14297 to simply delete it. */
14298 break;
14300 case REG_DEAD:
14301 /* If we replaced the right hand side of FROM_INSN with a
14302 REG_EQUAL note, the original use of the dying register
14303 will not have been combined into I3 and I2. In such cases,
14304 FROM_INSN is guaranteed to be the first of the combined
14305 instructions, so we simply need to search back before
14306 FROM_INSN for the previous use or set of this register,
14307 then alter the notes there appropriately.
14309 If the register is used as an input in I3, it dies there.
14310 Similarly for I2, if it is nonzero and adjacent to I3.
14312 If the register is not used as an input in either I3 or I2
14313 and it is not one of the registers we were supposed to eliminate,
14314 there are two possibilities. We might have a non-adjacent I2
14315 or we might have somehow eliminated an additional register
14316 from a computation. For example, we might have had A & B where
14317 we discover that B will always be zero. In this case we will
14318 eliminate the reference to A.
14320 In both cases, we must search to see if we can find a previous
14321 use of A and put the death note there. */
14323 if (from_insn
14324 && from_insn == i2mod
14325 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14326 tem_insn = from_insn;
14327 else
14329 if (from_insn
14330 && CALL_P (from_insn)
14331 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14332 place = from_insn;
14333 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14334 place = i3;
14335 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14336 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14337 place = i2;
14338 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14339 && !(i2mod
14340 && reg_overlap_mentioned_p (XEXP (note, 0),
14341 i2mod_old_rhs)))
14342 || rtx_equal_p (XEXP (note, 0), elim_i1)
14343 || rtx_equal_p (XEXP (note, 0), elim_i0))
14344 break;
14345 tem_insn = i3;
14346 /* If the new I2 sets the same register that is marked dead
14347 in the note, we do not know where to put the note.
14348 Give up. */
14349 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14350 break;
14353 if (place == 0)
14355 basic_block bb = this_basic_block;
14357 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14359 if (!NONDEBUG_INSN_P (tem_insn))
14361 if (tem_insn == BB_HEAD (bb))
14362 break;
14363 continue;
14366 /* If the register is being set at TEM_INSN, see if that is all
14367 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14368 into a REG_UNUSED note instead. Don't delete sets to
14369 global register vars. */
14370 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14371 || !global_regs[REGNO (XEXP (note, 0))])
14372 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14374 rtx set = single_set (tem_insn);
14375 rtx inner_dest = 0;
14376 rtx_insn *cc0_setter = NULL;
14378 if (set != 0)
14379 for (inner_dest = SET_DEST (set);
14380 (GET_CODE (inner_dest) == STRICT_LOW_PART
14381 || GET_CODE (inner_dest) == SUBREG
14382 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14383 inner_dest = XEXP (inner_dest, 0))
14386 /* Verify that it was the set, and not a clobber that
14387 modified the register.
14389 CC0 targets must be careful to maintain setter/user
14390 pairs. If we cannot delete the setter due to side
14391 effects, mark the user with an UNUSED note instead
14392 of deleting it. */
14394 if (set != 0 && ! side_effects_p (SET_SRC (set))
14395 && rtx_equal_p (XEXP (note, 0), inner_dest)
14396 && (!HAVE_cc0
14397 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14398 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14399 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14401 /* Move the notes and links of TEM_INSN elsewhere.
14402 This might delete other dead insns recursively.
14403 First set the pattern to something that won't use
14404 any register. */
14405 rtx old_notes = REG_NOTES (tem_insn);
14407 PATTERN (tem_insn) = pc_rtx;
14408 REG_NOTES (tem_insn) = NULL;
14410 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14411 NULL_RTX, NULL_RTX, NULL_RTX);
14412 distribute_links (LOG_LINKS (tem_insn));
14414 unsigned int regno = REGNO (XEXP (note, 0));
14415 reg_stat_type *rsp = &reg_stat[regno];
14416 if (rsp->last_set == tem_insn)
14417 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14419 SET_INSN_DELETED (tem_insn);
14420 if (tem_insn == i2)
14421 i2 = NULL;
14423 /* Delete the setter too. */
14424 if (cc0_setter)
14426 PATTERN (cc0_setter) = pc_rtx;
14427 old_notes = REG_NOTES (cc0_setter);
14428 REG_NOTES (cc0_setter) = NULL;
14430 distribute_notes (old_notes, cc0_setter,
14431 cc0_setter, NULL,
14432 NULL_RTX, NULL_RTX, NULL_RTX);
14433 distribute_links (LOG_LINKS (cc0_setter));
14435 SET_INSN_DELETED (cc0_setter);
14436 if (cc0_setter == i2)
14437 i2 = NULL;
14440 else
14442 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14444 /* If there isn't already a REG_UNUSED note, put one
14445 here. Do not place a REG_DEAD note, even if
14446 the register is also used here; that would not
14447 match the algorithm used in lifetime analysis
14448 and can cause the consistency check in the
14449 scheduler to fail. */
14450 if (! find_regno_note (tem_insn, REG_UNUSED,
14451 REGNO (XEXP (note, 0))))
14452 place = tem_insn;
14453 break;
14456 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14457 || (CALL_P (tem_insn)
14458 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14460 place = tem_insn;
14462 /* If we are doing a 3->2 combination, and we have a
14463 register which formerly died in i3 and was not used
14464 by i2, which now no longer dies in i3 and is used in
14465 i2 but does not die in i2, and place is between i2
14466 and i3, then we may need to move a link from place to
14467 i2. */
14468 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14469 && from_insn
14470 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14471 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14473 struct insn_link *links = LOG_LINKS (place);
14474 LOG_LINKS (place) = NULL;
14475 distribute_links (links);
14477 break;
14480 if (tem_insn == BB_HEAD (bb))
14481 break;
14486 /* If the register is set or already dead at PLACE, we needn't do
14487 anything with this note if it is still a REG_DEAD note.
14488 We check here if it is set at all, not if it is totally replaced,
14489 which is what `dead_or_set_p' checks, so also check for it being
14490 set partially. */
14492 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14494 unsigned int regno = REGNO (XEXP (note, 0));
14495 reg_stat_type *rsp = &reg_stat[regno];
14497 if (dead_or_set_p (place, XEXP (note, 0))
14498 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14500 /* Unless the register previously died in PLACE, clear
14501 last_death. [I no longer understand why this is
14502 being done.] */
14503 if (rsp->last_death != place)
14504 rsp->last_death = 0;
14505 place = 0;
14507 else
14508 rsp->last_death = place;
14510 /* If this is a death note for a hard reg that is occupying
14511 multiple registers, ensure that we are still using all
14512 parts of the object. If we find a piece of the object
14513 that is unused, we must arrange for an appropriate REG_DEAD
14514 note to be added for it. However, we can't just emit a USE
14515 and tag the note to it, since the register might actually
14516 be dead; so we recurse, and the recursive call then finds
14517 the previous insn that used this register. */
14519 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14521 unsigned int endregno = END_REGNO (XEXP (note, 0));
14522 bool all_used = true;
14523 unsigned int i;
14525 for (i = regno; i < endregno; i++)
14526 if ((! refers_to_regno_p (i, PATTERN (place))
14527 && ! find_regno_fusage (place, USE, i))
14528 || dead_or_set_regno_p (place, i))
14530 all_used = false;
14531 break;
14534 if (! all_used)
14536 /* Put only REG_DEAD notes for pieces that are
14537 not already dead or set. */
14539 for (i = regno; i < endregno;
14540 i += hard_regno_nregs (i, reg_raw_mode[i]))
14542 rtx piece = regno_reg_rtx[i];
14543 basic_block bb = this_basic_block;
14545 if (! dead_or_set_p (place, piece)
14546 && ! reg_bitfield_target_p (piece,
14547 PATTERN (place)))
14549 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14550 NULL_RTX);
14552 distribute_notes (new_note, place, place,
14553 NULL, NULL_RTX, NULL_RTX,
14554 NULL_RTX);
14556 else if (! refers_to_regno_p (i, PATTERN (place))
14557 && ! find_regno_fusage (place, USE, i))
14558 for (tem_insn = PREV_INSN (place); ;
14559 tem_insn = PREV_INSN (tem_insn))
14561 if (!NONDEBUG_INSN_P (tem_insn))
14563 if (tem_insn == BB_HEAD (bb))
14564 break;
14565 continue;
14567 if (dead_or_set_p (tem_insn, piece)
14568 || reg_bitfield_target_p (piece,
14569 PATTERN (tem_insn)))
14571 add_reg_note (tem_insn, REG_UNUSED, piece);
14572 break;
14577 place = 0;
14581 break;
14583 default:
14584 /* Any other notes should not be present at this point in the
14585 compilation. */
14586 gcc_unreachable ();
14589 if (place)
14591 XEXP (note, 1) = REG_NOTES (place);
14592 REG_NOTES (place) = note;
14595 if (place2)
14596 add_shallow_copy_of_reg_note (place2, note);
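/* Illustrative sketch only (hypothetical types, not GCC API): stripped
   of the per-kind logic, distribute_notes is a loop that walks the note
   chain, picks a home insn for each note (or drops it), and splices the
   note onto that insn's note list, with PLACE2 receiving a shallow copy
   when a note belongs on two insns.  A minimal model of the loop:  */
#if 0
#include <stddef.h>

struct toy_reg_note { int kind; struct toy_reg_note *next; };
struct toy_note_insn { struct toy_reg_note *notes; };

static void
toy_distribute_notes (struct toy_reg_note *notes,
		      struct toy_note_insn *i3, struct toy_note_insn *i2)
{
  struct toy_reg_note *note, *next;

  for (note = notes; note; note = next)
    {
      struct toy_note_insn *place = NULL;

      next = note->next;
      switch (note->kind)
	{
	case 0:			/* a kind that always stays on i3 */
	  place = i3;
	  break;
	case 1:			/* a kind that prefers i2 when it exists */
	  place = i2 ? i2 : i3;
	  break;
	default:		/* kinds we cannot prove correct are dropped */
	  break;
	}

      if (place)
	{
	  note->next = place->notes;	/* splice onto REG_NOTES (place) */
	  place->notes = note;
	}
    }
}
#endif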
14600 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14601 I3, I2, and I1 to new locations. This is also called to add a link
14602 pointing at I3 when I3's destination is changed. */
14604 static void
14605 distribute_links (struct insn_link *links)
14607 struct insn_link *link, *next_link;
14609 for (link = links; link; link = next_link)
14611 rtx_insn *place = 0;
14612 rtx_insn *insn;
14613 rtx set, reg;
14615 next_link = link->next;
14617 /* If the insn that this link points to is a NOTE, ignore it. */
14618 if (NOTE_P (link->insn))
14619 continue;
14621 set = 0;
14622 rtx pat = PATTERN (link->insn);
14623 if (GET_CODE (pat) == SET)
14624 set = pat;
14625 else if (GET_CODE (pat) == PARALLEL)
14627 int i;
14628 for (i = 0; i < XVECLEN (pat, 0); i++)
14630 set = XVECEXP (pat, 0, i);
14631 if (GET_CODE (set) != SET)
14632 continue;
14634 reg = SET_DEST (set);
14635 while (GET_CODE (reg) == ZERO_EXTRACT
14636 || GET_CODE (reg) == STRICT_LOW_PART
14637 || GET_CODE (reg) == SUBREG)
14638 reg = XEXP (reg, 0);
14640 if (!REG_P (reg))
14641 continue;
14643 if (REGNO (reg) == link->regno)
14644 break;
14646 if (i == XVECLEN (pat, 0))
14647 continue;
14649 else
14650 continue;
14652 reg = SET_DEST (set);
14654 while (GET_CODE (reg) == ZERO_EXTRACT
14655 || GET_CODE (reg) == STRICT_LOW_PART
14656 || GET_CODE (reg) == SUBREG)
14657 reg = XEXP (reg, 0);
14659 /* A LOG_LINK is defined as being placed on the first insn that uses
14660 a register and points to the insn that sets the register. Start
14661 searching at the next insn after the target of the link and stop
14662 when we reach a set of the register or the end of the basic block.
14664 Note that this correctly handles the link that used to point from
14665 I3 to I2. Also note that not much searching is typically done here
14666 since most links don't point very far away. */
14668 for (insn = NEXT_INSN (link->insn);
14669 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14670 || BB_HEAD (this_basic_block->next_bb) != insn));
14671 insn = NEXT_INSN (insn))
14672 if (DEBUG_INSN_P (insn))
14673 continue;
14674 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14676 if (reg_referenced_p (reg, PATTERN (insn)))
14677 place = insn;
14678 break;
14680 else if (CALL_P (insn)
14681 && find_reg_fusage (insn, USE, reg))
14683 place = insn;
14684 break;
14686 else if (INSN_P (insn) && reg_set_p (reg, insn))
14687 break;
14689 /* If we found a place to put the link, place it there unless there
14690 is already a link to the same insn as LINK at that point. */
14692 if (place)
14694 struct insn_link *link2;
14696 FOR_EACH_LOG_LINK (link2, place)
14697 if (link2->insn == link->insn && link2->regno == link->regno)
14698 break;
14700 if (link2 == NULL)
14702 link->next = LOG_LINKS (place);
14703 LOG_LINKS (place) = link;
14705 /* Set added_links_insn to the earliest insn we added a
14706 link to. */
14707 if (added_links_insn == 0
14708 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14709 added_links_insn = place;
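/* Illustrative sketch only (hypothetical types, not GCC API): the
   forward scan above re-homes a LOG_LINK by finding the first insn
   after the setter that still uses the register, giving up if the
   register is set again or the basic block ends first.  A minimal
   model:  */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct toy_link_insn
{
  struct toy_link_insn *next;	/* next insn in the block; NULL at the end */
  bool uses_reg;		/* PATTERN references the register */
  bool sets_reg;		/* PATTERN sets the register */
};

/* Return the new home for the link whose setter is SETTER, or NULL if
   the value is never used again in this block.  */
static struct toy_link_insn *
toy_find_link_home (struct toy_link_insn *setter)
{
  for (struct toy_link_insn *insn = setter->next; insn; insn = insn->next)
    {
      if (insn->uses_reg)
	return insn;		/* first use: the LOG_LINK belongs here */
      if (insn->sets_reg)
	return NULL;		/* redefined before any use */
    }
  return NULL;
}
#endif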
14715 /* Check for any register or memory mentioned in EQUIV that is not
14716 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14717 of EXPR where some registers may have been replaced by constants. */
14719 static bool
14720 unmentioned_reg_p (rtx equiv, rtx expr)
14722 subrtx_iterator::array_type array;
14723 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14725 const_rtx x = *iter;
14726 if ((REG_P (x) || MEM_P (x))
14727 && !reg_mentioned_p (x, expr))
14728 return true;
14730 return false;
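/* A worked example (register numbers chosen purely for illustration):
   if EXPR is (plus (reg 100) (reg 101)) and EQUIV is
   (plus (reg 100) (const_int 4)), the only REG or MEM inside EQUIV is
   (reg 100), which is mentioned in EXPR, so the function returns false
   and EQUIV qualifies as a specialization.  If EQUIV were
   (plus (reg 100) (mem (reg 102))) instead, the MEM is not mentioned in
   EXPR and the function returns true.  */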
14733 DEBUG_FUNCTION void
14734 dump_combine_stats (FILE *file)
14736 fprintf
14737 (file,
14738 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14739 combine_attempts, combine_merges, combine_extras, combine_successes);
14742 void
14743 dump_combine_total_stats (FILE *file)
14745 fprintf
14746 (file,
14747 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14748 total_attempts, total_merges, total_extras, total_successes);
14751 /* Try combining insns through substitution. */
14752 static unsigned int
14753 rest_of_handle_combine (void)
14755 int rebuild_jump_labels_after_combine;
14757 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14758 df_note_add_problem ();
14759 df_analyze ();
14761 regstat_init_n_sets_and_refs ();
14762 reg_n_sets_max = max_reg_num ();
14764 rebuild_jump_labels_after_combine
14765 = combine_instructions (get_insns (), max_reg_num ());
14767 /* Combining insns may have turned an indirect jump into a
14768 direct jump. Rebuild the JUMP_LABEL fields of jumping
14769 instructions. */
14770 if (rebuild_jump_labels_after_combine)
14772 if (dom_info_available_p (CDI_DOMINATORS))
14773 free_dominance_info (CDI_DOMINATORS);
14774 timevar_push (TV_JUMP);
14775 rebuild_jump_labels (get_insns ());
14776 cleanup_cfg (0);
14777 timevar_pop (TV_JUMP);
14780 regstat_free_n_sets_and_refs ();
14781 return 0;
14784 namespace {
14786 const pass_data pass_data_combine =
14788 RTL_PASS, /* type */
14789 "combine", /* name */
14790 OPTGROUP_NONE, /* optinfo_flags */
14791 TV_COMBINE, /* tv_id */
14792 PROP_cfglayout, /* properties_required */
14793 0, /* properties_provided */
14794 0, /* properties_destroyed */
14795 0, /* todo_flags_start */
14796 TODO_df_finish, /* todo_flags_finish */
14799 class pass_combine : public rtl_opt_pass
14801 public:
14802 pass_combine (gcc::context *ctxt)
14803 : rtl_opt_pass (pass_data_combine, ctxt)
14806 /* opt_pass methods: */
14807 virtual bool gate (function *) { return (optimize > 0); }
14808 virtual unsigned int execute (function *)
14810 return rest_of_handle_combine ();
14813 }; // class pass_combine
14815 } // anon namespace
14817 rtl_opt_pass *
14818 make_pass_combine (gcc::context *ctxt)
14820 return new pass_combine (ctxt);
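/* The pass manager instantiates the combine pass through the factory
   above; from memory (see gcc/passes.def for the authoritative list and
   ordering), it is registered there roughly as

     NEXT_PASS (pass_combine);

   so it runs as part of the RTL pipeline, gated on optimize > 0 by
   pass_combine::gate above.  */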