/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
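
/* An illustrative sketch of the substitution described above (not part
   of the original sources): given the linked pair

	(set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
	(set (mem:SI (reg:SI 100)) (const_int 0))

   substituting the first insn's value into the second produces the
   single insn

	(set (mem:SI (plus:SI (reg:SI 99) (const_int 4))) (const_int 0))

   which replaces the pair if the machine description recognizes it and
   combine_validate_cost judges it to be no more expensive.  */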
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "stor-layout.h"
#include "cfgrtl.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "params.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn			*last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn			*last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */
  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */
  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */
  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */
  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
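
/* An illustrative sketch of how the fields above pay off (not part of
   the original sources): if register 100 was last set by

	(set (reg:SI 100) (and:SI (reg:SI 99) (const_int 255)))

   then reg_stat[100].nonzero_bits is 255, so a later
   (and:SI (reg:SI 100) (const_int 255)) is known to be redundant and
   can be simplified to just (reg:SI 100).  */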

/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;

/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
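
/* Typical traversal with the macro above (illustrative sketch, not part
   of the original sources): find the link recording the last assignment
   to DEST before INSN.

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->regno == REGNO (dest))
	    break;
*/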

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
			      rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);

/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}

/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
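
/* Illustrative use of SUBST (a sketch, not part of the original
   sources):

	SUBST (XEXP (x, 1), const0_rtx);

   rewrites the second operand of X in place while pushing the old
   operand onto undobuf.undos; if the combination attempt later fails,
   undo_all restores the original operand.  */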

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)

/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;

  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
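
/* A worked example of the cost test above (illustrative, not from the
   original sources): combining I2 (cost 4) and I3 (cost 4) into a
   single replacement I3 of cost 6 gives old_cost = 8 and new_cost = 6,
   so the combination is allowed; a replacement of cost 9 would make
   new_cost > old_cost and be rejected.  */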

/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}

/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}

/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
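
/* For example (illustrative, not from the original sources): with
   BITS_PER_WORD == 32, a QImode source (const_int 0x80) becomes
   (const_int -128), matching what a sign-extending load of that byte
   would actually leave in a word-sized register.  */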

/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}

/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* Should not happen, as we are only using pseudo registers.  */
      gcc_assert (GET_CODE (set) != CLOBBER_HIGH);

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}

/* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
   optionally insns that were previously combined into I3 or that will be
   combined into the merger of INSN and I3.  The order is PRED, PRED2,
   INSN, SUCC, SUCC2, I3.

   Return 0 if the combination is not allowed for any reason.

   If the combination is allowed, *PDEST will be set to the single
   destination of INSN and *PSRC to the single source, and this function
   returns 1.  */

static int
can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
	       rtx *pdest, rtx *psrc)
{
  int i;
  const_rtx set = 0;
  rtx src, dest;
  rtx_insn *p;
  rtx link;
  bool all_adjacent = true;
  int (*is_volatile_p) (const_rtx);

  if (succ)
    {
      if (succ2)
	{
	  if (next_active_insn (succ2) != i3)
	    all_adjacent = false;
	  if (next_active_insn (succ) != succ2)
	    all_adjacent = false;
	}
      else if (next_active_insn (succ) != i3)
	all_adjacent = false;
      if (next_active_insn (insn) != succ)
	all_adjacent = false;
    }
  else if (next_active_insn (insn) != i3)
    all_adjacent = false;

  /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
     or a PARALLEL consisting of such a SET and CLOBBERs.

     If INSN has CLOBBER parallel parts, ignore them for our processing.
     By definition, these happen during the execution of the insn.  When it
     is merged with another insn, all bets are off.  If they are, in fact,
     needed and aren't also supplied in I3, they may be added by
     recog_for_combine.  Otherwise, it won't match.

     We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	{
	  rtx elt = XVECEXP (PATTERN (insn), 0, i);

	  switch (GET_CODE (elt))
	    {
	    /* This is important to combine floating point insns
	       for the SH4 port.  */
	    case USE:
	      /* Combining an isolated USE doesn't make sense.
		 We depend here on combinable_i3pat to reject them.  */
	      /* The code below this loop only verifies that the inputs of
		 the SET in INSN do not change.  We call reg_set_between_p
		 to verify that the REG in the USE does not change between
		 I3 and INSN.
		 If the USE in INSN was for a pseudo register, the matching
		 insn pattern will likely match any register; combining this
		 with any other USE would only be safe if we knew that the
		 used registers have identical values, or if there was
		 something to tell them apart, e.g. different modes.  For
		 now, we forgo such complicated tests and simply disallow
		 combining of USES of pseudo registers with any other USE.  */
	      if (REG_P (XEXP (elt, 0))
		  && GET_CODE (PATTERN (i3)) == PARALLEL)
		{
		  rtx i3pat = PATTERN (i3);
		  int i = XVECLEN (i3pat, 0) - 1;
		  unsigned int regno = REGNO (XEXP (elt, 0));

		  do
		    {
		      rtx i3elt = XVECEXP (i3pat, 0, i);

		      if (GET_CODE (i3elt) == USE
			  && REG_P (XEXP (i3elt, 0))
			  && (REGNO (XEXP (i3elt, 0)) == regno
			      ? reg_set_between_p (XEXP (elt, 0),
						   PREV_INSN (insn), i3)
			      : regno >= FIRST_PSEUDO_REGISTER))
			return 0;
		    }
		  while (--i >= 0);
		}
	      break;

	      /* We can ignore CLOBBERs.  */
	    case CLOBBER:
	      break;

	    case SET:
	      /* Ignore SETs whose result isn't used but not those that
		 have side-effects.  */
	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
		  && insn_nothrow_p (insn)
		  && !side_effects_p (elt))
		break;

	      /* If we have already found a SET, this is a second one and
		 so we cannot combine with this insn.  */
	      if (set)
		return 0;

	      set = elt;
	      break;

	    default:
	      /* Anything else means we can't combine.  */
	      return 0;
	    }
	}

      if (set == 0
	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
	     so don't do anything with it.  */
	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
	return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;

  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate a user-specified register if it is in an
     asm input, because we may break the register asm usage described
     in the GCC manual if we allow it.
     Be aware that this may cover more cases than we expect, but this
     should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
	      || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
	      || (succ
		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
		     that case SUCC is not in the insn stream, so use SUCC2
		     instead for this test.  */
		  && reg_used_between_p (dest, insn,
					 succ2
					 && INSN_UID (succ) == INSN_UID (succ2)
					 ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && modified_between_p (src, insn, i3))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;

  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
2046 user variable (if we have few registers) or it can't
2047 fit into the desired register (meaning something special
2049 Also avoid substituting a return register into I3, because
2050 reload can't handle a conflict with constraints of other
2052 || (REGNO (src
) < FIRST_PSEUDO_REGISTER
2053 && !targetm
.hard_regno_mode_ok (REGNO (src
),
2057 else if (GET_CODE (dest
) != CC0
)
2061 if (GET_CODE (PATTERN (i3
)) == PARALLEL
)
2062 for (i
= XVECLEN (PATTERN (i3
), 0) - 1; i
>= 0; i
--)
2063 if (GET_CODE (XVECEXP (PATTERN (i3
), 0, i
)) == CLOBBER
)
2065 rtx reg
= XEXP (XVECEXP (PATTERN (i3
), 0, i
), 0);
2067 /* If the clobber represents an earlyclobber operand, we must not
2068 substitute an expression containing the clobbered register.
2069 As we do not analyze the constraint strings here, we have to
2070 make the conservative assumption. However, if the register is
2071 a fixed hard reg, the clobber cannot represent any operand;
2072 we leave it up to the machine description to either accept or
2073 reject use-and-clobber patterns. */
2075 || REGNO (reg
) >= FIRST_PSEUDO_REGISTER
2076 || !fixed_regs
[REGNO (reg
)])
2077 if (reg_overlap_mentioned_p (reg
, src
))
2081 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2082 or not), reject, unless nothing volatile comes between it and I3 */
2084 if (GET_CODE (src
) == ASM_OPERANDS
|| volatile_refs_p (src
))
2086 /* Make sure neither succ nor succ2 contains a volatile reference. */
2087 if (succ2
!= 0 && volatile_refs_p (PATTERN (succ2
)))
2089 if (succ
!= 0 && volatile_refs_p (PATTERN (succ
)))
2091 /* We'll check insns between INSN and I3 below. */
2094 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2095 to be an explicit register variable, and was chosen for a reason. */
2097 if (GET_CODE (src
) == ASM_OPERANDS
2098 && REG_P (dest
) && REGNO (dest
) < FIRST_PSEUDO_REGISTER
)
2101 /* If INSN contains volatile references (specifically volatile MEMs),
2102 we cannot combine across any other volatile references.
2103 Even if INSN doesn't contain volatile references, any intervening
2104 volatile insn might affect machine state. */
2106 is_volatile_p
= volatile_refs_p (PATTERN (insn
))
2110 for (p
= NEXT_INSN (insn
); p
!= i3
; p
= NEXT_INSN (p
))
2111 if (INSN_P (p
) && p
!= succ
&& p
!= succ2
&& is_volatile_p (PATTERN (p
)))
2114 /* If INSN contains an autoincrement or autodecrement, make sure that
2115 register is not used between there and I3, and not already used in
2116 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2117 Also insist that I3 not be a jump; if it were one
2118 and the incremented register were spilled, we would lose. */
2121 for (link
= REG_NOTES (insn
); link
; link
= XEXP (link
, 1))
2122 if (REG_NOTE_KIND (link
) == REG_INC
2124 || reg_used_between_p (XEXP (link
, 0), insn
, i3
)
2125 || (pred
!= NULL_RTX
2126 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (pred
)))
2127 || (pred2
!= NULL_RTX
2128 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (pred2
)))
2129 || (succ
!= NULL_RTX
2130 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (succ
)))
2131 || (succ2
!= NULL_RTX
2132 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (succ2
)))
2133 || reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i3
))))
2136 /* Don't combine an insn that follows a CC0-setting insn.
2137 An insn that uses CC0 must not be separated from the one that sets it.
2138 We do, however, allow I2 to follow a CC0-setting insn if that insn
2139 is passed as I1; in that case it will be deleted also.
2140 We also allow combining in this case if all the insns are adjacent
2141 because that would leave the two CC0 insns adjacent as well.
2142 It would be more logical to test whether CC0 occurs inside I1 or I2,
2143 but that would be much slower, and this ought to be equivalent. */
2147 p
= prev_nonnote_insn (insn
);
2148 if (p
&& p
!= pred
&& NONJUMP_INSN_P (p
) && sets_cc0_p (PATTERN (p
))
2153 /* If we get here, we have passed all the tests and the combination is
2162 /* LOC is the location within I3 that contains its pattern or the component
2163 of a PARALLEL of the pattern. We validate that it is valid for combining.
2165 One problem is if I3 modifies its output, as opposed to replacing it
2166 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2167 doing so would produce an insn that is not equivalent to the original insns.
2171 (set (reg:DI 101) (reg:DI 100))
2172 (set (subreg:SI (reg:DI 101) 0) <foo>)
2174 This is NOT equivalent to:
2176 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2177 (set (reg:DI 101) (reg:DI 100))])
2179 Not only does this modify 100 (in which case it might still be valid
2180 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2182 We can also run into a problem if I2 sets a register that I1
2183 uses and I1 gets directly substituted into I3 (not via I2). In that
2184 case, we would be getting the wrong value of I2DEST into I3, so we
2185 must reject the combination. This case occurs when I2 and I1 both
2186 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2187 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2188 of a SET must prevent combination from occurring. The same situation
2189 can occur for I0, in which case I0_NOT_IN_SRC is set.
2191 Before doing the above check, we first try to expand a field assignment
2192 into a set of logical operations.
2194 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2195 we place a register that is both set and used within I3. If more than one
2196 such register is detected, we fail.
2198 Return 1 if the combination is valid, zero otherwise. */
2201 combinable_i3pat (rtx_insn
*i3
, rtx
*loc
, rtx i2dest
, rtx i1dest
, rtx i0dest
,
2202 int i1_not_in_src
, int i0_not_in_src
, rtx
*pi3dest_killed
)
2206 if (GET_CODE (x
) == SET
)
2209 rtx dest
= SET_DEST (set
);
2210 rtx src
= SET_SRC (set
);
2211 rtx inner_dest
= dest
;
2214 while (GET_CODE (inner_dest
) == STRICT_LOW_PART
2215 || GET_CODE (inner_dest
) == SUBREG
2216 || GET_CODE (inner_dest
) == ZERO_EXTRACT
)
2217 inner_dest
= XEXP (inner_dest
, 0);
2219 /* Check for the case where I3 modifies its output, as discussed
2220 above. We don't want to prevent pseudos from being combined
2221 into the address of a MEM, so only prevent the combination if
2222 i1 or i2 set the same MEM. */
2223 if ((inner_dest
!= dest
&&
2224 (!MEM_P (inner_dest
)
2225 || rtx_equal_p (i2dest
, inner_dest
)
2226 || (i1dest
&& rtx_equal_p (i1dest
, inner_dest
))
2227 || (i0dest
&& rtx_equal_p (i0dest
, inner_dest
)))
2228 && (reg_overlap_mentioned_p (i2dest
, inner_dest
)
2229 || (i1dest
&& reg_overlap_mentioned_p (i1dest
, inner_dest
))
2230 || (i0dest
&& reg_overlap_mentioned_p (i0dest
, inner_dest
))))
2232 /* This is the same test done in can_combine_p except we can't test
2233 all_adjacent; we don't have to, since this instruction will stay
2234 in place, thus we are not considering increasing the lifetime of
2237 Also, if this insn sets a function argument, combining it with
2238 something that might need a spill could clobber a previous
2239 function argument; the all_adjacent test in can_combine_p also
2240 checks this; here, we do a more specific test for this case. */
2242 || (REG_P (inner_dest
)
2243 && REGNO (inner_dest
) < FIRST_PSEUDO_REGISTER
2244 && !targetm
.hard_regno_mode_ok (REGNO (inner_dest
),
2245 GET_MODE (inner_dest
)))
2246 || (i1_not_in_src
&& reg_overlap_mentioned_p (i1dest
, src
))
2247 || (i0_not_in_src
&& reg_overlap_mentioned_p (i0dest
, src
)))
2250 /* If DEST is used in I3, it is being killed in this insn, so
2251 record that for later. We have to consider paradoxical
2252 subregs here, since they kill the whole register, but we
2253 ignore partial subregs, STRICT_LOW_PART, etc.
2254 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2255 STACK_POINTER_REGNUM, since these are always considered to be
2256 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2258 if (GET_CODE (subdest
) == SUBREG
&& !partial_subreg_p (subdest
))
2259 subdest
= SUBREG_REG (subdest
);
2262 && reg_referenced_p (subdest
, PATTERN (i3
))
2263 && REGNO (subdest
) != FRAME_POINTER_REGNUM
2264 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2265 || REGNO (subdest
) != HARD_FRAME_POINTER_REGNUM
)
2266 && (FRAME_POINTER_REGNUM
== ARG_POINTER_REGNUM
2267 || (REGNO (subdest
) != ARG_POINTER_REGNUM
2268 || ! fixed_regs
[REGNO (subdest
)]))
2269 && REGNO (subdest
) != STACK_POINTER_REGNUM
)
2271 if (*pi3dest_killed
)
2274 *pi3dest_killed
= subdest
;
2278 else if (GET_CODE (x
) == PARALLEL
)
2282 for (i
= 0; i
< XVECLEN (x
, 0); i
++)
2283 if (! combinable_i3pat (i3
, &XVECEXP (x
, 0, i
), i2dest
, i1dest
, i0dest
,
2284 i1_not_in_src
, i0_not_in_src
, pi3dest_killed
))
2291 /* Return 1 if X is an arithmetic expression that contains a multiplication
2292 and division. We don't count multiplications by powers of two here. */
2295 contains_muldiv (rtx x
)
2297 switch (GET_CODE (x
))
2299 case MOD
: case DIV
: case UMOD
: case UDIV
:
2303 return ! (CONST_INT_P (XEXP (x
, 1))
2304 && pow2p_hwi (UINTVAL (XEXP (x
, 1))));
2307 return contains_muldiv (XEXP (x
, 0))
2308 || contains_muldiv (XEXP (x
, 1));
2311 return contains_muldiv (XEXP (x
, 0));
2317 /* Determine whether INSN can be used in a combination. Return nonzero if
2318 not. This is used in try_combine to detect early some cases where we
2319 can't perform combinations. */
2322 cant_combine_insn_p (rtx_insn
*insn
)
2327 /* If this isn't really an insn, we can't do anything.
2328 This can occur when flow deletes an insn that it has merged into an
2329 auto-increment address. */
2330 if (!NONDEBUG_INSN_P (insn
))
2333 /* Never combine loads and stores involving hard regs that are likely
2334 to be spilled. The register allocator can usually handle such
2335 reg-reg moves by tying. If we allow the combiner to make
2336 substitutions of likely-spilled regs, reload might die.
2337 As an exception, we allow combinations involving fixed regs; these are
2338 not available to the register allocator so there's no risk involved. */
2340 set
= single_set (insn
);
2343 src
= SET_SRC (set
);
2344 dest
= SET_DEST (set
);
2345 if (GET_CODE (src
) == SUBREG
)
2346 src
= SUBREG_REG (src
);
2347 if (GET_CODE (dest
) == SUBREG
)
2348 dest
= SUBREG_REG (dest
);
2349 if (REG_P (src
) && REG_P (dest
)
2350 && ((HARD_REGISTER_P (src
)
2351 && ! TEST_HARD_REG_BIT (fixed_reg_set
, REGNO (src
))
2352 && targetm
.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src
))))
2353 || (HARD_REGISTER_P (dest
)
2354 && ! TEST_HARD_REG_BIT (fixed_reg_set
, REGNO (dest
))
2355 && targetm
.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest
))))))
2361 struct likely_spilled_retval_info
2363 unsigned regno
, nregs
;
2367 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2368 hard registers that are known to be written to / clobbered in full. */
2370 likely_spilled_retval_1 (rtx x
, const_rtx set
, void *data
)
2372 struct likely_spilled_retval_info
*const info
=
2373 (struct likely_spilled_retval_info
*) data
;
2374 unsigned regno
, nregs
;
2377 if (!REG_P (XEXP (set
, 0)))
2380 if (regno
>= info
->regno
+ info
->nregs
)
2382 nregs
= REG_NREGS (x
);
2383 if (regno
+ nregs
<= info
->regno
)
2385 new_mask
= (2U << (nregs
- 1)) - 1;
2386 if (regno
< info
->regno
)
2387 new_mask
>>= info
->regno
- regno
;
2389 new_mask
<<= regno
- info
->regno
;
2390 info
->mask
&= ~new_mask
;
2393 /* Return nonzero iff part of the return value is live during INSN, and
2394 it is likely spilled. This can happen when more than one insn is needed
2395 to copy the return value, e.g. when we consider to combine into the
2396 second copy insn for a complex value. */
2399 likely_spilled_retval_p (rtx_insn
*insn
)
2401 rtx_insn
*use
= BB_END (this_basic_block
);
2404 unsigned regno
, nregs
;
2405 /* We assume here that no machine mode needs more than
2406 32 hard registers when the value overlaps with a register
2407 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2409 struct likely_spilled_retval_info info
;
2411 if (!NONJUMP_INSN_P (use
) || GET_CODE (PATTERN (use
)) != USE
|| insn
== use
)
2413 reg
= XEXP (PATTERN (use
), 0);
2414 if (!REG_P (reg
) || !targetm
.calls
.function_value_regno_p (REGNO (reg
)))
2416 regno
= REGNO (reg
);
2417 nregs
= REG_NREGS (reg
);
2420 mask
= (2U << (nregs
- 1)) - 1;
2422 /* Disregard parts of the return value that are set later. */
2426 for (p
= PREV_INSN (use
); info
.mask
&& p
!= insn
; p
= PREV_INSN (p
))
2428 note_stores (PATTERN (p
), likely_spilled_retval_1
, &info
);
2431 /* Check if any of the (probably) live return value registers is
2436 if ((mask
& 1 << nregs
)
2437 && targetm
.class_likely_spilled_p (REGNO_REG_CLASS (regno
+ nregs
)))
2443 /* Adjust INSN after we made a change to its destination.
2445 Changing the destination can invalidate notes that say something about
2446 the results of the insn and a LOG_LINK pointing to the insn. */
2449 adjust_for_new_dest (rtx_insn
*insn
)
2451 /* For notes, be conservative and simply remove them. */
2452 remove_reg_equal_equiv_notes (insn
);
2454 /* The new insn will have a destination that was previously the destination
2455 of an insn just above it. Call distribute_links to make a LOG_LINK from
2456 the next use of that destination. */
2458 rtx set
= single_set (insn
);
2461 rtx reg
= SET_DEST (set
);
2463 while (GET_CODE (reg
) == ZERO_EXTRACT
2464 || GET_CODE (reg
) == STRICT_LOW_PART
2465 || GET_CODE (reg
) == SUBREG
)
2466 reg
= XEXP (reg
, 0);
2467 gcc_assert (REG_P (reg
));
2469 distribute_links (alloc_insn_link (insn
, REGNO (reg
), NULL
));
2471 df_insn_rescan (insn
);
2474 /* Return TRUE if combine can reuse reg X in mode MODE.
2475 ADDED_SETS is nonzero if the original set is still required. */
2477 can_change_dest_mode (rtx x
, int added_sets
, machine_mode mode
)
2484 /* Don't change between modes with different underlying register sizes,
2485 since this could lead to invalid subregs. */
2486 if (maybe_ne (REGMODE_NATURAL_SIZE (mode
),
2487 REGMODE_NATURAL_SIZE (GET_MODE (x
))))
2491 /* Allow hard registers if the new mode is legal, and occupies no more
2492 registers than the old mode. */
2493 if (regno
< FIRST_PSEUDO_REGISTER
)
2494 return (targetm
.hard_regno_mode_ok (regno
, mode
)
2495 && REG_NREGS (x
) >= hard_regno_nregs (regno
, mode
));
2497 /* Or a pseudo that is only used once. */
2498 return (regno
< reg_n_sets_max
2499 && REG_N_SETS (regno
) == 1
2501 && !REG_USERVAR_P (x
));
2505 /* Check whether X, the destination of a set, refers to part of
2506 the register specified by REG. */
2509 reg_subword_p (rtx x
, rtx reg
)
2511 /* Check that reg is an integer mode register. */
2512 if (!REG_P (reg
) || GET_MODE_CLASS (GET_MODE (reg
)) != MODE_INT
)
2515 if (GET_CODE (x
) == STRICT_LOW_PART
2516 || GET_CODE (x
) == ZERO_EXTRACT
)
2519 return GET_CODE (x
) == SUBREG
2520 && SUBREG_REG (x
) == reg
2521 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
;
2524 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2525 Note that the INSN should be deleted *after* removing dead edges, so
2526 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2527 but not for a (set (pc) (label_ref FOO)). */
2530 update_cfg_for_uncondjump (rtx_insn
*insn
)
2532 basic_block bb
= BLOCK_FOR_INSN (insn
);
2533 gcc_assert (BB_END (bb
) == insn
);
2535 purge_dead_edges (bb
);
2538 if (EDGE_COUNT (bb
->succs
) == 1)
2542 single_succ_edge (bb
)->flags
|= EDGE_FALLTHRU
;
2544 /* Remove barriers from the footer if there are any. */
2545 for (insn
= BB_FOOTER (bb
); insn
; insn
= NEXT_INSN (insn
))
2546 if (BARRIER_P (insn
))
2548 if (PREV_INSN (insn
))
2549 SET_NEXT_INSN (PREV_INSN (insn
)) = NEXT_INSN (insn
);
2551 BB_FOOTER (bb
) = NEXT_INSN (insn
);
2552 if (NEXT_INSN (insn
))
2553 SET_PREV_INSN (NEXT_INSN (insn
)) = PREV_INSN (insn
);
2555 else if (LABEL_P (insn
))
2560 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2561 by an arbitrary number of CLOBBERs. */
2563 is_parallel_of_n_reg_sets (rtx pat
, int n
)
2565 if (GET_CODE (pat
) != PARALLEL
)
2568 int len
= XVECLEN (pat
, 0);
2573 for (i
= 0; i
< n
; i
++)
2574 if (GET_CODE (XVECEXP (pat
, 0, i
)) != SET
2575 || !REG_P (SET_DEST (XVECEXP (pat
, 0, i
))))
2577 for ( ; i
< len
; i
++)
2578 switch (GET_CODE (XVECEXP (pat
, 0, i
)))
2581 if (XEXP (XVECEXP (pat
, 0, i
), 0) == const0_rtx
)
2592 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2593 CLOBBERs), can be split into individual SETs in that order, without
2594 changing semantics. */
2596 can_split_parallel_of_n_reg_sets (rtx_insn
*insn
, int n
)
2598 if (!insn_nothrow_p (insn
))
2601 rtx pat
= PATTERN (insn
);
2604 for (i
= 0; i
< n
; i
++)
2606 if (side_effects_p (SET_SRC (XVECEXP (pat
, 0, i
))))
2609 rtx reg
= SET_DEST (XVECEXP (pat
, 0, i
));
2611 for (j
= i
+ 1; j
< n
; j
++)
2612 if (reg_referenced_p (reg
, XVECEXP (pat
, 0, j
)))
2619 /* Return whether X is just a single set, with the source
2620 a general_operand. */
2622 is_just_move (rtx x
)
2627 return (GET_CODE (x
) == SET
&& general_operand (SET_SRC (x
), VOIDmode
));
2630 /* Try to combine the insns I0, I1 and I2 into I3.
2631 Here I0, I1 and I2 appear earlier than I3.
2632 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2635 If we are combining more than two insns and the resulting insn is not
2636 recognized, try splitting it into two insns. If that happens, I2 and I3
2637 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2638 Otherwise, I0, I1 and I2 are pseudo-deleted.
2640 Return 0 if the combination does not work. Then nothing is changed.
2641 If we did the combination, return the insn at which combine should
2644 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2645 new direct jump instruction.
2647 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2648 been I3 passed to an earlier try_combine within the same basic
2652 try_combine (rtx_insn
*i3
, rtx_insn
*i2
, rtx_insn
*i1
, rtx_insn
*i0
,
2653 int *new_direct_jump_p
, rtx_insn
*last_combined_insn
)
2655 /* New patterns for I3 and I2, respectively. */
2656 rtx newpat
, newi2pat
= 0;
2657 rtvec newpat_vec_with_clobbers
= 0;
2658 int substed_i2
= 0, substed_i1
= 0, substed_i0
= 0;
2659 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2661 int added_sets_0
, added_sets_1
, added_sets_2
;
2662 /* Total number of SETs to put into I3. */
2664 /* Nonzero if I2's or I1's body now appears in I3. */
2665 int i2_is_used
= 0, i1_is_used
= 0;
2666 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2667 int insn_code_number
, i2_code_number
= 0, other_code_number
= 0;
2668 /* Contains I3 if the destination of I3 is used in its source, which means
2669 that the old life of I3 is being killed. If that usage is placed into
2670 I2 and not in I3, a REG_DEAD note must be made. */
2671 rtx i3dest_killed
= 0;
2672 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2673 rtx i2dest
= 0, i2src
= 0, i1dest
= 0, i1src
= 0, i0dest
= 0, i0src
= 0;
2674 /* Copy of SET_SRC of I1 and I0, if needed. */
2675 rtx i1src_copy
= 0, i0src_copy
= 0, i0src_copy2
= 0;
2676 /* Set if I2DEST was reused as a scratch register. */
2677 bool i2scratch
= false;
2678 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2679 rtx i0pat
= 0, i1pat
= 0, i2pat
= 0;
2680 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2681 int i2dest_in_i2src
= 0, i1dest_in_i1src
= 0, i2dest_in_i1src
= 0;
2682 int i0dest_in_i0src
= 0, i1dest_in_i0src
= 0, i2dest_in_i0src
= 0;
2683 int i2dest_killed
= 0, i1dest_killed
= 0, i0dest_killed
= 0;
2684 int i1_feeds_i2_n
= 0, i0_feeds_i2_n
= 0, i0_feeds_i1_n
= 0;
2685 /* Notes that must be added to REG_NOTES in I3 and I2. */
2686 rtx new_i3_notes
, new_i2_notes
;
2687 /* Notes that we substituted I3 into I2 instead of the normal case. */
2688 int i3_subst_into_i2
= 0;
2689 /* Notes that I1, I2 or I3 is a MULT operation. */
2693 int changed_i3_dest
= 0;
2694 bool i2_was_move
= false, i3_was_move
= false;
2697 rtx_insn
*temp_insn
;
2699 struct insn_link
*link
;
2701 rtx new_other_notes
;
2703 scalar_int_mode dest_mode
, temp_mode
;
2705 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2707 if (i1
== i2
|| i0
== i2
|| (i0
&& i0
== i1
))
2710 /* Only try four-insn combinations when there's high likelihood of
2711 success. Look for simple insns, such as loads of constants or
2712 binary operations involving a constant. */
2720 if (!flag_expensive_optimizations
)
2723 for (i
= 0; i
< 4; i
++)
2725 rtx_insn
*insn
= i
== 0 ? i0
: i
== 1 ? i1
: i
== 2 ? i2
: i3
;
2726 rtx set
= single_set (insn
);
2730 src
= SET_SRC (set
);
2731 if (CONSTANT_P (src
))
2736 else if (BINARY_P (src
) && CONSTANT_P (XEXP (src
, 1)))
2738 else if (GET_CODE (src
) == ASHIFT
|| GET_CODE (src
) == ASHIFTRT
2739 || GET_CODE (src
) == LSHIFTRT
)
2743 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2744 are likely manipulating its value. Ideally we'll be able to combine
2745 all four insns into a bitfield insertion of some kind.
2747 Note the source in I0 might be inside a sign/zero extension and the
2748 memory modes in I0 and I3 might be different. So extract the address
2749 from the destination of I3 and search for it in the source of I0.
2751 In the event that there's a match but the source/dest do not actually
2752 refer to the same memory, the worst that happens is we try some
2753 combinations that we wouldn't have otherwise. */
2754 if ((set0
= single_set (i0
))
2755 /* Ensure the source of SET0 is a MEM, possibly buried inside
2757 && (GET_CODE (SET_SRC (set0
)) == MEM
2758 || ((GET_CODE (SET_SRC (set0
)) == ZERO_EXTEND
2759 || GET_CODE (SET_SRC (set0
)) == SIGN_EXTEND
)
2760 && GET_CODE (XEXP (SET_SRC (set0
), 0)) == MEM
))
2761 && (set3
= single_set (i3
))
2762 /* Ensure the destination of SET3 is a MEM. */
2763 && GET_CODE (SET_DEST (set3
)) == MEM
2764 /* Would it be better to extract the base address for the MEM
2765 in SET3 and look for that? I don't have cases where it matters
2766 but I could envision such cases. */
2767 && rtx_referenced_p (XEXP (SET_DEST (set3
), 0), SET_SRC (set0
)))
2770 if (ngood
< 2 && nshift
< 2)
2774 /* Exit early if one of the insns involved can't be used for
2777 || (i1
&& CALL_P (i1
))
2778 || (i0
&& CALL_P (i0
))
2779 || cant_combine_insn_p (i3
)
2780 || cant_combine_insn_p (i2
)
2781 || (i1
&& cant_combine_insn_p (i1
))
2782 || (i0
&& cant_combine_insn_p (i0
))
2783 || likely_spilled_retval_p (i3
))
2787 undobuf
.other_insn
= 0;
2789 /* Reset the hard register usage information. */
2790 CLEAR_HARD_REG_SET (newpat_used_regs
);
2792 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2795 fprintf (dump_file
, "\nTrying %d, %d, %d -> %d:\n",
2796 INSN_UID (i0
), INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
2798 fprintf (dump_file
, "\nTrying %d, %d -> %d:\n",
2799 INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
2801 fprintf (dump_file
, "\nTrying %d -> %d:\n",
2802 INSN_UID (i2
), INSN_UID (i3
));
2805 dump_insn_slim (dump_file
, i0
);
2807 dump_insn_slim (dump_file
, i1
);
2808 dump_insn_slim (dump_file
, i2
);
2809 dump_insn_slim (dump_file
, i3
);
2812 /* If multiple insns feed into one of I2 or I3, they can be in any
2813 order. To simplify the code below, reorder them in sequence. */
2814 if (i0
&& DF_INSN_LUID (i0
) > DF_INSN_LUID (i2
))
2816 if (i0
&& DF_INSN_LUID (i0
) > DF_INSN_LUID (i1
))
2818 if (i1
&& DF_INSN_LUID (i1
) > DF_INSN_LUID (i2
))
2821 added_links_insn
= 0;
2822 added_notes_insn
= 0;
2824 /* First check for one important special case that the code below will
2825 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2826 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2827 we may be able to replace that destination with the destination of I3.
2828 This occurs in the common code where we compute both a quotient and
2829 remainder into a structure, in which case we want to do the computation
2830 directly into the structure to avoid register-register copies.
2832 Note that this case handles both multiple sets in I2 and also cases
2833 where I2 has a number of CLOBBERs inside the PARALLEL.
2835 We make very conservative checks below and only try to handle the
2836 most common cases of this. For example, we only handle the case
2837 where I2 and I3 are adjacent to avoid making difficult register
2840 if (i1
== 0 && NONJUMP_INSN_P (i3
) && GET_CODE (PATTERN (i3
)) == SET
2841 && REG_P (SET_SRC (PATTERN (i3
)))
2842 && REGNO (SET_SRC (PATTERN (i3
))) >= FIRST_PSEUDO_REGISTER
2843 && find_reg_note (i3
, REG_DEAD
, SET_SRC (PATTERN (i3
)))
2844 && GET_CODE (PATTERN (i2
)) == PARALLEL
2845 && ! side_effects_p (SET_DEST (PATTERN (i3
)))
2846 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2847 below would need to check what is inside (and reg_overlap_mentioned_p
2848 doesn't support those codes anyway). Don't allow those destinations;
2849 the resulting insn isn't likely to be recognized anyway. */
2850 && GET_CODE (SET_DEST (PATTERN (i3
))) != ZERO_EXTRACT
2851 && GET_CODE (SET_DEST (PATTERN (i3
))) != STRICT_LOW_PART
2852 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3
)),
2853 SET_DEST (PATTERN (i3
)))
2854 && next_active_insn (i2
) == i3
)
2856 rtx p2
= PATTERN (i2
);
2858 /* Make sure that the destination of I3,
2859 which we are going to substitute into one output of I2,
2860 is not used within another output of I2. We must avoid making this:
2861 (parallel [(set (mem (reg 69)) ...)
2862 (set (reg 69) ...)])
2863 which is not well-defined as to order of actions.
2864 (Besides, reload can't handle output reloads for this.)
2866 The problem can also happen if the dest of I3 is a memory ref,
2867 if another dest in I2 is an indirect memory ref.
2869 Neither can this PARALLEL be an asm. We do not allow combining
2870 that usually (see can_combine_p), so do not here either. */
2872 for (i
= 0; ok
&& i
< XVECLEN (p2
, 0); i
++)
2874 if ((GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2875 || GET_CODE (XVECEXP (p2
, 0, i
)) == CLOBBER
2876 || GET_CODE (XVECEXP (p2
, 0, i
)) == CLOBBER_HIGH
)
2877 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3
)),
2878 SET_DEST (XVECEXP (p2
, 0, i
))))
2880 else if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2881 && GET_CODE (SET_SRC (XVECEXP (p2
, 0, i
))) == ASM_OPERANDS
)
2886 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2887 if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2888 && SET_DEST (XVECEXP (p2
, 0, i
)) == SET_SRC (PATTERN (i3
)))
2893 subst_low_luid
= DF_INSN_LUID (i2
);
2895 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2896 i2src
= SET_SRC (XVECEXP (p2
, 0, i
));
2897 i2dest
= SET_DEST (XVECEXP (p2
, 0, i
));
2898 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2900 /* Replace the dest in I2 with our dest and make the resulting
2901 insn the new pattern for I3. Then skip to where we validate
2902 the pattern. Everything was set up above. */
2903 SUBST (SET_DEST (XVECEXP (p2
, 0, i
)), SET_DEST (PATTERN (i3
)));
2905 i3_subst_into_i2
= 1;
2906 goto validate_replacement
;
2910 /* If I2 is setting a pseudo to a constant and I3 is setting some
2911 sub-part of it to another constant, merge them by making a new
2914 && (temp_expr
= single_set (i2
)) != 0
2915 && is_a
<scalar_int_mode
> (GET_MODE (SET_DEST (temp_expr
)), &temp_mode
)
2916 && CONST_SCALAR_INT_P (SET_SRC (temp_expr
))
2917 && GET_CODE (PATTERN (i3
)) == SET
2918 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3
)))
2919 && reg_subword_p (SET_DEST (PATTERN (i3
)), SET_DEST (temp_expr
)))
2921 rtx dest
= SET_DEST (PATTERN (i3
));
2922 rtx temp_dest
= SET_DEST (temp_expr
);
2926 if (GET_CODE (dest
) == ZERO_EXTRACT
)
2928 if (CONST_INT_P (XEXP (dest
, 1))
2929 && CONST_INT_P (XEXP (dest
, 2))
2930 && is_a
<scalar_int_mode
> (GET_MODE (XEXP (dest
, 0)),
2933 width
= INTVAL (XEXP (dest
, 1));
2934 offset
= INTVAL (XEXP (dest
, 2));
2935 dest
= XEXP (dest
, 0);
2936 if (BITS_BIG_ENDIAN
)
2937 offset
= GET_MODE_PRECISION (dest_mode
) - width
- offset
;
2942 if (GET_CODE (dest
) == STRICT_LOW_PART
)
2943 dest
= XEXP (dest
, 0);
2944 if (is_a
<scalar_int_mode
> (GET_MODE (dest
), &dest_mode
))
2946 width
= GET_MODE_PRECISION (dest_mode
);
2953 /* If this is the low part, we're done. */
2954 if (subreg_lowpart_p (dest
))
2956 /* Handle the case where inner is twice the size of outer. */
2957 else if (GET_MODE_PRECISION (temp_mode
)
2958 == 2 * GET_MODE_PRECISION (dest_mode
))
2959 offset
+= GET_MODE_PRECISION (dest_mode
);
2960 /* Otherwise give up for now. */
2967 rtx inner
= SET_SRC (PATTERN (i3
));
2968 rtx outer
= SET_SRC (temp_expr
);
2970 wide_int o
= wi::insert (rtx_mode_t (outer
, temp_mode
),
2971 rtx_mode_t (inner
, dest_mode
),
2976 subst_low_luid
= DF_INSN_LUID (i2
);
2977 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2979 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2981 /* Replace the source in I2 with the new constant and make the
2982 resulting insn the new pattern for I3. Then skip to where we
2983 validate the pattern. Everything was set up above. */
2984 SUBST (SET_SRC (temp_expr
),
2985 immed_wide_int_const (o
, temp_mode
));
2987 newpat
= PATTERN (i2
);
2989 /* The dest of I3 has been replaced with the dest of I2. */
2990 changed_i3_dest
= 1;
2991 goto validate_replacement
;
2995 /* If we have no I1 and I2 looks like:
2996 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2998 make up a dummy I1 that is
3001 (set (reg:CC X) (compare:CC Y (const_int 0)))
3003 (We can ignore any trailing CLOBBERs.)
3005 This undoes a previous combination and allows us to match a branch-and-
3008 if (!HAVE_cc0
&& i1
== 0
3009 && is_parallel_of_n_reg_sets (PATTERN (i2
), 2)
3010 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0))))
3012 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0))) == COMPARE
3013 && XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 1) == const0_rtx
3014 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 0),
3015 SET_SRC (XVECEXP (PATTERN (i2
), 0, 1)))
3016 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
3017 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
))
3019 /* We make I1 with the same INSN_UID as I2. This gives it
3020 the same DF_INSN_LUID for value tracking. Our fake I1 will
3021 never appear in the insn stream so giving it the same INSN_UID
3022 as I2 will not cause a problem. */
3024 i1
= gen_rtx_INSN (VOIDmode
, NULL
, i2
, BLOCK_FOR_INSN (i2
),
3025 XVECEXP (PATTERN (i2
), 0, 1), INSN_LOCATION (i2
),
3027 INSN_UID (i1
) = INSN_UID (i2
);
3029 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 0));
3030 SUBST (XEXP (SET_SRC (PATTERN (i2
)), 0),
3031 SET_DEST (PATTERN (i1
)));
3032 unsigned int regno
= REGNO (SET_DEST (PATTERN (i1
)));
3033 SUBST_LINK (LOG_LINKS (i2
),
3034 alloc_insn_link (i1
, regno
, LOG_LINKS (i2
)));
3037 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3038 make those two SETs separate I1 and I2 insns, and make an I0 that is
3040 if (!HAVE_cc0
&& i0
== 0
3041 && is_parallel_of_n_reg_sets (PATTERN (i2
), 2)
3042 && can_split_parallel_of_n_reg_sets (i2
, 2)
3043 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
3044 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
)
3045 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
3046 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
))
3048 /* If there is no I1, there is no I0 either. */
3051 /* We make I1 with the same INSN_UID as I2. This gives it
3052 the same DF_INSN_LUID for value tracking. Our fake I1 will
3053 never appear in the insn stream so giving it the same INSN_UID
3054 as I2 will not cause a problem. */
3056 i1
= gen_rtx_INSN (VOIDmode
, NULL
, i2
, BLOCK_FOR_INSN (i2
),
3057 XVECEXP (PATTERN (i2
), 0, 0), INSN_LOCATION (i2
),
3059 INSN_UID (i1
) = INSN_UID (i2
);
3061 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 1));
3064 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
3065 if (!can_combine_p (i2
, i3
, i0
, i1
, NULL
, NULL
, &i2dest
, &i2src
))
3068 fprintf (dump_file
, "Can't combine i2 into i3\n");
3072 if (i1
&& !can_combine_p (i1
, i3
, i0
, NULL
, i2
, NULL
, &i1dest
, &i1src
))
3075 fprintf (dump_file
, "Can't combine i1 into i3\n");
3079 if (i0
&& !can_combine_p (i0
, i3
, NULL
, NULL
, i1
, i2
, &i0dest
, &i0src
))
3082 fprintf (dump_file
, "Can't combine i0 into i3\n");
3087 /* Record whether i2 and i3 are trivial moves. */
3088 i2_was_move
= is_just_move (i2
);
3089 i3_was_move
= is_just_move (i3
);
3091 /* Record whether I2DEST is used in I2SRC and similarly for the other
3092 cases. Knowing this will help in register status updating below. */
3093 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
3094 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
3095 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
3096 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
3097 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
3098 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
3099 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
3100 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
3101 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
3103 /* For the earlier insns, determine which of the subsequent ones they
3105 i1_feeds_i2_n
= i1
&& insn_a_feeds_b (i1
, i2
);
3106 i0_feeds_i1_n
= i0
&& insn_a_feeds_b (i0
, i1
);
3107 i0_feeds_i2_n
= (i0
&& (!i0_feeds_i1_n
? insn_a_feeds_b (i0
, i2
)
3108 : (!reg_overlap_mentioned_p (i1dest
, i0dest
)
3109 && reg_overlap_mentioned_p (i0dest
, i2src
))));
3111 /* Ensure that I3's pattern can be the destination of combines. */
3112 if (! combinable_i3pat (i3
, &PATTERN (i3
), i2dest
, i1dest
, i0dest
,
3113 i1
&& i2dest_in_i1src
&& !i1_feeds_i2_n
,
3114 i0
&& ((i2dest_in_i0src
&& !i0_feeds_i2_n
)
3115 || (i1dest_in_i0src
&& !i0_feeds_i1_n
)),
3122 /* See if any of the insns is a MULT operation. Unless one is, we will
3123 reject a combination that is, since it must be slower. Be conservative
3125 if (GET_CODE (i2src
) == MULT
3126 || (i1
!= 0 && GET_CODE (i1src
) == MULT
)
3127 || (i0
!= 0 && GET_CODE (i0src
) == MULT
)
3128 || (GET_CODE (PATTERN (i3
)) == SET
3129 && GET_CODE (SET_SRC (PATTERN (i3
))) == MULT
))
3132 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3133 We used to do this EXCEPT in one case: I3 has a post-inc in an
3134 output operand. However, that exception can give rise to insns like
3136 which is a famous insn on the PDP-11 where the value of r3 used as the
3137 source was model-dependent. Avoid this sort of thing. */
3140 if (!(GET_CODE (PATTERN (i3
)) == SET
3141 && REG_P (SET_SRC (PATTERN (i3
)))
3142 && MEM_P (SET_DEST (PATTERN (i3
)))
3143 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_INC
3144 || GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_DEC
)))
3145 /* It's not the exception. */
3150 for (link
= REG_NOTES (i3
); link
; link
= XEXP (link
, 1))
3151 if (REG_NOTE_KIND (link
) == REG_INC
3152 && (reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i2
))
3154 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i1
)))))
3161 /* See if the SETs in I1 or I2 need to be kept around in the merged
3162 instruction: whenever the value set there is still needed past I3.
3163 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3165 For the SET in I1, we have two cases: if I1 and I2 independently feed
3166 into I3, the set in I1 needs to be kept around unless I1DEST dies
3167 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3168 in I1 needs to be kept around unless I1DEST dies or is set in either
3169 I2 or I3. The same considerations apply to I0. */
3171 added_sets_2
= !dead_or_set_p (i3
, i2dest
);
3174 added_sets_1
= !(dead_or_set_p (i3
, i1dest
)
3175 || (i1_feeds_i2_n
&& dead_or_set_p (i2
, i1dest
)));
3180 added_sets_0
= !(dead_or_set_p (i3
, i0dest
)
3181 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
))
3182 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3183 && dead_or_set_p (i2
, i0dest
)));
3187 /* We are about to copy insns for the case where they need to be kept
3188 around. Check that they can be copied in the merged instruction. */
3190 if (targetm
.cannot_copy_insn_p
3191 && ((added_sets_2
&& targetm
.cannot_copy_insn_p (i2
))
3192 || (i1
&& added_sets_1
&& targetm
.cannot_copy_insn_p (i1
))
3193 || (i0
&& added_sets_0
&& targetm
.cannot_copy_insn_p (i0
))))
3199 /* If the set in I2 needs to be kept around, we must make a copy of
3200 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3201 PATTERN (I2), we are only substituting for the original I1DEST, not into
3202 an already-substituted copy. This also prevents making self-referential
3203 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3208 if (GET_CODE (PATTERN (i2
)) == PARALLEL
)
3209 i2pat
= gen_rtx_SET (i2dest
, copy_rtx (i2src
));
3211 i2pat
= copy_rtx (PATTERN (i2
));
3216 if (GET_CODE (PATTERN (i1
)) == PARALLEL
)
3217 i1pat
= gen_rtx_SET (i1dest
, copy_rtx (i1src
));
3219 i1pat
= copy_rtx (PATTERN (i1
));
3224 if (GET_CODE (PATTERN (i0
)) == PARALLEL
)
3225 i0pat
= gen_rtx_SET (i0dest
, copy_rtx (i0src
));
3227 i0pat
= copy_rtx (PATTERN (i0
));
3232 /* Substitute in the latest insn for the regs set by the earlier ones. */
3234 maxreg
= max_reg_num ();
3238 /* Many machines that don't use CC0 have insns that can both perform an
3239 arithmetic operation and set the condition code. These operations will
3240 be represented as a PARALLEL with the first element of the vector
3241 being a COMPARE of an arithmetic operation with the constant zero.
3242 The second element of the vector will set some pseudo to the result
3243 of the same arithmetic operation. If we simplify the COMPARE, we won't
3244 match such a pattern and so will generate an extra insn. Here we test
3245 for this case, where both the comparison and the operation result are
3246 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3247 I2SRC. Later we will make the PARALLEL that contains I2. */
3249 if (!HAVE_cc0
&& i1
== 0 && added_sets_2
&& GET_CODE (PATTERN (i3
)) == SET
3250 && GET_CODE (SET_SRC (PATTERN (i3
))) == COMPARE
3251 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3
)), 1))
3252 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3
)), 0), i2dest
))
3255 rtx
*cc_use_loc
= NULL
;
3256 rtx_insn
*cc_use_insn
= NULL
;
3257 rtx op0
= i2src
, op1
= XEXP (SET_SRC (PATTERN (i3
)), 1);
3258 machine_mode compare_mode
, orig_compare_mode
;
3259 enum rtx_code compare_code
= UNKNOWN
, orig_compare_code
= UNKNOWN
;
3260 scalar_int_mode mode
;
3262 newpat
= PATTERN (i3
);
3263 newpat_dest
= SET_DEST (newpat
);
3264 compare_mode
= orig_compare_mode
= GET_MODE (newpat_dest
);
3266 if (undobuf
.other_insn
== 0
3267 && (cc_use_loc
= find_single_use (SET_DEST (newpat
), i3
,
3270 compare_code
= orig_compare_code
= GET_CODE (*cc_use_loc
);
3271 if (is_a
<scalar_int_mode
> (GET_MODE (i2dest
), &mode
))
3272 compare_code
= simplify_compare_const (compare_code
, mode
,
3274 target_canonicalize_comparison (&compare_code
, &op0
, &op1
, 1);
3277 /* Do the rest only if op1 is const0_rtx, which may be the
3278 result of simplification. */
3279 if (op1
== const0_rtx
)
3281 /* If a single use of the CC is found, prepare to modify it
3282 when SELECT_CC_MODE returns a new CC-class mode, or when
3283 the above simplify_compare_const() returned a new comparison
3284 operator. undobuf.other_insn is assigned the CC use insn
3285 when modifying it. */
3288 #ifdef SELECT_CC_MODE
3289 machine_mode new_mode
3290 = SELECT_CC_MODE (compare_code
, op0
, op1
);
3291 if (new_mode
!= orig_compare_mode
3292 && can_change_dest_mode (SET_DEST (newpat
),
3293 added_sets_2
, new_mode
))
3295 unsigned int regno
= REGNO (newpat_dest
);
3296 compare_mode
= new_mode
;
3297 if (regno
< FIRST_PSEUDO_REGISTER
)
3298 newpat_dest
= gen_rtx_REG (compare_mode
, regno
);
3301 SUBST_MODE (regno_reg_rtx
[regno
], compare_mode
);
3302 newpat_dest
= regno_reg_rtx
[regno
];
3306 /* Cases for modifying the CC-using comparison. */
3307 if (compare_code
!= orig_compare_code
3308 /* ??? Do we need to verify the zero rtx? */
3309 && XEXP (*cc_use_loc
, 1) == const0_rtx
)
3311 /* Replace cc_use_loc with entire new RTX. */
3313 gen_rtx_fmt_ee (compare_code
, compare_mode
,
3314 newpat_dest
, const0_rtx
));
3315 undobuf
.other_insn
= cc_use_insn
;
3317 else if (compare_mode
!= orig_compare_mode
)
3319 /* Just replace the CC reg with a new mode. */
3320 SUBST (XEXP (*cc_use_loc
, 0), newpat_dest
);
3321 undobuf
.other_insn
= cc_use_insn
;
3325 /* Now we modify the current newpat:
3326 First, SET_DEST(newpat) is updated if the CC mode has been
3327 altered. For targets without SELECT_CC_MODE, this should be
3329 if (compare_mode
!= orig_compare_mode
)
3330 SUBST (SET_DEST (newpat
), newpat_dest
);
3331 /* This is always done to propagate i2src into newpat. */
3332 SUBST (SET_SRC (newpat
),
3333 gen_rtx_COMPARE (compare_mode
, op0
, op1
));
3334 /* Create new version of i2pat if needed; the below PARALLEL
3335 creation needs this to work correctly. */
3336 if (! rtx_equal_p (i2src
, op0
))
3337 i2pat
= gen_rtx_SET (i2dest
, op0
);
3342 if (i2_is_used
== 0)
3344 /* It is possible that the source of I2 or I1 may be performing
3345 an unneeded operation, such as a ZERO_EXTEND of something
3346 that is known to have the high part zero. Handle that case
3347 by letting subst look at the inner insns.
3349 Another way to do this would be to have a function that tries
3350 to simplify a single insn instead of merging two or more
3351 insns. We don't do this because of the potential of infinite
3352 loops and because of the potential extra memory required.
3353 However, doing it the way we are is a bit of a kludge and
3354 doesn't catch all cases.
3356 But only do this if -fexpensive-optimizations since it slows
3357 things down and doesn't usually win.
3359 This is not done in the COMPARE case above because the
3360 unmodified I2PAT is used in the PARALLEL and so a pattern
3361 with a modified I2SRC would not match. */
3363 if (flag_expensive_optimizations
)
3365 /* Pass pc_rtx so no substitutions are done, just
3369 subst_low_luid
= DF_INSN_LUID (i1
);
3370 i1src
= subst (i1src
, pc_rtx
, pc_rtx
, 0, 0, 0);
3373 subst_low_luid
= DF_INSN_LUID (i2
);
3374 i2src
= subst (i2src
, pc_rtx
, pc_rtx
, 0, 0, 0);
3377 n_occurrences
= 0; /* `subst' counts here */
3378 subst_low_luid
= DF_INSN_LUID (i2
);
3380 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3381 copy of I2SRC each time we substitute it, in order to avoid creating
3382 self-referential RTL when we will be substituting I1SRC for I1DEST
3383 later. Likewise if I0 feeds into I2, either directly or indirectly
3384 through I1, and I0DEST is in I0SRC. */
3385 newpat
= subst (PATTERN (i3
), i2dest
, i2src
, 0, 0,
3386 (i1_feeds_i2_n
&& i1dest_in_i1src
)
3387 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3388 && i0dest_in_i0src
));
3391 /* Record whether I2's body now appears within I3's body. */
3392 i2_is_used
= n_occurrences
;
3395 /* If we already got a failure, don't try to do more. Otherwise, try to
3396 substitute I1 if we have it. */
3398 if (i1
&& GET_CODE (newpat
) != CLOBBER
)
3400 /* Check that an autoincrement side-effect on I1 has not been lost.
3401 This happens if I1DEST is mentioned in I2 and dies there, and
3402 has disappeared from the new pattern. */
3403 if ((FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3405 && dead_or_set_p (i2
, i1dest
)
3406 && !reg_overlap_mentioned_p (i1dest
, newpat
))
3407 /* Before we can do this substitution, we must redo the test done
3408 above (see detailed comments there) that ensures I1DEST isn't
3409 mentioned in any SETs in NEWPAT that are field assignments. */
3410 || !combinable_i3pat (NULL
, &newpat
, i1dest
, NULL_RTX
, NULL_RTX
,
3418 subst_low_luid
= DF_INSN_LUID (i1
);
3420 /* If the following substitution will modify I1SRC, make a copy of it
3421 for the case where it is substituted for I1DEST in I2PAT later. */
3422 if (added_sets_2
&& i1_feeds_i2_n
)
3423 i1src_copy
= copy_rtx (i1src
);
3425 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3426 copy of I1SRC each time we substitute it, in order to avoid creating
3427 self-referential RTL when we will be substituting I0SRC for I0DEST
3429 newpat
= subst (newpat
, i1dest
, i1src
, 0, 0,
3430 i0_feeds_i1_n
&& i0dest_in_i0src
);
3433 /* Record whether I1's body now appears within I3's body. */
3434 i1_is_used
= n_occurrences
;
3437 /* Likewise for I0 if we have it. */
3439 if (i0
&& GET_CODE (newpat
) != CLOBBER
)
3441 if ((FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3442 && ((i0_feeds_i2_n
&& dead_or_set_p (i2
, i0dest
))
3443 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
)))
3444 && !reg_overlap_mentioned_p (i0dest
, newpat
))
3445 || !combinable_i3pat (NULL
, &newpat
, i0dest
, NULL_RTX
, NULL_RTX
,
3452 /* If the following substitution will modify I0SRC, make a copy of it
3453 for the case where it is substituted for I0DEST in I1PAT later. */
3454 if (added_sets_1
&& i0_feeds_i1_n
)
3455 i0src_copy
= copy_rtx (i0src
);
3456 /* And a copy for I0DEST in I2PAT substitution. */
3457 if (added_sets_2
&& ((i0_feeds_i1_n
&& i1_feeds_i2_n
)
3458 || (i0_feeds_i2_n
)))
3459 i0src_copy2
= copy_rtx (i0src
);
3462 subst_low_luid
= DF_INSN_LUID (i0
);
3463 newpat
= subst (newpat
, i0dest
, i0src
, 0, 0, 0);
3467 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3468 to count all the ways that I2SRC and I1SRC can be used. */
3469 if ((FIND_REG_INC_NOTE (i2
, NULL_RTX
) != 0
3470 && i2_is_used
+ added_sets_2
> 1)
3471 || (i1
!= 0 && FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3472 && (i1_is_used
+ added_sets_1
+ (added_sets_2
&& i1_feeds_i2_n
)
3474 || (i0
!= 0 && FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3475 && (n_occurrences
+ added_sets_0
3476 + (added_sets_1
&& i0_feeds_i1_n
)
3477 + (added_sets_2
&& i0_feeds_i2_n
)
3479 /* Fail if we tried to make a new register. */
3480 || max_reg_num () != maxreg
3481 /* Fail if we couldn't do something and have a CLOBBER. */
3482 || GET_CODE (newpat
) == CLOBBER
3483 /* Fail if this new pattern is a MULT and we didn't have one before
3484 at the outer level. */
3485 || (GET_CODE (newpat
) == SET
&& GET_CODE (SET_SRC (newpat
)) == MULT
3492 /* If the actions of the earlier insns must be kept
3493 in addition to substituting them into the latest one,
3494 we must make a new PARALLEL for the latest insn
3495 to hold additional the SETs. */
3497 if (added_sets_0
|| added_sets_1
|| added_sets_2
)
3499 int extra_sets
= added_sets_0
+ added_sets_1
+ added_sets_2
;
3502 if (GET_CODE (newpat
) == PARALLEL
)
3504 rtvec old
= XVEC (newpat
, 0);
3505 total_sets
= XVECLEN (newpat
, 0) + extra_sets
;
3506 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3507 memcpy (XVEC (newpat
, 0)->elem
, &old
->elem
[0],
3508 sizeof (old
->elem
[0]) * old
->num_elem
);
3513 total_sets
= 1 + extra_sets
;
3514 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3515 XVECEXP (newpat
, 0, 0) = old
;
3519 XVECEXP (newpat
, 0, --total_sets
) = i0pat
;
3525 t
= subst (t
, i0dest
, i0src_copy
? i0src_copy
: i0src
, 0, 0, 0);
3527 XVECEXP (newpat
, 0, --total_sets
) = t
;
3533 t
= subst (t
, i1dest
, i1src_copy
? i1src_copy
: i1src
, 0, 0,
3534 i0_feeds_i1_n
&& i0dest_in_i0src
);
3535 if ((i0_feeds_i1_n
&& i1_feeds_i2_n
) || i0_feeds_i2_n
)
3536 t
= subst (t
, i0dest
, i0src_copy2
? i0src_copy2
: i0src
, 0, 0, 0);
3538 XVECEXP (newpat
, 0, --total_sets
) = t
;
3542 validate_replacement
:
3544 /* Note which hard regs this insn has as inputs. */
3545 mark_used_regs_combine (newpat
);
3547 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3548 consider splitting this pattern, we might need these clobbers. */
3549 if (i1
&& GET_CODE (newpat
) == PARALLEL
3550 && GET_CODE (XVECEXP (newpat
, 0, XVECLEN (newpat
, 0) - 1)) == CLOBBER
)
3552 int len
= XVECLEN (newpat
, 0);
3554 newpat_vec_with_clobbers
= rtvec_alloc (len
);
3555 for (i
= 0; i
< len
; i
++)
3556 RTVEC_ELT (newpat_vec_with_clobbers
, i
) = XVECEXP (newpat
, 0, i
);
3559 /* We have recognized nothing yet. */
3560 insn_code_number
= -1;
3562 /* See if this is a PARALLEL of two SETs where one SET's destination is
3563 a register that is unused and this isn't marked as an instruction that
3564 might trap in an EH region. In that case, we just need the other SET.
3565 We prefer this over the PARALLEL.
3567 This can occur when simplifying a divmod insn. We *must* test for this
3568 case here because the code below that splits two independent SETs doesn't
3569 handle this case correctly when it updates the register status.
3571 It's pointless doing this if we originally had two sets, one from
3572 i3, and one from i2. Combining then splitting the parallel results
3573 in the original i2 again plus an invalid insn (which we delete).
3574 The net effect is only to move instructions around, which makes
3575 debug info less accurate.
3577 If the remaining SET came from I2 its destination should not be used
3578 between I2 and I3. See PR82024. */
3580 if (!(added_sets_2
&& i1
== 0)
3581 && is_parallel_of_n_reg_sets (newpat
, 2)
3582 && asm_noperands (newpat
) < 0)
3584 rtx set0
= XVECEXP (newpat
, 0, 0);
3585 rtx set1
= XVECEXP (newpat
, 0, 1);
3586 rtx oldpat
= newpat
;
3588 if (((REG_P (SET_DEST (set1
))
3589 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set1
)))
3590 || (GET_CODE (SET_DEST (set1
)) == SUBREG
3591 && find_reg_note (i3
, REG_UNUSED
, SUBREG_REG (SET_DEST (set1
)))))
3592 && insn_nothrow_p (i3
)
3593 && !side_effects_p (SET_SRC (set1
)))
3596 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3599 else if (((REG_P (SET_DEST (set0
))
3600 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set0
)))
3601 || (GET_CODE (SET_DEST (set0
)) == SUBREG
3602 && find_reg_note (i3
, REG_UNUSED
,
3603 SUBREG_REG (SET_DEST (set0
)))))
3604 && insn_nothrow_p (i3
)
3605 && !side_effects_p (SET_SRC (set0
)))
3607 rtx dest
= SET_DEST (set1
);
3608 if (GET_CODE (dest
) == SUBREG
)
3609 dest
= SUBREG_REG (dest
);
3610 if (!reg_used_between_p (dest
, i2
, i3
))
3613 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3615 if (insn_code_number
>= 0)
3616 changed_i3_dest
= 1;
3620 if (insn_code_number
< 0)
3624 /* Is the result of combination a valid instruction? */
3625 if (insn_code_number
< 0)
3626 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3628 /* If we were combining three insns and the result is a simple SET
3629 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3630 insns. There are two ways to do this. It can be split using a
3631 machine-specific method (like when you have an addition of a large
3632 constant) or by combine in the function find_split_point. */
3634 if (i1
&& insn_code_number
< 0 && GET_CODE (newpat
) == SET
3635 && asm_noperands (newpat
) < 0)
3637 rtx parallel
, *split
;
3638 rtx_insn
*m_split_insn
;
3640 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3641 use I2DEST as a scratch register will help. In the latter case,
3642 convert I2DEST to the mode of the source of NEWPAT if we can. */
3644 m_split_insn
= combine_split_insns (newpat
, i3
);
      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || !modified_between_p (*split, i2, i3))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;
	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_BIND_INSNS)
	    i2src = copy_rtx (i2src);

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }

	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
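	  /* For example (purely an illustration), a split point such as

		 (mult:SI (reg:SI 100) (const_int 8))

	     found inside what looked like a memory address is rewritten
	     below as

		 (ashift:SI (reg:SI 100) (const_int 3))

	     since exact_log2 (8) == 3.  */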
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      rtx i_rtx = gen_int_shift_amount (split_mode, i);
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       i_rtx));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif

	  /* Attempt to split binary operators using arithmetic identities.  */
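	  /* A hypothetical instance of the identity splits below:
	     "X = Y op Y" becomes "Z = Y; X = Z op Z", so

		 (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 101)))

	     can be split, with reg 102 standing in for NEWDEST, into

		 (set (reg:SI 102) (reg:SI 101))
		 (set (reg:SI 100) (plus:SI (reg:SI 102) (reg:SI 102)))

	     (register numbers invented for the example).  */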
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }

	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
    }

  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a register-register
     copy.  This saves at least one insn, more if register allocation can
     eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */
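  /* Sketch of the shape handled here (hypothetical register numbers):

	 (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI (reg:SI 101))))
		    (set (reg:HI 102) (mem:HI (reg:SI 101)))])

     becomes the extending load into reg 100 followed by a low-part
     register-register copy of reg 100 into reg 102.  */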
  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
			       BITS_PER_WORD)
		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
			       HOST_BITS_PER_INT)
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   BITS_PER_WORD)
		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
				   HOST_BITS_PER_INT)
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }

  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.  */
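  /* For instance (purely illustrative), combining may leave

	 (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
		    (set (reg:SI 102) (reg:SI 103))])

     where neither SET references the other's destination; the code below
     then retires one SET to I2 and the other to I3 as separate insns.  */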
  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1
	       || set_noop_p (XVECEXP (newpat, 0, 0))
	       || set_noop_p (XVECEXP (newpat, 0, 1))
	       || (!i2_was_move && !i3_was_move))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there.  */
      if (!modified_between_p (SET_SRC (set1), i2, i3)
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!modified_between_p (SET_SRC (set0), i2, i3)
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    {
	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		  {
		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		    if (reg_overlap_mentioned_p (reg, newpat))
		      {
			undo_all ();
			return 0;
		      }
		  }
	    }

	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    split_i2i3 = 1;
	}
    }

  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }
  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* Only allow this combination if insn_cost reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }
  if (MAY_HAVE_DEBUG_BIND_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }

  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }
  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop equal notes since they may no longer be valid
		 for other_insn.  It may be possible to record that the CC
		 register is changed and only discard those notes, but
		 in practice it's unnecessary complication and doesn't
		 give any meaningful improvement.  */
	      || REG_NOTE_KIND (note) == REG_EQUAL
	      || REG_NOTE_KIND (note) == REG_EQUIV)
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }
  if (swap_i2i3)
    {
      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  if (swap_i2i3 || split_i2i3)
    {
      /* We might need a LOG_LINK from I3 to I2.  But then we used to
	 have one, so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We should change it to point at
	 I2 instead.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      rtx x = newi2pat;
      if (GET_CODE (x) == PARALLEL)
	x = XVECEXP (newi2pat, 0, 0);

      /* It can only be a SET of a REG or of a SUBREG of a REG.  */
      unsigned int regno = reg_or_subregno (SET_DEST (x));

      bool done = false;
      for (rtx_insn *insn = NEXT_INSN (i3);
	   !done
	   && insn
	   && NONDEBUG_INSN_P (insn)
	   && BLOCK_FOR_INSN (insn) == this_basic_block;
	   insn = NEXT_INSN (insn))
	{
	  struct insn_link *link;
	  FOR_EACH_LOG_LINK (link, insn)
	    if (link->insn == i3 && link->regno == regno)
	      {
		link->insn = i2;
		done = true;
		break;
	      }
	}
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be
       setting either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing the REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest.

       Local information is used when distributing the REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:

	   i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)

       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);
    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything that is
       shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));
    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
	     link = XEXP (link, 1))
	  {
	    if (substed_i2)
	      {
		/* I2SRC must still be meaningful at this point.  Some
		   splitting operations can invalidate I2SRC, but those
		   operations do not apply to calls.  */
		gcc_assert (i2src);
		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						       i2dest, i2src);
	      }
	    if (substed_i1)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i1dest, i1src);
	    if (substed_i0)
	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
						     i0dest, i0src);
	  }
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;
    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = NULL_RTX;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;
    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_BIND_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }
    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, things
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }
    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }
    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }
  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }
  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);
      gcc_assert (bb);
      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
      gcc_assert (bb);
      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }
  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  rtx_insn *ret = newi2pat ? i2 : i3;
  if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
    ret = added_links_insn;
  if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
    ret = added_notes_insn;

  return ret;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}
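/* Usage sketch (hypothetical caller): the marker makes speculative
   simplification attempts cheap to abandon, e.g.

       void *marker = get_undo_marker ();
       ... perform SUBSTs while trying a simplification ...
       if (attempt_failed)
	 undo_to_marker (marker);

   undo_to_marker is defined below.  */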
/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}
/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;
  scalar_int_mode mode, inner_mode;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
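      /* Illustration with a hypothetical address: on a HIGH/LO_SUM target,
	 the address of (mem (symbol_ref "x")) is rewritten below as

	     (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))

	 and the HIGH part becomes the split point, so it can be computed
	 by a separate insn.  */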
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */
	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;
    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
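      /* Worked example (values invented for illustration): storing the
	 constant 5 into a 4-bit field at bit 8 of reg 100,

	     (set (zero_extract:SI (reg:SI 100) (const_int 4) (const_int 8))
		  (const_int 5))

	 is rewritten below as a mask-and-or on the whole register,

	     (set (reg:SI 100)
		  (ior:SI (and:SI (reg:SI 100) (const_int -3841))
			  (const_int 1280)))

	 since ~(0xf << 8) == -3841 and 5 << 8 == 1280.  */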
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
				     &inner_mode)
	  && HWI_COMPUTABLE_MODE_P (inner_mode)
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (inner_mode))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, inner_mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, inner_mode,
					  simplify_gen_binary (AND, inner_mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */
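	  /* For example (hypothetical): if reg 100 is set by

		 (and:SI (reg:SI 101) (const_int 1048576))

	     and only used in (ne (reg:SI 100) (const_int 0)), the AND with
	     the expensive constant 1048576 (== 1 << 20) is replaced below
	     by a one-bit ZERO_EXTRACT at position 20.  */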
	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
						   GET_MODE (XEXP (SET_SRC (x),
							     0))))) >= 1))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    pos_rtx)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (inner_mode);
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
				      &inner_mode)
	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}
      if (len
	  && known_subrange_p (pos, len,
			       0, GET_MODE_PRECISION (GET_MODE (inner)))
	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
	{
	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner), pos_rtx),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      int left_bits = GET_MODE_PRECISION (mode) - len - pos;
	      int right_bits = GET_MODE_PRECISION (mode) - len;
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      gen_int_shift_amount (mode, left_bits)),
		      gen_int_shift_amount (mode, right_bits)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;
    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
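      /* That is (illustration): (and (not A) (not B)) is rewritten below
	 as (not (ior A B)), and (ior (not A) (not B)) as (not (and A B)),
	 after which the inner IOR or AND can supply a split point.  */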
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;
    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
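      /* Concretely (an illustration): (minus A (mult B (const_int 3)))
	 becomes (plus (mult B (const_int -3)) A), exposing the
	 multiply-add shape some targets can match; a power-of-two
	 multiplier is left alone because it is really a shift.  */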
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }
  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too great to record for undoing,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;
  /* Two expressions are equal if they are identical copies of a shared
     RTX or if they are both registers with the same register number
     and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));
      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && !targetm.modes_tieable_p (GET_MODE (to),
						   GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && (targetm.modes_tieable_p
				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
		      && (!HAVE_cc0
			  || ! (code == SET
				&& i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG || code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && (GET_CODE (x) == ZERO_EXTEND
			   || GET_CODE (x) == FLOAT
			   || GET_CODE (x) == UNSIGNED_FLOAT))
		{
		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  if (! x)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
5625 /* Check if we are loading something from the constant pool via float
5626 extension; in this case we would undo compress_float_constant
5627 optimization and degenerate constant load to an immediate value. */
5628 if (GET_CODE (x
) == FLOAT_EXTEND
5629 && MEM_P (XEXP (x
, 0))
5630 && MEM_READONLY_P (XEXP (x
, 0)))
5632 rtx tmp
= avoid_constant_pool_reference (x
);
5637 /* Try to simplify X. If the simplification changed the code, it is likely
5638 that further simplification will help, so loop, but limit the number
5639 of repetitions that will be performed. */
5641 for (i
= 0; i
< 4; i
++)
5643 /* If X is sufficiently simple, don't bother trying to do anything
5645 if (code
!= CONST_INT
&& code
!= REG
&& code
!= CLOBBER
)
5646 x
= combine_simplify_rtx (x
, op0_mode
, in_dest
, in_cond
);
5648 if (GET_CODE (x
) == code
)
5651 code
= GET_CODE (x
);
5653 /* We no longer know the original mode of operand 0 since we
5654 have changed the form of X) */
5655 op0_mode
= VOIDmode
;
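
/* Illustrative sketch, not part of the original source: if insn I1 is
   (set (reg 100) (plus (reg 101) (const_int 4))) and a later insn uses
   (reg 100), calling subst on that insn's body with FROM = (reg 100) and
   TO = (plus (reg 101) (const_int 4)) rewrites, e.g.,
   (set (mem (reg 100)) (reg 102)) into
   (set (mem (plus (reg 101) (const_int 4))) (reg 102)),
   which may then match a single addressing-mode insn.  */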
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
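
/* Worked example (illustrative only): the canonical order puts the more
   complex operand first and a constant last, so for

	(plus:SI (const_int 4) (reg:SI 100))

   the two SUBSTs above leave

	(plus:SI (reg:SI 100) (const_int 4))

   which is the form later pattern matching expects.  */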
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  scalar_int_mode int_mode;
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
	break;
      }
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }

  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
	  /* Similarly, if we end up with one of the expressions the same
	     as the original, it is certainly not simpler.  */
	  && ! rtx_equal_p (x, true_rtx)
	  && ! rtx_equal_p (x, false_rtx))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 inner_op1);
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }
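
  /* Worked example (illustrative, assuming SImode): for
     (and:SI (and:SI (reg:SI 100) (const_int 3)) (const_int 5)),
     OTHER is (reg:SI 100) and the two constants associate to
     3 & 5 = 1, so the whole expression becomes
     (and:SI (reg:SI 100) (const_int 1)).  */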
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
	     /* Don't call gen_lowpart if the inner mode
		is VOIDmode and we cannot simplify it, as SUBREG without
		inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	rtx temp;
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;

	/* If op is known to have all lower bits zero, the result is zero.  */
	scalar_int_mode int_mode, int_op0_mode;
	if (!in_dest
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
			 SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
	    && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
		& GET_MODE_MASK (int_mode)) == 0)
	  return CONST0_RTX (int_mode);
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, int_mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
				   GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;
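
      /* Worked example (illustrative, 32-bit SImode): if nonzero_bits shows
	 that only bit 0 of TEMP can be set, I = 0 and (neg:SI TEMP) becomes
	 (ashiftrt:SI (ashift:SI TEMP (const_int 31)) (const_int 31)),
	 which maps 0 -> 0 and 1 -> -1, exactly what negating a 0/1 value
	 does.  */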
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (HWI_COMPUTABLE_MODE_P (mode))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      {
	poly_int64 c;
	if (poly_int_rtx_p (XEXP (x, 0), &c))
	  return gen_int_mode (c, mode);
      }

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;

    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (int_mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && known_eq ((GET_MODE_PRECISION
				(GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
			       (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, int_mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
				 gen_rtx_XOR (int_mode, XEXP (x, 0),
					      const1_rtx),
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);
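
      /* Check of the above (illustrative, 32-bit SImode): when
	 nonzero_bits (X) == 1, X = 1 gives (xor 1 1) = 0 and the shift pair
	 keeps 0, while X = 0 gives 1, which the shift pair turns into -1;
	 both match (plus X (const_int -1)).  */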
      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
	    return temp;
	}

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
	  && !side_effects_p (XEXP (x, 0)))
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
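
      /* Worked example (illustrative): with pow2 = 8,
	 (minus:SI R (and:SI R (const_int -8))) computes R - (R & -8),
	 i.e. the low three bits of R, so it becomes
	 (and:SI R (const_int 7)).  */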
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;

    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (is_a <scalar_int_mode> (mode, &int_mode)
	  && CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
				     XEXP (x, 0), i);
      break;
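
      /* Worked example (illustrative): for
	 (udiv:SI (lshiftrt:SI R (const_int 3)) (const_int 4)),
	 I = 2 and the division is handed to simplify_shift_const as a
	 logical right shift by 2, which merges with the inner shift to
	 give (lshiftrt:SI R (const_int 5)).  */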
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */

	  if (in_cond)
	    ;

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    return gen_lowpart (int_mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, int_mode,
					  gen_lowpart (int_mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
	    }
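
	  /* Summary of the four STORE_FLAG_VALUE == 1 cases above
	     (illustrative):

		nonzero_bits (X) == 1:  (ne X 0) -> X       (eq X 0) -> (xor X 1)
		X known to be 0 or -1:  (ne X 0) -> (neg X) (eq X 0) -> (plus X 1)

	     e.g. for X = -1: (neg X) = 1 and (plus X 1) = 0, the expected
	     flag values.  */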
	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    return gen_lowpart (int_mode, expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, int_mode)
		       == GET_MODE_PRECISION (int_mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ
		   && is_int_mode (mode, &int_mode)
		   && op1 == const0_rtx
		   && int_mode == GET_MODE (op0)
		   && nonzero_bits (op0, int_mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && int_mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (int_mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      (HOST_WIDE_INT_1U
			       << exact_log2 (GET_MODE_UNIT_BITSIZE
					      (GET_MODE (x))))
			      - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;
  scalar_int_mode int_mode, inner_mode;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  std::swap (true_rtx, false_rtx);
	}

      scalar_int_mode from_mode;
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
	{
	  /* If we are comparing against zero and the expression being
	     tested has only a single bit that might be nonzero, that is
	     its value when it is not equal to zero.  Similarly if it is
	     known to be -1 or 0.  */
	  if (true_code == EQ
	      && true_val == const0_rtx
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
	    {
	      false_code = EQ;
	      false_val = gen_int_mode (nzb, from_mode);
	    }
	  else if (true_code == EQ
		   && true_val == const0_rtx
		   && (num_sign_bit_copies (from, from_mode)
		       == GET_MODE_PRECISION (from_mode)))
	    {
	      false_code = EQ;
	      false_val = constm1_rtx;
	    }
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }

  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
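
  /* Worked example (illustrative): for (if_then_else (gt:SI A B) A B) the
     arms match the comparison operands, so the whole expression collapses
     to (smax:SI A B); the unsigned comparisons likewise give umax/umin.  */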
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && is_int_mode (mode, &int_mode)
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      scalar_int_mode m = int_mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_PRECISION (int_mode)
		      - GET_MODE_PRECISION (inner_mode))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && HWI_COMPUTABLE_MODE_P (int_mode)
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (inner_mode))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = inner_mode;
	}

      if (z)
	{
	  machine_mode cm = m;
	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
	      && GET_MODE (c1) != VOIDmode)
	    cm = GET_MODE (c1);
	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (MULT, cm, temp,
				      simplify_gen_binary (MULT, cm, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);

	  return temp;
	}
    }

  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE
      && is_a <scalar_int_mode> (mode, &int_mode)
      && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx
      && CONST_INT_P (true_rtx)
      && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
			    gen_lowpart (int_mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
      && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
	 == nonzero_bits (XEXP (cond, 0), inner_mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      if (inner_mode == int_mode)
	return val;
      else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
    }

  return x;
}
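
/* Worked example for the last case above (illustrative): if A is known to
   have (const_int 8) as its only possibly-nonzero bit, then
   (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
   is just A itself, or (zero_extend A) when the result mode is wider.  */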
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  scalar_int_mode int_mode;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	{
	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
	    {
	      inner_compare = op0;
	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
	    }
	}
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else if (inner_compare
	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
	       && new_code == old_code
	       && op0 == XEXP (inner_compare, 0)
	       && op1 == XEXP (inner_compare, 1))
	compare_mode = GET_MODE (inner_compare);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif  /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, if we didn't previously have the same COMPARE we
	 want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }

  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (known_equal_after_align_up
	  (GET_MODE_SIZE (GET_MODE (src)),
	   GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
	   UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
				       GET_MODE (SUBREG_REG (src)),
				       GET_MODE (src)))
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && partial_subreg_p (src)
      && subreg_lowpart_p (src))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
				     nonzero_bits (inner, inner_mode)))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }

  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.
     Don't do this unless we have a scalar integer mode, otherwise the
     transformation is incorrect.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && SCALAR_INT_MODE_P (GET_MODE (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }

  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && is_int_mode (GET_MODE (src), &int_mode)
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
	  || ! can_conditionally_move_p (int_mode))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
	  == GET_MODE_PRECISION (int_mode))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1),
	  false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0),
	  false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1),
	  true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0),
	  true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, int_mode,
				  simplify_gen_binary (IOR, int_mode,
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
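
/* Worked example for the IF_THEN_ELSE-to-logical conversion above
   (illustrative): when A is known to be 0 or -1,
   (set D (if_then_else (ne A 0) B C)) is computed without a branch as
   (set D (ior (and A B) (and (not A) C))); TERM1 covers the case where
   one arm is an IOR of the other arm with some extra value.  */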
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);
  scalar_int_mode mode;

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
	  && CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
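
/* Illustrative sketch: for (ior:SI (and:SI A B) C) the helper first
   distributes to (and:SI (ior:SI A C) (ior:SI B C)) and then applies the
   inverse distributive law; the rewrite is kept only when the result is
   genuinely simpler than the original expression.  */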
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;
  scalar_int_mode inner_mode;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* fall through */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded.  If not for that, MEM's would very rarely be safe.

	 Reject modes bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
	return x;

      len = GET_MODE_PRECISION (inner_mode);
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
	  || !CONST_INT_P (XEXP (x, 2)))
	return x;

      /* Reject modes that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (inner_mode))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;

      break;

    default:
      return x;
    }

  /* We've rejected non-scalar operations by now.  */
  scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && HWI_COMPUTABLE_MODE_P (mode)
      && ((nonzero_bits (XEXP (x, 0), inner_mode)
	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
	  == 0))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
	return temp;
      else
	return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
	      & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when MODE is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
	return SUBREG_REG (XEXP (x, 0));
    }

  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */

  modewidth = GET_MODE_PRECISION (mode);
  if (modewidth >= pos + len)
    {
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, mode,
				  simplify_shift_const (NULL_RTX, LSHIFTRT,
							mode, XEXP (x, 0),
							pos),
				  (HOST_WIDE_INT_1U << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
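
/* Worked example (illustrative, 32-bit SImode): a signed byte extract at
   bit 0, (sign_extract:SI R (const_int 8) (const_int 0)), has POS = 0 and
   LEN = 8, so the shift pair built above is

	(ashiftrt:SI (ashift:SI R (const_int 24)) (const_int 24))

   (left by MODEWIDTH - POS - LEN = 24, arithmetic right by
   MODEWIDTH - LEN = 24).  The unsigned variant's shift pair is later
   collapsed to an AND, here simply (and:SI R (const_int 255)).  */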
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len, inner_len;
  rtx mask, cleared, masked;
  scalar_int_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  rtx x0 = XEXP (SET_DEST (x), 0);
	  if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
	    break;
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
			      MAX_MODE_INT);
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
	{
	  inner = XEXP (SET_DEST (x), 0);
	  if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
	    break;
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (CONST_INT_P (pos))
		pos = GEN_INT (inner_len - len - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && CONST_INT_P (XEXP (pos, 1))
		       && INTVAL (XEXP (pos, 1)) == inner_len - len)
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					   gen_int_mode (inner_len - len,
							 GET_MODE (pos)),
					   pos);
	    }
	}

      /* If the destination is a subreg that overwrites the whole of the inner
	 register, we can move the subreg to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && !read_modify_subreg_p (SET_DEST (x)))
	{
	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
	{
	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (GET_MODE (inner)))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	      .exists (&compute_mode))
	    break;

	  inner = gen_lowpart (compute_mode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
			   compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (AND, compute_mode,
							 gen_lowpart
							 (compute_mode,
							  SET_SRC (x)),
							 mask),
				    pos);

      x = gen_rtx_SET (copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}
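
/* Illustrative example, not from the original source: for
       (set (zero_extract:SI (reg:SI r) (const_int 8) (const_int 4))
	    (reg:SI s))
   the loop above computes MASK = 255, CLEARED = (and r (not (ashift 255 4)))
   and MASKED = (ashift (and s 255) 4), giving
       (set (reg:SI r)
	    (ior:SI (and:SI (reg:SI r) (const_int -4081))
		    (ashift:SI (and:SI (reg:SI s) (const_int 255))
			       (const_int 4)))).  */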
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
		 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  scalar_int_mode wanted_inner_mode;
  scalar_int_mode wanted_inner_reg_mode = word_mode;
  scalar_int_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG
      && subreg_lowpart_p (inner)
      && (paradoxical_subreg_p (inner)
	  /* If trying or potentially trying to extract
	     bits outside of is_mode, don't look through
	     non-paradoxical SUBREGs.  See PR82192.  */
	  || (pos_rtx == NULL_RTX
	      && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && CONST_INT_P (XEXP (inner, 1))
	   && pos_rtx == 0 && pos == 0
	   && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
				 0, 0, len - INTVAL (XEXP (inner, 1)),
				 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE
	   /* If trying or potentially trying to extract
	      bits outside of is_mode, don't look through
	      TRUNCATE.  See PR82192.  */
	   && pos_rtx == NULL_RTX
	   && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  scalar_int_mode tmode;
  if (int_mode_for_size (len, 1).exists (&tmode)
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && !paradoxical_subreg_p (tmode, inner_mode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  poly_int64 offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
					       - len - pos);
	  else
	    offset = pos / BITS_PER_UNIT;

	  new_rtx = adjust_address_nv (inner, tmode, offset);
	}
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  poly_uint64 offset
		    = subreg_offset_from_lsb (tmode, inner_mode, pos);

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, offset))
		    return NULL_RTX;

		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
		}
	      else
		new_rtx = gen_lowpart (tmode, inner);
	    }
	  else
	    new_rtx = inner;
	}
      else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new_rtx) ? new_rtx
		: (GET_CODE (new_rtx) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
	return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (tmode)
	      && ((nonzero_bits (new_rtx, tmode)
		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new_rtx));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
	  || (pos_rtx != 0 && len != 1)))
    return 0;

  enum extraction_pattern pattern = (in_dest ? EP_insv
				     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  unsigned int inner_size;
  if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
      && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
    {
      wanted_inner_reg_mode = insn.struct_mode.require ();
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && partial_subreg_p (extraction_mode, mode))
    extraction_mode = mode;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
      HOST_WIDE_INT width;
      if (!MEM_P (inner))
	width = GET_MODE_BITSIZE (wanted_inner_mode);
      else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
	return 0;

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
			   pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && partial_subreg_p (wanted_inner_mode, is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      poly_int64 offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && paradoxical_subreg_p (is_mode, inner_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
	 the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
      if (in_dest
	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
					     wanted_inner_mode))
	return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return NULL_RTX;

      if (orig_pos < 0)
	return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.

     We dealt with constant rtxes earlier, so pos_rtx cannot
     have VOIDmode at this point.  */
  if (pos_rtx != 0
      && (GET_MODE_SIZE (pos_mode)
	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
				     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert extraction to cheaper one - either
	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
	 cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
					  GET_MODE (pos_rtx));

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
	    temp = temp1;
	}
      pos_rtx = temp;
    }

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			     extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
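
/* Illustrative example, not from the original source: assuming no direct
   subreg or mode-change shortcut applies on the target,
       make_extraction (SImode, (reg:SI x), 3, NULL_RTX, 7, 1, 0, 0)
   describes the unsigned field of bits [9:3] of X and yields roughly
       (zero_extract:SI (reg:SI x) (const_int 7) (const_int 3)).  */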
/* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
   can be commuted with any other operations in X.  Return X without
   that shift if so.  */

static rtx
extract_left_shift (scalar_int_mode mode, rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);
      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}
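
/* Illustrative example, not from the original source: with COUNT == 2,
       (plus:SI (ashift:SI (reg:SI x) (const_int 2)) (const_int 8))
   is returned as (plus:SI (reg:SI x) (const_int 2)); the low two bits of
   the constant are zero, so the addition commutes with the left shift.  */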
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  scalar_int_mode inner_mode;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	}
      maybe_swap_commutative_operands (x);
      break;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}

    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation (XEXP (XEXP (x,
									  0),
								    0),
							      next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  int sub_width;
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
	      && sub_width < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* Original AND constant with all the known zero bits set.  */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	    && CONST_INT_P (XEXP (inner, 1))
	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (inner_mode)
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
		   for non-equality comparisons against 0 is not equivalent
		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && partial_subreg_p (x)
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode) - 1)))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && partial_subreg_p (x)
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded
	       the compound, use gen_lowpart to convert to the desired
	       mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
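
/* Illustrative example, not from the original source: inside an address
   (IN_CODE == MEM), the ASHIFT case above rewrites
       (plus:SI (ashift:SI (reg:SI i) (const_int 2)) (reg:SI base))
   as
       (plus:SI (mult:SI (reg:SI i) (const_int 4)) (reg:SI base)),
   the canonical MULT form used for scaled-index addresses.  */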
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  scalar_int_mode mode;
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
    {
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}
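
/* Illustrative example, not from the original source: with IN_CODE == SET,
       (and:SI (lshiftrt:SI (reg:SI x) (const_int 9)) (const_int 127))
   matches the AND case above (127 is one less than a power of two), so it
   is rewritten as
       (zero_extract:SI (reg:SI x) (const_int 7) (const_int 9)).  */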
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;

  return pos;
}
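
/* Illustrative examples, not from the original source: M == 0xf0 gives
   position 4 with *PLEN == 4, since 0xf0 >> 4 == 0xf == 2**4 - 1;
   M == 0xa0 gives -1, since 0xa0 >> 5 == 5 and 5 + 1 is not a power
   of two.  */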
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && partial_subreg_p (mode, GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
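
/* Illustrative example, not from the original source: narrowing
   (reg:DI d) to SImode yields (subreg:SI (reg:DI d) 0) when truncation
   is known to be a no-op, and an explicit (truncate:SI (reg:DI d))
   otherwise.  */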
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT nonzero;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && partial_subreg_p (mode, GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode != VOIDmode)
    mask &= GET_MODE_MASK (op_mode);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (paradoxical_subreg_p (mode, GET_MODE (x))
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && (partial_subreg_p (x)
	  || (mask
	      & GET_MODE_MASK (GET_MODE (x))
	      & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  scalar_int_mode int_mode, xmode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
    /* OP_MODE is either MODE or XMODE, so it must be a scalar
       integer mode.  */
    return force_int_to_mode (x, int_mode, xmode,
			      as_a <scalar_int_mode> (op_mode),
			      mask, just_select);

  return gen_lowpart_or_truncate (mode, x);
}
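
/* Illustrative example, not from the original source: asking for only the
   low four bits of (and:SI (reg:SI x) (const_int 255)), i.e.
       force_to_mode (x, SImode, 15, 0),
   reaches the AND case of force_int_to_mode below, which folds the
   constant down to 15 and then deletes the now-redundant AND entirely,
   leaving just (reg:SI x).  */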
/* Subroutine of force_to_mode that handles cases in which both X and
   the result are scalar integers.  MODE is the mode of the result,
   XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
   is preferred for simplified versions of X.  The other arguments
   are as for force_to_mode.  */

static rtx
force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
		   int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  unsigned HOST_WIDE_INT fuller_mask;
  rtx op0, op1, temp;
  poly_int64 const_op0;

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));
	  xmode = op_mode;

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (xmode) != mask
	      && HWI_COMPUTABLE_MODE_P (xmode))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
	  && (unsigned HOST_WIDE_INT) known_alignment (const_op0) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (xmode))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       xmode);
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	{
	  x = simplify_gen_binary (code, op_mode, op0, op1);
	  xmode = op_mode;
	}
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode,
						    mask, next_select));

      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
	  xmode = op_mode;
	}
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
	    op_mode = xmode;

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (xmode != op_mode || inner != XEXP (x, 0))
	    {
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	      xmode = op_mode;
	    }
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (xmode))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (xmode))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	{
	  int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
	  x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				   gen_int_shift_amount (xmode, nbits));
	}
      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (xmode, mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT nonzero;
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (xmode);
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (xmode);
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
		   GET_MODE_PRECISION (xmode) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != xmode || op0 != XEXP (x, 0))
	{
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
	  xmode = op_mode;
	}
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
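
/* Illustrative example, not from the original source: in the MINUS case
   above, with MASK == 7,
       (minus:SI (const_int 32) (reg:SI y))
   becomes (neg:SI (reg:SI y)); the least set bit of 32 lies above every
   bit in the mask, so the constant cannot affect the three bits that
   were requested.  */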
/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;
  scalar_int_mode int_mode;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }

  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);

      if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
	  && (REG_P (op0) || REG_P (op1)))
	{
	  /* Try to enable a simplification by undoing work done by
	     if_then_else_cond if it converted a REG into something more
	     complex.  */
	  if (REG_P (op0))
	    {
	      cond0 = 0;
	      true0 = false0 = op0;
	    }
	  else
	    {
	      cond1 = 0;
	      true1 = false1 = op1;
	    }
	}

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	    }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
	      return cond0;
	    }
	}
9352 /* Similarly for MULT, AND and UMIN, except that for these the result
9354 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
9355 && (code
== MULT
|| code
== AND
|| code
== UMIN
)
9356 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
9358 cond0
= XEXP (XEXP (x
, 0), 0);
9359 cond1
= XEXP (XEXP (x
, 1), 0);
9361 if (COMPARISON_P (cond0
)
9362 && COMPARISON_P (cond1
)
9363 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
9364 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
9365 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
9366 || ((swap_condition (GET_CODE (cond0
))
9367 == reversed_comparison_code (cond1
, NULL
))
9368 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
9369 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
9370 && ! side_effects_p (x
))
9372 *ptrue
= *pfalse
= const0_rtx
;
9378 else if (code
== IF_THEN_ELSE
)
9380 /* If we have IF_THEN_ELSE already, extract the condition and
9381 canonicalize it if it is NE or EQ. */
9382 cond0
= XEXP (x
, 0);
9383 *ptrue
= XEXP (x
, 1), *pfalse
= XEXP (x
, 2);
9384 if (GET_CODE (cond0
) == NE
&& XEXP (cond0
, 1) == const0_rtx
)
9385 return XEXP (cond0
, 0);
9386 else if (GET_CODE (cond0
) == EQ
&& XEXP (cond0
, 1) == const0_rtx
)
9388 *ptrue
= XEXP (x
, 2), *pfalse
= XEXP (x
, 1);
9389 return XEXP (cond0
, 0);
9395 /* If X is a SUBREG, we can narrow both the true and false values
9396 if the inner expression, if there is a condition. */
9397 else if (code
== SUBREG
9398 && (cond0
= if_then_else_cond (SUBREG_REG (x
), &true0
,
9401 true0
= simplify_gen_subreg (mode
, true0
,
9402 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
9403 false0
= simplify_gen_subreg (mode
, false0
,
9404 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
9405 if (true0
&& false0
)
9413 /* If X is a constant, this isn't special and will cause confusions
9414 if we treat it as such. Likewise if it is equivalent to a constant. */
9415 else if (CONSTANT_P (x
)
9416 || ((cond0
= get_last_value (x
)) != 0 && CONSTANT_P (cond0
)))
9419 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9420 will be least confusing to the rest of the compiler. */
9421 else if (mode
== BImode
)
9423 *ptrue
= GEN_INT (STORE_FLAG_VALUE
), *pfalse
= const0_rtx
;
9427 /* If X is known to be either 0 or -1, those are the true and
9428 false values when testing X. */
9429 else if (x
== constm1_rtx
|| x
== const0_rtx
9430 || (is_a
<scalar_int_mode
> (mode
, &int_mode
)
9431 && (num_sign_bit_copies (x
, int_mode
)
9432 == GET_MODE_PRECISION (int_mode
))))
9434 *ptrue
= constm1_rtx
, *pfalse
= const0_rtx
;
9438 /* Likewise for 0 or a single bit. */
9439 else if (HWI_COMPUTABLE_MODE_P (mode
)
9440 && pow2p_hwi (nz
= nonzero_bits (x
, mode
)))
9442 *ptrue
= gen_int_mode (nz
, mode
), *pfalse
= const0_rtx
;
9446 /* Otherwise fail; show no condition with true and false values the same. */
9447 *ptrue
= *pfalse
= x
;
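
/* Illustrative example (added commentary, not from the original source):
   for X == (ne (reg R) (const_int 0)) the very first test above fires, so
   if_then_else_cond returns R with *PTRUE = const_true_rtx and
   *PFALSE = const0_rtx; a caller can then treat uses of X as selecting
   between those two values depending on whether R is nonzero.  */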
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }

  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      x = adjust_address_nv (x, GET_MODE (y),
			     byte_lowpart_offset (GET_MODE (y),
						  GET_MODE (x)));
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other case, we'll import a register
     into this insn that might already have been dead.  So fail if none of
     the above cases are true.  */
  return 0;
}
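
/* Example (illustrative only): X = (mem:SI A) and Y = (subreg:SI
   (mem:QI A) 0) have the same mode, so the paradoxical-SUBREG check above
   applies: gen_lowpart narrows X to (mem:QI A) (on a little-endian target
   the address is unchanged), which matches SUBREG_REG (Y), and the two
   sides are treated as equal for field-assignment purposes.  */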
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;

  /* All the rules in this function are specific to scalar integers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
    return x;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && partial_subreg_p (XEXP (src, 0))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }

  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  scalar_int_mode src_mode = mode;
  if (GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
  if (pos < 0
      || pos + len > GET_MODE_PRECISION (mode)
      || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, mode)) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
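
/* Worked example (illustrative, not from the original source): the
   single-bit set pattern above rewrites

     (set R (ior (ashift (const_int 1) P) R))

   as

     (set (zero_extract R (const_int 1) P) (const_int 1))

   i.e. an insertion of the constant 1 into a one-bit field of R at
   position P, which insv-style insn patterns can often match directly.  */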
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result than can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
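
/* Worked example (illustrative only): for X = (ior (and A C) (and B C))
   the inner code is AND, OTHER is C, and the routine returns
   (and (ior A B) C); if A and B are constants, the inner IOR folds
   immediately inside simplify_gen_binary.  */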
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
      inner_code = XOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
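
/* Worked example (illustrative only): with X = (and (ior A (const_int 12))
   (const_int 10)) and N == 0, the decomposed term is (ior A 12) and the
   distributed term is 10; the two new operands are (and A 10) and
   (and 12 10) = 8, giving (ior (and A 10) (const_int 8)), which is kept
   only if set_src_cost says it is cheaper than the original.  */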
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    {
      scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
      return
	gen_lowpart
	  (mode,
	   apply_distributive_law
	   (simplify_gen_binary (GET_CODE (varop), varop_mode,
				 simplify_and_const_int (NULL_RTX, varop_mode,
							 XEXP (varop, 0),
							 constop),
				 simplify_and_const_int (NULL_RTX, varop_mode,
							 XEXP (varop, 1),
							 constop))));
    }

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
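
/* Worked examples (illustrative only): for VAROP = (neg X) with X known
   to be 0 or 1 and CONSTOP = 4, the NEG rule yields (ashift X 2), since
   (-X) & 4 equals X << 2 for X in {0, 1}.  For VAROP = (plus (mult X 8) Y)
   and CONSTOP = 7, the PLUS rule notices that (and (mult X 8) 7)
   simplifies to zero and reduces the whole AND to (and Y 7).  */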
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
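
/* Usage sketch (illustrative only): callers such as simplify_shift_const_1
   pass X == NULL_RTX when they always want an explicit rtx back, e.g.
   simplify_and_const_int (NULL_RTX, SImode, varop, 0xff) either returns a
   simplified form from simplify_and_const_int_1 or constructs
   (and varop (const_int 255)) in SImode.  */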
/* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
			      scalar_int_mode mode,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
	 of mode, record_value_for_reg invoked nonzero_bits on the register
	 with nonzero_bits_mode (because last_set_mode is necessarily integral
	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
	 are all valid, hence in mode too since nonzero_bits_mode is defined
	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);

      *nonzero &= mask;
    }

  return NULL;
}
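
/* Example of the mask widening above (illustrative only): if X is a QImode
   register used in SImode, XMODE has precision 8 and MODE precision 32, so
   the recorded nonzero_bits (say 0x0f) only constrain the low byte; the
   XOR of the two mode masks, 0xffffff00, is ORed in to leave the upper 24
   bits unconstrained.  */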
/* Given a reg X of mode XMODE, return the number of bits at the high-order
   end of X that are known to be equal to the sign bit.  X will be used
   in mode MODE; the returned value will always be between 1 and the
   number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
				     scalar_int_mode mode,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  scalar_int_mode int_mode;
  return (unsignedp
	  ? (is_a <scalar_int_mode> (mode, &int_mode)
	     && HWI_COMPUTABLE_MODE_P (int_mode)
	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
			       - floor_log2 (nonzero_bits (x, int_mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
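
/* Example (illustrative only): for an unsigned SImode quantity X with
   nonzero_bits (X, SImode) == 0xff, extended_count returns
   32 - 1 - floor_log2 (0xff) = 24 "spare" high-order zero bits; adding
   two such quantities cannot overflow SImode.  */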
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b */
	  ;
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b */
	  op0 = AND, *pcomp_p = 1;
	break;

      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  *pop0 = op0;
  return 1;
}
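
/* Worked example (illustrative only): merging an outermost XOR with an
   inner AND by the same constant uses the identity
   (a & b) ^ b == (~a) & b -- e.g. with a == 6 and b == 3,
   (6 & 3) ^ 3 == 1 == (~6) & 3 -- so *POP0 becomes AND and *PCOMP_P is
   set to request complementing the innermost operand.  */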
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static scalar_int_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      scalar_int_mode orig_mode, scalar_int_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
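
/* Example (illustrative only): for an LSHIFTRT originally in QImode whose
   operand is known, in SImode, to have no bits set above the QImode mask,
   the LSHIFTRT case above allows performing the shift directly in SImode;
   an ASHIFTRT can be widened only when enough sign-bit copies guarantee
   the incoming bits match ORIG_MODE's sign bit.  */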
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count, log2;
  machine_mode mode = result_mode;
  machine_mode shift_mode;
  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
	  code = ROTATE;
	  count = bitsize - count;
	}

      shift_mode = result_mode;
      if (shift_mode != mode)
	{
	  /* We only change the modes of scalar shifts.  */
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
	  shift_mode = try_widen_shift_mode (code, varop, count,
					     int_result_mode, int_mode,
					     outer_op, outer_const);
	}

      scalar_int_mode shift_unit_mode
	= as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_unit_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      if (shift_mode == shift_unit_mode)
	{
	  /* An arithmetic right shift of a quantity known to be -1 or 0
	     is a no-op.  */
	  if (code == ASHIFTRT
	      && (num_sign_bit_copies (varop, shift_unit_mode)
		  == GET_MODE_PRECISION (shift_unit_mode)))
	    {
	      count = 0;
	      break;
	    }

	  /* If we are doing an arithmetic right shift and discarding all but
	     the sign bit copies, this is equivalent to doing a shift by the
	     bitsize minus one.  Convert it into that shift because it will
	     often allow other simplifications.  */

	  if (code == ASHIFTRT
	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;

	  /* We simplify the tests below and elsewhere by converting
	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	     `make_compound_operation' will convert it to an ASHIFTRT for
	     those machines (such as VAX) that don't have an LSHIFTRT.  */
	  if (code == ASHIFTRT
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	      && val_signbit_known_clear_p (shift_unit_mode,
					    nonzero_bits (varop,
							  shift_unit_mode)))
	    code = LSHIFTRT;

	  if (((code == LSHIFTRT
		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
	       || (code == ASHIFT
		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		   && !((nonzero_bits (varop, shift_unit_mode) << count)
			& GET_MODE_MASK (shift_unit_mode))))
	      && !side_effects_p (varop))
	    varop = const0_rtx;
	}
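
      /* Illustrative example for the nested-shift merging performed in the
	 switch below (added commentary, not from the original source):
	 (ashift:SI (ashift:SI X 2) 3) reaches the shift case with equal
	 codes, so the two counts are simply added, giving
	 (ashift:SI X 5); shifts in opposite directions instead queue an
	 outer AND mask via merge_outer_ops.  */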
      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;

	case MEM:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);

	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
		  .exists (&tmode)))
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, int_mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;

	case SUBREG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
	      && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
		  == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
		mode = inner_mode;
	      continue;
	    }
	  break;

	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
	    {
	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
	      varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
					   XEXP (varop, 0), log2_rtx);
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
	    {
	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
	      varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
					   XEXP (varop, 0), log2_rtx);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
	      && (GET_MODE_UNIT_BITSIZE (result_mode)
		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* fall through */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (int_result_mode)
			       - GET_MODE_PRECISION (int_varop_mode)))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (int_mode)
			 & ~((HOST_WIDE_INT_1U << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
						int_result_mode, varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && int_varop_mode == shift_unit_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (int_varop_mode != int_result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
		  || (int_varop_mode != int_result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
				       int_result_mode);
	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
	      mask_rtx
		= simplify_const_binary_operation (code, int_result_mode,
						   mask_rtx, count_rtx);

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					int_result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
		 sure the result will be masked.  See PR70222.  */
	      if (code == LSHIFTRT
		  && int_mode != int_result_mode
		  && !merge_outer_ops (&outer_op, &outer_const, AND,
				       GET_MODE_MASK (int_result_mode)
				       >> orig_count, int_result_mode,
				       &complement_p))
		break;
	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
		 up outer sign extension (often left and right shift) is
		 hardly more efficient than the original.  See PR70429.  */
	      if (code == ASHIFTRT && int_mode != int_result_mode)
		break;

	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
							     XEXP (varop, 0),
							     count_rtx);
	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;

	case NOT:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;
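
	  /* Illustrative example for the logical case below (added
	     commentary, not from the original source):
	     (lshiftrt:SI (and:SI X (const_int 0xf0)) 4) shifts the constant
	     too, 0xf0 >> 4 == 0x0f, queues an outer (and ... 0x0f) via
	     merge_outer_ops, and continues with the shift applied to X
	     alone, i.e. (and (lshiftrt X 4) 15).  */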
	case IOR:
	case AND:
	case XOR:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);

	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_unit_mode with
		 shift_unit_mode wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					  shift_unit_mode) < 0)
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && int_result_mode != shift_unit_mode
		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					  shift_unit_mode) < 0))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;
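
	  /* Illustrative example for the EQ case below (added commentary,
	     not from the original source): on a target with
	     STORE_FLAG_VALUE == -1, (lshiftrt:SI (eq:SI FOO (const_int 0))
	     31) where nonzero_bits (FOO) == 1 reduces to FOO with an outer
	     (xor ... 1) queued, i.e. the whole expression becomes
	     (xor FOO 1).  */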
11034 /* The following rules apply only to scalars. */
11035 if (shift_mode
!= shift_unit_mode
)
11037 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11039 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11040 says that the sign bit can be tested, FOO has mode MODE, C is
11041 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11042 that may be nonzero. */
11043 if (code
== LSHIFTRT
11044 && XEXP (varop
, 1) == const0_rtx
11045 && GET_MODE (XEXP (varop
, 0)) == int_result_mode
11046 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11047 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
11048 && STORE_FLAG_VALUE
== -1
11049 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
11050 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
11051 int_result_mode
, &complement_p
))
11053 varop
= XEXP (varop
, 0);
11060 /* The following rules apply only to scalars. */
11061 if (shift_mode
!= shift_unit_mode
)
11063 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11065 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11066 than the number of bits in the mode is equivalent to A. */
11067 if (code
== LSHIFTRT
11068 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11069 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1)
11071 varop
= XEXP (varop
, 0);
11076 /* NEG commutes with ASHIFT since it is multiplication. Move the
11077 NEG outside to allow shifts to combine. */
11079 && merge_outer_ops (&outer_op
, &outer_const
, NEG
, 0,
11080 int_result_mode
, &complement_p
))
11082 varop
= XEXP (varop
, 0);
11088 /* The following rules apply only to scalars. */
11089 if (shift_mode
!= shift_unit_mode
)
11091 int_result_mode
= as_a
<scalar_int_mode
> (result_mode
);
11093 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11094 is one less than the number of bits in the mode is
11095 equivalent to (xor A 1). */
11096 if (code
== LSHIFTRT
11097 && count
== (GET_MODE_PRECISION (int_result_mode
) - 1)
11098 && XEXP (varop
, 1) == constm1_rtx
11099 && nonzero_bits (XEXP (varop
, 0), int_result_mode
) == 1
11100 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1,
11101 int_result_mode
, &complement_p
))
11104 varop
= XEXP (varop
, 0);
11108 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11109 that might be nonzero in BAR are those being shifted out and those
11110 bits are known zero in FOO, we can replace the PLUS with FOO.
11111 Similarly in the other operand order. This code occurs when
11112 we are computing the size of a variable-size array. */
11114 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
11115 && count
< HOST_BITS_PER_WIDE_INT
11116 && nonzero_bits (XEXP (varop
, 1), int_result_mode
) >> count
== 0
11117 && (nonzero_bits (XEXP (varop
, 1), int_result_mode
)
11118 & nonzero_bits (XEXP (varop
, 0), int_result_mode
)) == 0)
11120 varop
= XEXP (varop
, 0);
11123 else if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
11124 && count
< HOST_BITS_PER_WIDE_INT
11125 && HWI_COMPUTABLE_MODE_P (int_result_mode
)
11126 && (nonzero_bits (XEXP (varop
, 0), int_result_mode
)
11128 && (nonzero_bits (XEXP (varop
, 0), int_result_mode
)
11129 & nonzero_bits (XEXP (varop
, 1), int_result_mode
)) == 0)
11131 varop
= XEXP (varop
, 1);
11135 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11137 && CONST_INT_P (XEXP (varop
, 1))
11138 && (new_rtx
= simplify_const_binary_operation
11139 (ASHIFT
, int_result_mode
,
11140 gen_int_mode (INTVAL (XEXP (varop
, 1)), int_result_mode
),
11141 gen_int_shift_amount (int_result_mode
, count
))) != 0
11142 && CONST_INT_P (new_rtx
)
11143 && merge_outer_ops (&outer_op
, &outer_const
, PLUS
,
11144 INTVAL (new_rtx
), int_result_mode
,
11147 varop
= XEXP (varop
, 0);
	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, int_result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
		   gen_int_shift_amount (int_result_mode, count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;
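	  /* Illustrative example (assuming SImode, constants written
	     unsigned for readability): adding the sign bit is the same as
	     XORing it in, since the carry out of the sign bit is discarded,
	     so

	       (lshiftrt:SI (plus:SI X (const_int 0x80000000)) (const_int 1))

	     equals

	       (xor:SI (lshiftrt:SI X (const_int 1)) (const_int 0x40000000)).  */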
	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (int_varop_mode, varop);

	      continue;
	    }
	  break;
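	  /* Illustrative example (assuming SImode and STORE_FLAG_VALUE == 1):
	     in

	       (lshiftrt:SI (minus:SI (ashiftrt:SI X (const_int 31)) X)
			    (const_int 31))

	     the MINUS is nonnegative exactly when X <= 0, so its sign bit,
	     extracted by the logical shift, is 1 iff X > 0; the whole
	     expression is therefore (gt:SI X (const_int 0)).  */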
	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);
	      int new_count = count + INTVAL (XEXP (varop_inner, 1));
	      rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
							new_count);
	      varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
					      XEXP (varop_inner, 0),
					      new_count_rtx);
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;
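	  /* For example (illustrative, SImode truncated to QImode):

	       (lshiftrt:QI (truncate:QI (lshiftrt:SI X (const_int 24)))
			    (const_int 2))

	     becomes (truncate:QI (lshiftrt:SI X (const_int 26))); the inner
	     shift count 24 is at least 32 - 8, so every bit that survives
	     comes from beyond the truncation point either way.  */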
	default:
	  break;
	}

      break;
    }

  shift_mode = result_mode;
  if (shift_mode != mode)
    {
      /* We only change the modes of scalar shifts.  */
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
    }

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop,
			     gen_int_shift_amount (shift_mode, count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    /* We only change the modes of scalar shifts.  */
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
				x, GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      int_result_mode = as_a <scalar_int_mode> (result_mode);

      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, int_result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
      else
	x = simplify_gen_binary (outer_op, int_result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop,
			     gen_int_shift_amount (GET_MODE (varop), count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */

static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      scalar_int_mode mode, inner_mode;
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
	continue;
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (inner_mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
				  gen_int_shift_amount (inner_mode, start));
	  else
	    x = XEXP (x, 0);

	  if (mode != inner_mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    {
	      if (REG_P (x) && HARD_REGISTER_P (x)
		  && !can_change_dest_mode (x, 0, mode))
		continue;

	      x = gen_lowpart_SUBREG (mode, x);
	    }
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
	  size = GET_MODE_PRECISION (inner_mode);
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      if (!(GET_CODE (x) == LSHIFTRT
	    && CONST_INT_P (XEXP (x, 1))
	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
	{
	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
	}

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  scalar_int_mode mode;
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
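/* As an illustrative example of the source-side rewrite above (assuming
   SImode and BITS_BIG_ENDIAN == 0):

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))

   becomes

     (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255))

   i.e. shift the field down to bit 0 and mask to its width.  */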
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
      && ! (CONST_SCALAR_INT_P (x)
	    || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (paradoxical_subreg_p (omode, imode))
	return gen_rtx_SUBREG (omode, x, 0);

      poly_int64 offset = byte_lowpart_offset (omode, imode);
      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode).require ();
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  scalar_int_mode int_mode;
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
      && (nonzero_bits (op0, int_mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }
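  /* For example (illustrative): if nonzero_bits says OP0 can only have
     bit 3 set, then (eq OP0 (const_int 8)) holds exactly when OP0 is
     nonzero, so it becomes (ne OP0 (const_int 0)); likewise
     (ne OP0 (const_int 8)) becomes (eq OP0 (const_int 0)).  */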
  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, int_mode)
		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	       == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == ((HOST_WIDE_INT_1U
			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	  gcc_fallthrough ();
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (is_a <scalar_int_mode> (mode, &int_mode)
	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
	       && ((unsigned HOST_WIDE_INT) const_op
		   == (HOST_WIDE_INT_1U
		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
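/* Two illustrative canonicalizations performed above (not exhaustive):
   (ltu X (const_int 7)) becomes (leu X (const_int 6)), preferring the
   smaller constant, and (gtu X (const_int 0)) becomes
   (ne X (const_int 0)), preferring an equality test.  */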
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  scalar_int_mode mode, inner_mode, tmode;
  opt_scalar_int_mode tmode_iter;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
	  && (is_a <scalar_int_mode>
	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (mode)
		  - GET_MODE_PRECISION (inner_mode))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
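      /* For example (illustrative, SImode): in

	   (eq (lshiftrt:SI A (const_int 2)) (lshiftrt:SI B (const_int 2))),

	 if the low two bits of both A and B are known zero, no information
	 is lost by the shifts and we may compare A with B directly.  */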
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					GET_MODE (SUBREG_REG (inner_op0)))) == 0
	      && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					GET_MODE (SUBREG_REG (inner_op1)))) == 0)
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    FOR_EACH_MODE_UNTIL (tmode,
				 as_a <scalar_int_mode> (GET_MODE (op0)))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (!changed)
	    break;
	}

      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */
  while (CONST_INT_P (op1))
    {
      machine_mode raw_mode = GET_MODE (op0);
      scalar_int_mode int_mode;
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (raw_mode) != MODE_INT
	  && ! (raw_mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, raw_mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p
	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
	  && HWI_COMPUTABLE_MODE_P (int_mode))
	op0 = force_to_mode (op0, int_mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (int_mode) - 1),
			     0);

      if (COMPARISON_P (op0))
	{
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;
	}

      if (raw_mode == VOIDmode)
	break;
      scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
		      == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }
	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be
		  all 0s or all 1s.

	       2. The mode extension results in the sign bit being copied on
		  each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
					 &inner_mode)
	      && GET_MODE_PRECISION (inner_mode) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && (((nonzero_bits (a, inner_mode)
			 & ~GET_MODE_MASK (mode)) == 0
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }
	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (paradoxical_subreg_p (op0))
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (mode) == MODE_INT
		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
		   && (code == NE || code == EQ)
		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		       & ~GET_MODE_MASK (mode)) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (inner_mode, op1);

	      if ((nonzero_bits (tem, inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* Fall through.  */

	case ZERO_EXTEND:
	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;
	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (PLUS, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (MINUS, mode,
						   XEXP (op0, 0), op1)) != 0)
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_binary_operation (XOR, mode,
						   XEXP (op0, 1), op1)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;
	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }
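	  /* Illustrative example (SImode, constant written unsigned):

	       (eq (and:SI X (const_int 0x80000000)) (const_int 0))

	     tests that the sign bit of X is clear, i.e.
	     (ge:SI X (const_int 0)); the NE form likewise becomes
	     (lt:SI X (const_int 0)).  */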
	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	       (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	       -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && int_mode_for_size (i, 1).exists (&tmode))
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if ((is_a <scalar_int_mode>
		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND does no longer mask the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }
	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;
	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }
	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
		  .exists (&tmode))
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* FALLTHROUGH */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op = (unsigned HOST_WIDE_INT) const_op
			      << INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && is_int_mode (GET_MODE (op0), &mode)
      && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (inner_mode, op1);
	    }
	}
      else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
		   & ~GET_MODE_MASK (mode)) == 0)
	{
	  tem = gen_lowpart (inner_mode, op1);

	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }
  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
        tmode = tmode_iter.require ();
        if (!HWI_COMPUTABLE_MODE_P (tmode))
          break;
        if (have_insn_for (COMPARE, tmode))
          {
            int zero_extended;

            /* If this is a test for negative, we can make an explicit
               test of the sign bit.  Test this first so we can use
               a paradoxical subreg to extend OP0.  */

            if (op1 == const0_rtx && (code == LT || code == GE)
                && HWI_COMPUTABLE_MODE_P (mode))
              {
                unsigned HOST_WIDE_INT sign
                  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
                op0 = simplify_gen_binary (AND, tmode,
                                           gen_lowpart (tmode, op0),
                                           gen_int_mode (sign, tmode));
                code = (code == LT) ? NE : EQ;
                break;
              }

            /* If the only nonzero bits in OP0 and OP1 are those in the
               narrower mode and this is an equality or unsigned comparison,
               we can use the wider mode.  Similarly for sign-extended
               values, in which case it is true for all comparisons.  */
            zero_extended = ((code == EQ || code == NE
                              || code == GEU || code == GTU
                              || code == LEU || code == LTU)
                             && (nonzero_bits (op0, tmode)
                                 & ~GET_MODE_MASK (mode)) == 0
                             && ((CONST_INT_P (op1)
                                  || (nonzero_bits (op1, tmode)
                                      & ~GET_MODE_MASK (mode)) == 0)));

            if (zero_extended
                || ((num_sign_bit_copies (op0, tmode)
                     > (unsigned int) (GET_MODE_PRECISION (tmode)
                                       - GET_MODE_PRECISION (mode)))
                    && (num_sign_bit_copies (op1, tmode)
                        > (unsigned int) (GET_MODE_PRECISION (tmode)
                                          - GET_MODE_PRECISION (mode)))))
              {
                /* If OP0 is an AND and we don't have an AND in MODE either,
                   make a new AND in the proper mode.  */
                if (GET_CODE (op0) == AND
                    && !have_insn_for (AND, mode))
                  op0 = simplify_gen_binary (AND, tmode,
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 0)),
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 1)));

                if (zero_extended)
                  {
                    op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                              op0, mode);
                    op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                              op1, mode);
                  }
                else
                  {
                    op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                              op0, mode);
                    op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                              op1, mode);
                  }
                break;
              }
          }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
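
/* For example, on a hypothetical machine with no QImode compare but an
   SImode one, the loop above rewrites the negative test

        (lt (reg:QI 100) (const_int 0))

   as an explicit sign-bit test in the wider mode,

        (ne (and:SI (subreg:SI (reg:QI 100) 0) (const_int 128))
            (const_int 0))

   while equality and unsigned comparisons of values known to be zero- or
   sign-extended are simply performed in SImode directly.  */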
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
        return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return 2 + 2 * count_rtxs (x0)
               + count_rtxs (x == XEXP (x1, 0)
                             ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return 2 + 2 * count_rtxs (x1)
               + count_rtxs (x == XEXP (x0, 0)
                             ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
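
/* A small worked example of the sharing-aware count (hypothetical
   registers): for the shared-operand tree

        (plus:SI (reg:SI 100) (plus:SI (reg:SI 100) (reg:SI 101)))

   the outer PLUS notices that its first operand also occurs inside its
   second operand, so the shared (reg:SI 100) is charged twice without
   being traversed twice: 2 + 2 * count_rtxs (reg 100) + 1 = 5 rtxs.  */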
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
        {
          reg_stat_type *rsp = &reg_stat[r];
          rsp->last_set_table_tick = label_tick;
        }

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        /* Check for identical subexpressions.  If x contains
           identical subexpressions we only have to traverse one of
           them.  */
        if (i == 0 && ARITHMETIC_P (x))
          {
            /* Note that at this point x1 has already been
               processed.  */
            rtx x0 = XEXP (x, 0);
            rtx x1 = XEXP (x, 1);

            /* If x0 and x1 are identical then there is no need to
               process x0.  */
            if (x0 == x1)
              break;

            /* If x0 is identical to a subexpression of x1 then while
               processing x1, x0 has already been processed.  Thus we
               are done with x.  */
            if (ARITHMETIC_P (x1)
                && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
              break;

            /* If x1 is identical to a subexpression of x0 then we
               still have to process the rest of x0.  */
            if (ARITHMETIC_P (x0)
                && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
              {
                update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
                break;
              }
          }

        update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
         our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
         it isn't going to be useful and will take a lot of time to process,
         so just use the CLOBBER.  */

      if (tem)
        {
          if (ARITHMETIC_P (tem)
              && GET_CODE (XEXP (tem, 0)) == CLOBBER
              && GET_CODE (XEXP (tem, 1)) == CLOBBER)
            tem = XEXP (tem, 0);
          else if (count_occurrences (value, reg, 1) >= 2)
            {
              /* If there are two or more occurrences of REG in VALUE,
                 prevent the value from growing too much.  */
              if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
                tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
            }

          value = replace_rtx (copy_rtx (value), reg, tem);
        }
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
        rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
          || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
        rsp->last_set_invalid = 1;
      else
        rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
        value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
          && HWI_COMPUTABLE_MODE_P (mode))
        mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
        = num_sign_bit_copies (value, GET_MODE (reg));
    }
}
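
/* For instance, for the (hypothetical) increment

        (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   VALUE mentions REG itself, so before recording we substitute the
   previously recorded value of (reg:SI 100) -- or a
   (clobber (const_int 0)) when that value is unknown or would grow past
   MAX_LAST_VALUE_RTL -- so that last_set_value never refers circularly
   to the register it describes.  */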
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
        record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
         show that we don't know the value.  We can handle a SUBREG if it's
         the low part, but we must be careful with paradoxical SUBREGs on
         RISC architectures because we cannot strip e.g. an extension around
         a load and record the naked load since the RTL middle-end considers
         that the upper bits are defined according to LOAD_EXTEND_OP.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
        record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
               && GET_CODE (SET_DEST (setter)) == SUBREG
               && SUBREG_REG (SET_DEST (setter)) == dest
               && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
                            BITS_PER_WORD)
               && subreg_lowpart_p (SET_DEST (setter)))
        record_value_for_reg (dest, record_dead_insn,
                              WORD_REGISTER_OPERATIONS
                              && paradoxical_subreg_p (SET_DEST (setter))
                              ? SET_SRC (setter)
                              : gen_lowpart (GET_MODE (dest),
                                             SET_SRC (setter)));
      else if (GET_CODE (setter) == CLOBBER_HIGH)
        {
          reg_stat_type *rsp = &reg_stat[REGNO (dest)];
          if (rsp->last_set_value
              && reg_is_clobbered_by_clobber_high
                   (REGNO (dest), GET_MODE (rsp->last_set_value),
                    XEXP (setter, 0)))
            record_value_for_reg (dest, NULL, NULL_RTX);
        }
      else
        record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
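
/* Example of the SUBREG case handled above (hypothetical registers):
   on a word-sized target,

        (set (subreg:SI (reg:QI 100) 0) (reg:SI 101))

   stores through a lowpart SUBREG of a narrower-than-word register, so
   we record gen_lowpart (QImode, (reg:SI 101)) as the new value of
   register 100 -- or, on a WORD_REGISTER_OPERATIONS target where the
   paradoxical SUBREG store defines the whole word, the SET_SRC itself.  */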
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
          && REG_P (XEXP (link, 0)))
        {
          unsigned int regno = REGNO (XEXP (link, 0));
          unsigned int endregno = END_REGNO (XEXP (link, 0));

          for (i = regno; i < endregno; i++)
            {
              reg_stat_type *rsp;

              rsp = &reg_stat[i];
              rsp->last_death = insn;
            }
        }
      else if (REG_NOTE_KIND (link) == REG_INC)
        record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
        {
          reg_stat_type *rsp;

          rsp = &reg_stat[i];
          rsp->last_set_invalid = 1;
          rsp->last_set = insn;
          rsp->last_set_value = 0;
          rsp->last_set_mode = VOIDmode;
          rsp->last_set_nonzero_bits = 0;
          rsp->last_set_sign_bit_copies = 0;
          rsp->last_death = 0;
          rsp->truncated_to_mode = VOIDmode;
        }

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
         the return value register is set at this LUID.  We could
         still replace a register with the return value from the
         wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
          || REGNO (SET_DEST (set)) != regno
          || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
        {
          links = links->next;
          continue;
        }

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
        {
          if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
            rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
        }

      if (REG_P (SET_SRC (set)))
        {
          regno = REGNO (SET_SRC (set));
          links = LOG_LINKS (insn);
        }
      else
        break;
    }
}
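
/* For example (hypothetical): seeing

        (subreg:QI (reg:SI 100) 0)

   with SUBREG_PROMOTED_VAR_P and SUBREG_PROMOTED_UNSIGNED_P set tells us
   the SImode register holds a zero-extended QImode value, so the
   last_set_nonzero_bits recorded for its setter can be masked down to
   GET_MODE_MASK (QImode) -- information that would otherwise be lost once
   the SUBREG itself is combined away.  */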
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
        return true;

      truncated_mode = GET_MODE (x);
      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
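
/* Illustrative case (hypothetical target): if truncation from SImode to
   QImode is not a no-op but we have seen register 100 accessed as

        (subreg:QI (reg:SI 100) 0)

   then truncated_to_mode for register 100 becomes QImode, and a later
   (truncate:QI (reg:SI 100)) can be rewritten as the cheaper lowpart
   SUBREG, since the value is already known to be truncated.  */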
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          default:
            break;
          }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
        {
          reg_stat_type *rsp = &reg_stat[j];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                         (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                          regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpressions we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
                GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
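
/* A short usage sketch (hypothetical registers): if the last recorded
   set of register 100 in this basic block was

        (set (reg:SI 100) (ashiftrt:SI (reg:SI 101) (const_int 31)))

   then get_last_value returns the ASHIFTRT expression, possibly with
   (reg:SI 101) replaced by (clobber (const_int 0)) if register 101 has
   since been modified.  Even the clobbered form is useful: it still
   proves the value is either -1 or 0.  */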
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
static rtx reg_dead_reg;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  if (GET_CODE (x) == CLOBBER_HIGH
      && !reg_is_clobbered_by_clobber_high (reg_dead_reg, XEXP (x, 0)))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);
  reg_dead_reg = reg;

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
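
/* Sketch of the backward scan (hypothetical insns): to decide whether
   (reg:SI 100) is dead at insn C in

        B: (clobber (reg:SI 100))
        C: ... no mention of reg 100 ...

   we walk C, then B; the CLOBBER seen at B sets reg_dead_flag to 1, so
   the register is reported dead at C.  Had B been a SET of the register,
   the flag would be -1 and the register reported live.  */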
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
         FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
        {
          rtx_insn *insn = prev_real_nondebug_insn (to_insn);
          while (insn
                 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
                 && DF_INSN_LUID (insn) >= from_luid)
            {
              if (dead_or_set_regno_p (insn, regno))
                {
                  if (find_regno_note (insn, REG_DEAD, regno))
                    where_dead = insn;
                  break;
                }

              insn = prev_real_nondebug_insn (insn);
            }
        }

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && partial_subreg_p (GET_MODE (XEXP (note, 0)),
                                             GET_MODE (x))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some
         piece of every register in the expression is used by
         this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && !read_modify_subreg_p (dest)))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
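
/* Example (hypothetical insns): suppose (reg:SI 101) died in I2 and the
   combination folds I2's computation into I3, so that I3's new pattern
   now uses (reg:SI 101) directly.  move_deaths finds the stale REG_DEAD
   note via last_death, removes it from the old insn, and chains a fresh
   (expr_list:REG_DEAD (reg:SI 101)) onto *PNOTES; distribute_notes later
   places that note on I3, where the register now actually dies.  */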
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              poly_int64 old_size, args_size = get_args_size (note);
              /* fixup_args_size_notes looks at REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
                 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
              gcc_assert (maybe_ne (old_size, args_size)
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
        case REG_CALL_NOCF_CHECK:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;

        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if it is for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for register which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }

          /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
             but we can't tell which at this point.  We must reset any
             expectations we had about the value that was previously
             stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
             and, if appropriate, restore its previous value, but we
             don't have enough information for that at this point.  */
          else
            {
              record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

              /* Otherwise, if this register is now referenced in i2
                 then the register used to be modified in one of the
                 original insns.  If it was i3 (say, in an unused
                 parallel), it's now completely gone, so the note can
                 be discarded.  But if it was modified in i2, i1 or i0
                 and we still reference it in i2, then we're
                 referencing the previous value, and since the
                 register was modified and REG_UNUSED, we know that
                 the previous value is now dead.  So, if we only
                 reference the register in i2, we change the note to
                 REG_DEAD, to reflect the previous value.  However, if
                 we're also setting or clobbering the register as
                 scratch, we know (because the register was not
                 referenced in i3) that it's unused, just as it was
                 unused before, and we place the note in i2.  */
              if (from_insn != i3 && i2 && INSN_P (i2)
                  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                {
                  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
                    PUT_REG_NOTE_KIND (note, REG_DEAD);
                  if (! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i2, REG_NOTE_KIND (note),
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i2, REG_NOTE_KIND (note),
                                          XEXP (note, 0))))
                    place = i2;
                }
            }
          break;

        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They must
             be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             there.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;

        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                {
                  /* If the new I2 sets the same register that is marked
                     dead in the note, we do not in general know where to
                     put the note.  One important case we _can_ handle is
                     when the note comes from I3.  */
                  if (from_insn == i3)
                    place = i3;
                  else
                    break;
                }
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is all
                     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
                     into a REG_UNUSED note instead.  Don't delete sets to
                     global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn, NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          unsigned int regno = REGNO (XEXP (note, 0));
                          reg_stat_type *rsp = &reg_stat[regno];
                          if (rsp->last_set == tem_insn)
                            record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;

                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not whether it is totally
             replaced, which is what `dead_or_set_p' checks, so also check
             for it being set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.]  */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs (i, reg_raw_mode[i]))
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
            added_notes_insn = place;
        }

      if (place2)
        {
          add_shallow_copy_of_reg_note (place2, note);

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
            added_notes_insn = place2;
        }
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
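
/* A sketch of what this fixes (hypothetical insns): before combining,

        I1: (set (reg:SI 100) ...)
        I2: ... use of (reg:SI 100) ...     LOG_LINK: I2 -> I1
        I3: ... use of (reg:SI 100) ...

   if the combination removes the use in I2, the link I2 -> I1 must
   migrate forward to the next insn that still uses the register; the
   search above finds I3 and attaches the link there, stopping early if
   an intervening insn sets the register instead.  */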
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}