/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.
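
   For example (an illustrative sketch, not part of the original
   comment): if insn A is

       (set (reg 116) (plus (reg 115) (const_int 4)))

   and insn B, whose LOG_LINK for reg 116 points back at A, is

       (set (mem (reg 116)) (reg 117))

   then substituting A's source into B gives the tentative pattern

       (set (mem (plus (reg 115) (const_int 4))) (reg 117))

   which is kept only if the machine description recognizes it.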
   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "params.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;

struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn			*last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn			*last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */
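
  /* An illustrative instance of the redundancy check described above
     (not from the original comment): if nonzero_bits for register n is
     0xff, i.e. only the low 8 bits can ever be nonzero, then a later
     (and (reg n) (const_int 255)) turns off no additional bits and can
     be replaced by (reg n) outright.  */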

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;

/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;

/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

#define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
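
/* Illustrative use of the macros above (a sketch, not from the original
   sources): visiting every insn recorded as feeding INSN looks like

       struct insn_link *link;
       FOR_EACH_LOG_LINK (link, insn)
	 process (link->insn, link->regno);

   where `process' stands for any hypothetical per-link action; the macro
   just walks the singly-linked list at uid_log_links[INSN_UID (INSN)].  */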

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
					 machine_mode,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
						machine_mode,
						unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, machine_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);

/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}

/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
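
/* Illustrative use of SUBST (a sketch, not from the original sources):

       SUBST (SET_SRC (set), newsrc);

   records the old SET_SRC in undobuf.undos and then stores NEWSRC in
   its place, so that a later undo_all () can restore the insn if the
   rewritten pattern turns out not to be recognizable.  */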

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)

/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */
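
/* A worked example with made-up numbers (not from the original text):
   if I2 has cost 4 and I3 has cost 8, the original sequence costs 12.
   A replacement consisting of a single NEWPAT of cost 10 is allowed,
   while one of cost 13 is rejected: a combination is refused only when
   both totals are known (greater than zero) and the new cost is
   strictly greater than the old one.  */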
static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}

/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}

/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
            {
              unsigned int regno = DF_REF_REGNO (def);
              rtx_insn *use_insn;

              if (!next_use[regno])
                continue;

              if (!can_combine_def_p (def))
                continue;

              use_insn = next_use[regno];
              next_use[regno] = NULL;

              if (BLOCK_FOR_INSN (use_insn) != bb)
                continue;

              /* flow.c claimed:

                 We don't build a LOG_LINK for hard registers contained
                 in ASM_OPERANDs.  If these registers get replaced,
                 we might wind up changing the semantics of the insn,
                 even if reload can make what appear to be valid
                 assignments later.  */
              if (regno < FIRST_PSEUDO_REGISTER
                  && asm_noperands (PATTERN (use_insn)) >= 0)
                continue;

              /* Don't add duplicate links between instructions.  */
              struct insn_link *links;
              FOR_EACH_LOG_LINK (links, use_insn)
                if (insn == links->insn && regno == links->regno)
                  break;

              if (!links)
		LOG_LINKS (use_insn)
                  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
            }

          FOR_EACH_INSN_USE (use, insn)
            if (can_combine_use_p (use))
              next_use[DF_REF_REGNO (use)] = insn;
        }
    }

  free (next_use);
}
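
/* For instance (an illustrative sketch, not from the original sources),
   given the block

       insn 10:  (set (reg 200) (mem (reg 199)))
       insn 11:  (set (reg 201) (plus (reg 200) (const_int 1)))

   the reverse scan above first records insn 11 as the pending use of
   reg 200, then, on reaching the definition in insn 10, attaches the
   LOG_LINK (insn 10, regno 200) to insn 11.  */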

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}

/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
            rtx links;

            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
		         insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_rtx_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
						optimize_this_for_speed_p);
	    if (dump_file)
	      fprintf (dump_file, "insn_cost %d: %d\n",
		       INSN_UID (insn), INSN_COST (insn));
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && last_combined_insn->deleted ())
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BARRIER_P (last_combined_insn)
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may have already been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun, "insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
         see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
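
/* As an illustration (not from the original comment): on a target whose
   promote_function_mode widens QImode arguments to SImode by zero
   extension, a QImode argument arriving in an SImode register is
   recorded above as

       (zero_extend:SI (clobber:QI (const_int 0)))

   so a redundant zero-extension of that argument at the head of the
   function can later be deleted.  */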

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  if (GET_MODE_PRECISION (mode) < prec
      && CONST_INT_P (src)
      && INTVAL (src) > 0
      && val_signbit_known_set_p (mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));

  return src;
}
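
/* A worked example (illustrative, not from the original comment): with
   mode == QImode (precision 8, mask 0xff) and prec == 32, the constant
   0x80 is non-negative as a host integer but has the QImode sign bit
   set, so it is rewritten as 0x80 | ~0xff, i.e. ...ffffff80, the value
   a sign-extending machine would really produce.  */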

/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
1684 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1686 If we are setting only a portion of X and we can't figure out what
1687 portion, assume all bits will be used since we don't know what will
1690 Similarly, set how many bits of X are known to be copies of the sign bit
1691 at all locations in the function. This is the smallest number implied
1695 set_nonzero_bits_and_sign_copies (rtx x
, const_rtx set
, void *data
)
1697 rtx_insn
*insn
= (rtx_insn
*) data
;
1700 && REGNO (x
) >= FIRST_PSEUDO_REGISTER
1701 /* If this register is undefined at the start of the file, we can't
1702 say what its contents were. */
1703 && ! REGNO_REG_SET_P
1704 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
), REGNO (x
))
1705 && HWI_COMPUTABLE_MODE_P (GET_MODE (x
)))
1707 reg_stat_type
*rsp
= ®_stat
[REGNO (x
)];
1709 if (set
== 0 || GET_CODE (set
) == CLOBBER
)
1711 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1712 rsp
->sign_bit_copies
= 1;
1716 /* If this register is being initialized using itself, and the
1717 register is uninitialized in this basic block, and there are
1718 no LOG_LINKS which set the register, then part of the
1719 register is uninitialized. In that case we can't assume
1720 anything about the number of nonzero bits.
1722 ??? We could do better if we checked this in
1723 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1724 could avoid making assumptions about the insn which initially
1725 sets the register, while still using the information in other
1726 insns. We would have to be careful to check every insn
1727 involved in the combination. */
1730 && reg_referenced_p (x
, PATTERN (insn
))
1731 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn
)),
1734 struct insn_link
*link
;
1736 FOR_EACH_LOG_LINK (link
, insn
)
1737 if (dead_or_set_p (link
->insn
, x
))
1741 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1742 rsp
->sign_bit_copies
= 1;
1747 /* If this is a complex assignment, see if we can convert it into a
1748 simple assignment. */
1749 set
= expand_field_assignment (set
);
1751 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1752 set what we know about X. */
1754 if (SET_DEST (set
) == x
1755 || (paradoxical_subreg_p (SET_DEST (set
))
1756 && SUBREG_REG (SET_DEST (set
)) == x
))
1757 update_rsp_from_reg_equal (rsp
, insn
, set
, x
);
1760 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1761 rsp
->sign_bit_copies
= 1;
1766 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1767 optionally insns that were previously combined into I3 or that will be
1768 combined into the merger of INSN and I3. The order is PRED, PRED2,
1769 INSN, SUCC, SUCC2, I3.
1771 Return 0 if the combination is not allowed for any reason.
1773 If the combination is allowed, *PDEST will be set to the single
1774 destination of INSN and *PSRC to the single source, and this function
1778 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1779 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1780 rtx
*pdest
, rtx
*psrc
)
1787 bool all_adjacent
= true;
1788 int (*is_volatile_p
) (const_rtx
);
1794 if (next_active_insn (succ2
) != i3
)
1795 all_adjacent
= false;
1796 if (next_active_insn (succ
) != succ2
)
1797 all_adjacent
= false;
1799 else if (next_active_insn (succ
) != i3
)
1800 all_adjacent
= false;
1801 if (next_active_insn (insn
) != succ
)
1802 all_adjacent
= false;
1804 else if (next_active_insn (insn
) != i3
)
1805 all_adjacent
= false;
1807 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1808 or a PARALLEL consisting of such a SET and CLOBBERs.
1810 If INSN has CLOBBER parallel parts, ignore them for our processing.
1811 By definition, these happen during the execution of the insn. When it
1812 is merged with another insn, all bets are off. If they are, in fact,
1813 needed and aren't also supplied in I3, they may be added by
1814 recog_for_combine. Otherwise, it won't match.
1816 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1819 Get the source and destination of INSN. If more than one, can't
1822 if (GET_CODE (PATTERN (insn
)) == SET
)
1823 set
= PATTERN (insn
);
1824 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1825 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1827 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1829 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1831 switch (GET_CODE (elt
))
1833 /* This is important to combine floating point insns
1834 for the SH4 port. */
1836 /* Combining an isolated USE doesn't make sense.
1837 We depend here on combinable_i3pat to reject them. */
1838 /* The code below this loop only verifies that the inputs of
1839 the SET in INSN do not change. We call reg_set_between_p
1840 to verify that the REG in the USE does not change between
1842 If the USE in INSN was for a pseudo register, the matching
1843 insn pattern will likely match any register; combining this
1844 with any other USE would only be safe if we knew that the
1845 used registers have identical values, or if there was
1846 something to tell them apart, e.g. different modes. For
1847 now, we forgo such complicated tests and simply disallow
1848 combining of USES of pseudo registers with any other USE. */
1849 if (REG_P (XEXP (elt
, 0))
1850 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1852 rtx i3pat
= PATTERN (i3
);
1853 int i
= XVECLEN (i3pat
, 0) - 1;
1854 unsigned int regno
= REGNO (XEXP (elt
, 0));
1858 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1860 if (GET_CODE (i3elt
) == USE
1861 && REG_P (XEXP (i3elt
, 0))
1862 && (REGNO (XEXP (i3elt
, 0)) == regno
1863 ? reg_set_between_p (XEXP (elt
, 0),
1864 PREV_INSN (insn
), i3
)
1865 : regno
>= FIRST_PSEUDO_REGISTER
))
1872 /* We can ignore CLOBBERs. */
1877 /* Ignore SETs whose result isn't used but not those that
1878 have side-effects. */
1879 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1880 && insn_nothrow_p (insn
)
1881 && !side_effects_p (elt
))
1884 /* If we have already found a SET, this is a second one and
1885 so we cannot combine with this insn. */
1893 /* Anything else means we can't combine. */
1899 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1900 so don't do anything with it. */
1901 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1910 /* The simplification in expand_field_assignment may call back to
1911 get_last_value, so set safe guard here. */
1912 subst_low_luid
= DF_INSN_LUID (insn
);
1914 set
= expand_field_assignment (set
);
1915 src
= SET_SRC (set
), dest
= SET_DEST (set
);
1917 /* Do not eliminate user-specified register if it is in an
1918 asm input because we may break the register asm usage defined
1919 in GCC manual if allow to do so.
1920 Be aware that this may cover more cases than we expect but this
1921 should be harmless. */
1922 if (REG_P (dest
) && REG_USERVAR_P (dest
) && HARD_REGISTER_P (dest
)
1923 && extract_asm_operands (PATTERN (i3
)))
  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after SUCC but before I3.  */
      || (!all_adjacent
	  && ((succ2
	       && (reg_used_between_p (dest, succ2, i3)
		   || reg_used_between_p (dest, succ, succ2)))
	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 a user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;
  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}

  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
		  ? volatile_refs_p
		  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;

  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (JUMP_P (i3)
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	      || (pred2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	      || (succ != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	      || (succ2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
	return 0;
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
	  && ! all_adjacent)
	return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = expand_field_assignment (x);
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);
      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
					GET_MODE (inner_dest))))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
	return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (subdest))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
	  && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
	      || (REGNO (subdest) != ARG_POINTER_REGNUM
		  || ! fixed_regs[REGNO (subdest)]))
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   and division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	       || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
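/* Illustrative examples (not from the original sources): for
   (mult:SI (reg:SI 100) (const_int 4)) the function returns 0, since a
   multiplication by a power of two is really a shift; for
   (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102)) it returns 1
   via the BINARY_P recursion into the inner MULT.  */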
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (! INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
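/* Worked example (illustrative, not from the original sources): suppose the
   return value occupies hard regs 0..3, so info->regno == 0,
   info->nregs == 4 and info->mask == 0xf.  A store that fully clobbers
   regs 2..3 arrives with regno == 2 and nregs == 2: new_mask starts as
   (2U << 1) - 1 == 0x3, is shifted left by regno - info->regno == 2 to
   give 0xc, and the final info->mask &= ~0xc leaves 0x3 -- only regs 0..1
   remain (possibly) live parts of the return value.  */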
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
	    && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !added_sets
	  && !REG_USERVAR_P (x));
}
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
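/* Illustrative examples (not from the original sources): with REG being
   (reg:SI 100), both

	(subreg:HI (reg:SI 100) 0)
   and
	(strict_low_part (subreg:HI (reg:SI 100) 0))

   refer to a subword of register 100 and make this function return true,
   while a full (reg:SI 100) destination does not.  */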
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
	  break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}
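/* Illustrative example (not from the original sources):

	(parallel [(set (reg:SI 100) (reg:SI 102))
		   (set (reg:SI 101) (reg:SI 103))
		   (clobber (reg:CC 17))])

   satisfies is_parallel_of_n_reg_sets (pat, 2): two leading register SETs
   followed only by CLOBBERs (the CC register number is hypothetical).
   A (clobber (const_int 0)), the placeholder combine uses elsewhere for
   an unrecognizable pattern, is explicitly rejected.  */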
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
	return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
	  return false;
    }

  return true;
}
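/* Illustrative example (not from the original sources): the PARALLEL

	(parallel [(set (reg:SI 100) (reg:SI 101))
		   (set (reg:SI 102) (plus:SI (reg:SI 100) (const_int 1)))])

   cannot be split in order: inside a PARALLEL all reads happen before any
   write, so the second SET reads the *old* value of reg 100, but after a
   sequential split it would read the freshly written value.
   reg_referenced_p catches exactly this overlap.  */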
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;

  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	     an extension.  */
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
	ngood += 2;

      if (ngood < 2 && nshift < 2)
	return 0;
    }
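  /* Illustrative four-insn chain of the kind targeted above (not from the
     original sources), a read-modify-write bitfield update:

	I0: (set (reg:SI 100) (mem:SI (reg:SI 99)))
	I1: (set (reg:SI 101) (and:SI (reg:SI 100) (const_int -256)))
	I2: (set (reg:SI 102) (ior:SI (reg:SI 101) (reg:SI 103)))
	I3: (set (mem:SI (reg:SI 99)) (reg:SI 102))

     Here I0 loads the memory that I3 stores back, so the address of I3's
     destination appears in I0's source and the heuristic lets the
     combination proceed, hoping for a single insert-style insn.  */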
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));
    }

  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
	if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	     || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	    && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					SET_DEST (XVECEXP (p2, 0, i))))
	  break;

      /* Make sure this PARALLEL is not an asm.  We do not allow combining
	 that usually (see can_combine_p), so do not here either.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
	if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	    && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
	  break;

      if (i == XVECLEN (p2, 0))
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      combine_merges++;

	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
	      i2src = SET_SRC (XVECEXP (p2, 0, i));
	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we validate
		 the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (CONST_INT_P (XEXP (dest, 1))
	      && CONST_INT_P (XEXP (dest, 2)))
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  width = GET_MODE_PRECISION (GET_MODE (dest));
	  offset = 0;
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
	    offset += GET_MODE_PRECISION (GET_MODE (dest));
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0)
	{
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp_expr);

	  wide_int o
	    = wi::insert (rtx_mode_t (outer, GET_MODE (SET_DEST (temp_expr))),
			  rtx_mode_t (inner, GET_MODE (dest)),
			  offset, width);

	  combine_merges++;
	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
	  i2dest = SET_DEST (temp_expr);
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  /* Replace the source in I2 with the new constant and make the
	     resulting insn the new pattern for I3.  Then skip to where we
	     validate the pattern.  Everything was set up above.  */
	  SUBST (SET_SRC (temp_expr),
		 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));

	  newpat = PATTERN (i2);

	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
	  goto validate_replacement;
	}
    }
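  /* Worked example of the constant merge above (illustrative, not from the
     original sources): if I2 is
	(set (reg:SI 100) (const_int 0x12345678))
     and I3 is
	(set (strict_low_part (subreg:HI (reg:SI 100) 0)) (const_int 0xabcd))
     then width is 16, offset is 0, and wi::insert produces 0x1234abcd, so
     the pair collapses to a single load of that merged constant.  */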
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
	     SET_DEST (PATTERN (i1)));
      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
				 &i1dest, &i1src))
      || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
				 &i0dest, &i0src)))
    {
      undo_all ();
      return 0;
    }

  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
  i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
  i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
  i0dest_killed = i0 && dead_or_set_p (i0, i0dest);

  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
			     && reg_overlap_mentioned_p (i0dest, i2src))));

  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
			  &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;

  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
	mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
  if (AUTO_INC_DEC)
    for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
	      || (i1 != 0
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
	{
	  undo_all ();
	  return 0;
	}
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
		     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
		     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			 && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }

  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
	i0pat = copy_rtx (PATTERN (i0));
    }
  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;

  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
					    &cc_use_insn)))
	{
	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
	  compare_code = simplify_compare_const (compare_code,
						 GET_MODE (i2dest), op0, &op1);
	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
	}

      /* Do the rest only if op1 is const0_rtx, which may be the
	 result of simplification.  */
      if (op1 == const0_rtx)
	{
	  /* If a single use of the CC is found, prepare to modify it
	     when SELECT_CC_MODE returns a new CC-class mode, or when
	     the above simplify_compare_const() returned a new comparison
	     operator.  undobuf.other_insn is assigned the CC use insn
	     when modifying it.  */
	  if (cc_use_loc)
	    {
#ifdef SELECT_CC_MODE
	      machine_mode new_mode
		= SELECT_CC_MODE (compare_code, op0, op1);
	      if (new_mode != orig_compare_mode
		  && can_change_dest_mode (SET_DEST (newpat),
					   added_sets_2, new_mode))
		{
		  unsigned int regno = REGNO (newpat_dest);
		  compare_mode = new_mode;
		  if (regno < FIRST_PSEUDO_REGISTER)
		    newpat_dest = gen_rtx_REG (compare_mode, regno);
		  else
		    {
		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		      newpat_dest = regno_reg_rtx[regno];
		    }
		}
#endif
	      /* Cases for modifying the CC-using comparison.  */
	      if (compare_code != orig_compare_code
		  /* ??? Do we need to verify the zero rtx?  */
		  && XEXP (*cc_use_loc, 1) == const0_rtx)
		{
		  /* Replace cc_use_loc with entire new RTX.  */
		  SUBST (*cc_use_loc,
			 gen_rtx_fmt_ee (compare_code, compare_mode,
					 newpat_dest, const0_rtx));
		  undobuf.other_insn = cc_use_insn;
		}
	      else if (compare_mode != orig_compare_mode)
		{
		  /* Just replace the CC reg with a new mode.  */
		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
		  undobuf.other_insn = cc_use_insn;
		}
	    }

	  /* Now we modify the current newpat:
	     First, SET_DEST(newpat) is updated if the CC mode has been
	     altered.  For targets without SELECT_CC_MODE, this should be
	     optimized away.  */
	  if (compare_mode != orig_compare_mode)
	    SUBST (SET_DEST (newpat), newpat_dest);
	  /* This is always done to propagate i2src into newpat.  */
	  SUBST (SET_SRC (newpat),
		 gen_rtx_COMPARE (compare_mode, op0, op1));
	  /* Create new version of i2pat if needed; the below PARALLEL
	     creation needs this to work correctly.  */
	  if (! rtx_equal_p (i2src, op0))
	    i2pat = gen_rtx_SET (i2dest, op0);
	  i2_is_used = 1;
	}
    }
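  /* Illustrative shape of the PARALLEL the block above prepares for
     (not from the original sources; the CC register is hypothetical):

	(parallel [(set (reg:CCZ 17)
			(compare:CCZ (plus:SI (reg:SI 100) (reg:SI 101))
				     (const_int 0)))
		   (set (reg:SI 100)
			(plus:SI (reg:SI 100) (reg:SI 101)))])

     i.e. one arithmetic operation feeding both the flags and the register
     result, which many non-CC0 machine descriptions match directly.  */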
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
	    }

	  subst_low_luid = DF_INSN_LUID (i2);
	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
	}

      n_occurrences = 0;		/* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
	 copy of I2SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I1SRC for I1DEST
	 later.  Likewise if I0 feeds into I2, either directly or indirectly
	 through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
		      (i1_feeds_i2_n && i1dest_in_i1src)
		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			  && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
	 This happens if I1DEST is mentioned in I2 and dies there, and
	 has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	   && i1_feeds_i2_n
	   && dead_or_set_p (i2, i1dest)
	   && !reg_overlap_mentioned_p (i1dest, newpat))
	  /* Before we can do this substitution, we must redo the test done
	     above (see detailed comments there) that ensures I1DEST isn't
	     mentioned in any SETs in NEWPAT that are field assignments.  */
	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
	 for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
	i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
	 copy of I1SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I0SRC for I0DEST
	 later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
		      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
	   && !reg_overlap_mentioned_p (i0dest, newpat))
	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      /* If the following substitution will modify I0SRC, make a copy of it
	 for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
	i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
			   || (i0_feeds_i2_n)))
	i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
	      > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	  && (n_occurrences + added_sets_0
	      + (added_sets_1 && i0_feeds_i1_n)
	      + (added_sets_2 && i0_feeds_i2_n)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      combine_extras++;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_0)
	XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
	{
	  rtx t = i1pat;
	  if (i0_feeds_i1_n)
	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
      if (added_sets_2)
	{
	  rtx t = i2pat;
	  if (i1_feeds_i2_n)
	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
		       i0_feeds_i1_n && i0dest_in_i0src);
	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }
  /* We have recognized nothing yet.  */
  insn_code_number = -1;

  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
	  && insn_nothrow_p (i3)
	  && !side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && insn_nothrow_p (i3)
	       && !side_effects_p (SET_SRC (set0)))
	{
	  newpat = set1;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    changed_i3_dest = 1;
	}

      if (insn_code_number < 0)
	newpat = oldpat;
    }

  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}

      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || ! use_crosses_set_p (PATTERN (m_split_insn),
					   DF_INSN_LUID (i2))))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }

	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && CONST_INT_P (XEXP (*split, 1))
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
	  if (split_code == PLUS
	      && GET_CODE (XEXP (*split, 0)) == MULT
	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
	    {
	      rtx nsplit = XEXP (*split, 0);
	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
						       XEXP (nsplit, 0),
						       GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y)" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }
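	  /* Sketch of the identity splits above (added; register numbers
	     are illustrative only): an unrecognizable
		 r3 = (r1 + r2) op (r1 + r2)
	     becomes the two-insn sequence
		 r0 = r1 + r2 ;  r3 = r0 op r0
	     where r0 is NEWDEST, so each half can be recognized on its
	     own.  */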
	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a register-register
     copy.  This saves at least one insn, more if register allocation can
     eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				   DF_INSN_LUID (i2))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }
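  /* An illustrative instance of the case above (added): the unrecognized
	 (parallel [(set (reg:SI 1) (sign_extend:SI (mem:HI A)))
		    (set (reg:HI 2) (mem:HI A))])
     is rewritten so that NEWI2PAT is the single load
	 (set (reg:SI 1) (sign_extend:SI (mem:HI A)))
     and NEWPAT copies the low part of (reg:SI 1) into (reg:HI 2) via
     gen_lowpart, leaving only one memory access.  */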
  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.  */

  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
		  || set_noop_p (XVECEXP (newpat, 0, 1)))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there.  */
      if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}

      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    {
	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		  {
		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		    if (reg_overlap_mentioned_p (reg, newpat))
		      {
			undo_all ();
			return 0;
		      }
		  }
	    }

	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}
    }
  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }

  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }

  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }
  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn))))
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      rtx_insn *insn;
      struct insn_link *link;
      rtx ni2dest;

      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);

      /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	 so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We must remove this link.
	 The simplest way to remove the link is to point it at I1,
	 which we know will be a NOTE.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
      else
	ni2dest = SET_DEST (newi2pat);

      for (insn = NEXT_INSN (i3);
	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		    || insn != BB_HEAD (this_basic_block->next_bb));
	   insn = NEXT_INSN (insn))
	if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
	  {
	    FOR_EACH_LOG_LINK (link, insn)
	      if (link->insn == i3)
		link->insn = i1;

	    break;
	  }
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be setting
       either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:

	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
	   ix: using r0

       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything is shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);

	reset_used_flags (call_usage);
	call_usage = copy_rtx (call_usage);

	if (substed_i2)
	  {
	    /* I2SRC must still be meaningful at this point.  Some splitting
	       operations can invalidate I2SRC, but those operations do not
	       apply to calls.  */
	    gcc_assert (i2src);
	    replace_rtx (call_usage, i2dest, i2src);
	  }

	if (substed_i1)
	  replace_rtx (call_usage, i1dest, i1src);
	if (substed_i0)
	  replace_rtx (call_usage, i0dest, i0src);

	CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
      }
    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	struct insn_link *link;

	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;

    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }
    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }

    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, thing
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }

  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;

    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;
      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  machine_mode mode = GET_MODE (dest);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, mode,
					  simplify_gen_binary (AND, mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
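      /* Worked example for the bitfield rewrite above (added,
	 illustrative): storing the constant 3 into a 2-bit field at bit 4
	 of (reg:SI 100) gives mask == (1 << 2) - 1 == 3 and, since
	 SRC == MASK, collapses to
	     (set (reg:SI 100) (ior:SI (reg:SI 100) (const_int 48)))
	 because 3 << 4 == 48; a SRC of 1 would instead keep the
	 AND-with-~(3 << 4) before the IOR.  */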
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
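	  /* E.g. (and:SI (reg:SI 100) (const_int 4096)) whose lone use is
	     a comparison against zero is replaced by a one-bit
	     ZERO_EXTRACT at bit 12 (exact_log2 (4096) == 12 >= 7), so the
	     large constant never needs to be built (added
	     illustration).  */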
	  break;

	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
			     (nonzero_bits (XEXP (SET_SRC (x), 0),
					    GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
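	  /* E.g. with STORE_FLAG_VALUE == -1 and X known to have at most
	     bit 3 set, (ne X (const_int 0)) turns into
	     (neg (lshiftrt X (const_int 3))): the shift yields 0 or 1 and
	     the NEG maps 1 to -1 (added illustration).  */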
	  break;

	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (GET_MODE (inner));
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0
	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
	{
	  machine_mode mode = GET_MODE (SET_SRC (x));

	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_PRECISION (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_PRECISION (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
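      /* Worked example of the two-shift form above (added, illustrative):
	 a signed 8-bit field at bit 8 of a 32-bit value becomes
	     (ashiftrt (ashift INNER (const_int 16)) (const_int 24))
	 since 32 - 8 - 8 == 16 and 32 - 8 == 24; the arithmetic right
	 shift sign-extends the field into the full word.  */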
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;

    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
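      /* The NOR/NAND rewrite above is just De Morgan's law:
	 (and (not A) (not B)) == (not (ior A B)) and
	 (ior (not A) (not B)) == (not (and A B)); the inner IOR/AND is
	 then available as a split point when the target has no direct
	 NOR/NAND pattern (added note).  */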
      break;

    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
		      && (!HAVE_cc0
			  || (! (code == SET
				 && i == 1
				 && XEXP (x, 0) == cc0_rtx))))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }

  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
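/* Example of how subst is used (added sketch with made-up registers):
   when feeding "r2 = r1 << 2" into "r3 = r2 + r4", try_combine ends up
   calling subst on I3's pattern with FROM == (reg r2) and TO == the
   ashift, giving

       (set (reg r3) (plus (ashift (reg r1) (const_int 2)) (reg r4)))

   which is then handed to recog_for_combine; every change was recorded
   via SUBST, so undo_all can restore the original pattern.  */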
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
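/* E.g. (plus (const_int 4) (reg:SI 100)) is turned into the canonical
   (plus (reg:SI 100) (const_int 4)); the swap goes through SUBST so it is
   undone by undo_all if the combination fails (added illustration).  */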
5497 /* Simplify X, a piece of RTL. We just operate on the expression at the
5498 outer level; call `subst' to simplify recursively. Return the new
5501 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5502 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5506 combine_simplify_rtx (rtx x
, machine_mode op0_mode
, int in_dest
,
5509 enum rtx_code code
= GET_CODE (x
);
5510 machine_mode mode
= GET_MODE (x
);
5514 /* If this is a commutative operation, put a constant last and a complex
5515 expression first. We don't need to do this for comparisons here. */
5516 maybe_swap_commutative_operands (x
);
5518 /* Try to fold this expression in case we have constants that weren't
5521 switch (GET_RTX_CLASS (code
))
5524 if (op0_mode
== VOIDmode
)
5525 op0_mode
= GET_MODE (XEXP (x
, 0));
5526 temp
= simplify_unary_operation (code
, mode
, XEXP (x
, 0), op0_mode
);
5529 case RTX_COMM_COMPARE
:
5531 machine_mode cmp_mode
= GET_MODE (XEXP (x
, 0));
5532 if (cmp_mode
== VOIDmode
)
5534 cmp_mode
= GET_MODE (XEXP (x
, 1));
5535 if (cmp_mode
== VOIDmode
)
5536 cmp_mode
= op0_mode
;
5538 temp
= simplify_relational_operation (code
, mode
, cmp_mode
,
5539 XEXP (x
, 0), XEXP (x
, 1));
5542 case RTX_COMM_ARITH
:
5544 temp
= simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5546 case RTX_BITFIELD_OPS
:
5548 temp
= simplify_ternary_operation (code
, mode
, op0_mode
, XEXP (x
, 0),
5549 XEXP (x
, 1), XEXP (x
, 2));
5558 code
= GET_CODE (temp
);
5559 op0_mode
= VOIDmode
;
5560 mode
= GET_MODE (temp
);
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
            && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
           || (!OBJECT_P (XEXP (x, 1))
               && ! (GET_CODE (XEXP (x, 1)) == SUBREG
                     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
          && (!OBJECT_P (XEXP (x, 0))
              && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
          /* If everything is a comparison, what we have is highly unlikely
             to be simpler, so don't use it.  */
          && ! (COMPARISON_P (x)
                && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
        {
          rtx cop1 = const0_rtx;
          enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

          if (cond_code == NE && COMPARISON_P (cond))
            return x;

          /* Simplify the alternative arms; this may collapse the true and
             false arms to store-flag values.  Be careful to use copy_rtx
             here since true_rtx or false_rtx might share RTL with x as a
             result of the if_then_else_cond call above.  */
          true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
          false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

          /* If true_rtx and false_rtx are not general_operands, an if_then_else
             is unlikely to be simpler.  */
          if (general_operand (true_rtx, VOIDmode)
              && general_operand (false_rtx, VOIDmode))
            {
              enum rtx_code reversed;

              /* Restarting if we generate a store-flag expression will cause
                 us to loop.  Just drop through in this case.  */

              /* If the result values are STORE_FLAG_VALUE and zero, we can
                 just make the comparison operation.  */
              if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
                x = simplify_gen_relational (cond_code, mode, VOIDmode,
                                             cond, cop1);
              else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
                       && ((reversed = reversed_comparison_code_parts
                                         (cond_code, cond, cop1, NULL))
                           != UNKNOWN))
                x = simplify_gen_relational (reversed, mode, VOIDmode,
                                             cond, cop1);

              /* Likewise, we can make the negate of a comparison operation
                 if the result values are - STORE_FLAG_VALUE and zero.  */
              else if (CONST_INT_P (true_rtx)
                       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
                       && false_rtx == const0_rtx)
                x = simplify_gen_unary (NEG, mode,
                                        simplify_gen_relational (cond_code,
                                                                 mode, VOIDmode,
                                                                 cond, cop1),
                                        mode);
              else if (CONST_INT_P (false_rtx)
                       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
                       && true_rtx == const0_rtx
                       && ((reversed = reversed_comparison_code_parts
                                         (cond_code, cond, cop1, NULL))
                           != UNKNOWN))
                x = simplify_gen_unary (NEG, mode,
                                        simplify_gen_relational (reversed,
                                                                 mode, VOIDmode,
                                                                 cond, cop1),
                                        mode);
              else
                return gen_rtx_IF_THEN_ELSE (mode,
                                             simplify_gen_relational (cond_code,
                                                                      mode,
                                                                      VOIDmode,
                                                                      cond,
                                                                      cop1),
                                             true_rtx, false_rtx);

              code = GET_CODE (x);
              op0_mode = VOIDmode;
            }
        }
    }
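
  /* For instance, (plus (if_then_else (eq A 0) B C) D) can become
     (if_then_else (eq A 0) (plus B D) (plus C D)), after which each arm
     may simplify on its own.  */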
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
          || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
        {
          rtx other = XEXP (XEXP (x, 0), 0);
          rtx inner_op0 = XEXP (XEXP (x, 0), 1);
          rtx inner_op1 = XEXP (x, 1);
          rtx inner;

          /* Make sure we pass the constant operand if any as the second
             one if this is a commutative operation.  */
          if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
            std::swap (inner_op0, inner_op1);
          inner = simplify_binary_operation (code == MINUS ? PLUS
                                             : code == DIV ? MULT
                                             : code,
                                             mode, inner_op0, inner_op1);

          /* For commutative operations, try the other pair if that one
             didn't simplify.  */
          if (inner == 0 && COMMUTATIVE_ARITH_P (x))
            {
              other = XEXP (XEXP (x, 0), 1);
              inner = simplify_binary_operation (code, mode,
                                                 XEXP (XEXP (x, 0), 0),
                                                 inner_op1);
            }

          if (inner)
            return simplify_gen_binary (code, mode, other, inner);
        }
    }
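
  /* As an example of the reassociation above, (ior (ior A (const_int 3))
     (const_int 12)) folds the two constants and becomes
     (ior A (const_int 15)).  */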
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
         address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;
    case SUBREG:
      if (op0_mode == VOIDmode)
        op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
          && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
          /* Don't call gen_lowpart if the inner mode
             is VOIDmode and we cannot simplify it, as SUBREG without
             inner mode is invalid.  */
          && (GET_MODE (SUBREG_REG (x)) != VOIDmode
              || gen_lowpart_common (mode, SUBREG_REG (x))))
        return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
        break;

      temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
                              SUBREG_BYTE (x));
      if (temp)
        return temp;

      /* If op is known to have all lower bits zero, the result is zero.  */
      if (!in_dest
          && SCALAR_INT_MODE_P (mode)
          && SCALAR_INT_MODE_P (op0_mode)
          && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
          && HWI_COMPUTABLE_MODE_P (op0_mode)
          && (nonzero_bits (SUBREG_REG (x), op0_mode)
              & GET_MODE_MASK (mode)) == 0)
        return CONST0_RTX (mode);

      /* Don't change the mode of the MEM if that would change the meaning
         of the address.  */
      if (MEM_P (SUBREG_REG (x))
          && (MEM_VOLATILE_P (SUBREG_REG (x))
              || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
                                           MEM_ADDR_SPACE (SUBREG_REG (x)))))
        return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
         we might have been counting on using the fact that some bits were
         zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
         replaced by (lshiftrt X C).  This will convert
         (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
      if (GET_CODE (temp) == ASHIFTRT
          && CONST_INT_P (XEXP (temp, 1))
          && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
                                     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
         (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
         MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
         (sign_extract X 1 Y).  But only do this if TEMP isn't a register
         or a SUBREG of one since we'd be making the expression more
         complex if it was just a register.  */

      if (!REG_P (temp)
          && ! (GET_CODE (temp) == SUBREG
                && REG_P (SUBREG_REG (temp)))
          && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
        {
          rtx temp1 = simplify_shift_const
            (NULL_RTX, ASHIFTRT, mode,
             simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
                                   GET_MODE_PRECISION (mode) - 1 - i),
             GET_MODE_PRECISION (mode) - 1 - i);

          /* If all we did was surround TEMP with the two shifts, we
             haven't improved anything, so don't use it.  Otherwise,
             we are better off with TEMP1.  */
          if (GET_CODE (temp1) != ASHIFTRT
              || GET_CODE (XEXP (temp1, 0)) != ASHIFT
              || XEXP (XEXP (temp1, 0), 0) != temp)
            return temp1;
        }
      break;
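
      /* Concretely, in SImode with X known to have only bit 0 possibly
         nonzero, (neg X) becomes (ashiftrt (ashift X 31) 31), mapping
         1 to -1 and 0 to 0 without an explicit negation.  */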
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      if (HWI_COMPUTABLE_MODE_P (mode))
        SUBST (XEXP (x, 0),
               force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
                              GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
         whose value is a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
          && (temp = get_last_value (XEXP (x, 0)))
          && COMPARISON_P (temp))
        return gen_lowpart (mode, XEXP (x, 0));
      break;
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
         returning the inner CONST since CONST can be shared with a
         REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
        SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
         can add in an offset.  find_split_point will split this address up
         again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
          && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
        return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
         when c is (const_int (pow2 + 1) / 2) is a sign extension of a
         bit-field and can be replaced by either a sign_extend or a
         sign_extract.  The `and' may be a zero_extend and the two
         <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
          && CONST_INT_P (XEXP (x, 1))
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
          && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
              || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
               && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
               && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
                   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
              || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
                  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
                      == (unsigned int) i + 1))))
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, mode,
           simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                 XEXP (XEXP (XEXP (x, 0), 0), 0),
                                 GET_MODE_PRECISION (mode) - (i + 1)),
           GET_MODE_PRECISION (mode) - (i + 1));
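
      /* For an eight-bit field in SImode, I is 7 and this rewrites
         (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128))
         as (ashiftrt (ashift X (const_int 24)) (const_int 24)), i.e. a sign
         extension of the low byte of X.  */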
      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
         can become (ashiftrt (ashift (xor x 1) C) C) where C is
         the bitsize of the mode - 1.  This allows simplification of
         "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
          && !REG_P (XEXP (x, 0))
          && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                && REG_P (SUBREG_REG (XEXP (x, 0))))
          && nonzero_bits (XEXP (x, 0), mode) == 1)
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, mode,
           simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
                                 GET_MODE_PRECISION (mode) - 1),
           GET_MODE_PRECISION (mode) - 1);

      /* If we are adding two things that have no bits in common, convert
         the addition into an IOR.  This will often be further simplified,
         for example in cases like ((a & 1) + (a & 2)), which can
         become a & 3.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (XEXP (x, 0), mode)
              & nonzero_bits (XEXP (x, 1), mode)) == 0)
        {
          /* Try to simplify the expression further.  */
          rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
          temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

          /* If we could, great.  If not, do not go ahead with the IOR
             replacement, since PLUS appears in many special purpose
             address arithmetic instructions.  */
          if (GET_CODE (temp) != CLOBBER
              && (GET_CODE (temp) != IOR
                  || ((XEXP (temp, 0) != XEXP (x, 0)
                       || XEXP (temp, 1) != XEXP (x, 1))
                      && (XEXP (temp, 0) != XEXP (x, 1)
                          || XEXP (temp, 1) != XEXP (x, 0)))))
            return temp;
        }

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
          && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
          && !side_effects_p (XEXP (x, 0)))
        return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
         (and <foo> (const_int pow2-1))  */
      if (GET_CODE (XEXP (x, 1)) == AND
          && CONST_INT_P (XEXP (XEXP (x, 1), 1))
          && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
          && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
        return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
                                       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
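
      /* E.g. (minus X (and X (const_int -8))) becomes
         (and X (const_int 7)): subtracting the high part of X leaves just
         its low three bits.  */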
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  This
         occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
          && GET_CODE (XEXP (x, 0)) == DIV)
        {
          rtx tem = simplify_binary_operation (MULT, mode,
                                               XEXP (XEXP (x, 0), 0),
                                               XEXP (x, 1));
          if (tem)
            return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
        }
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
         its first operand is a shift.  */
      if (CONST_INT_P (XEXP (x, 1))
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
          && (GET_CODE (XEXP (x, 0)) == ASHIFT
              || GET_CODE (XEXP (x, 0)) == LSHIFTRT
              || GET_CODE (XEXP (x, 0)) == ASHIFTRT
              || GET_CODE (XEXP (x, 0)) == ROTATE
              || GET_CODE (XEXP (x, 0)) == ROTATERT))
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
                                     XEXP (x, 0), i);
      break;
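
      /* E.g. (udiv (ashift X (const_int 3)) (const_int 4)) is rewritten as
         (lshiftrt (ashift X (const_int 3)) (const_int 2)), which the shift
         simplifier can usually reduce further.  */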
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
         with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
          || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
              && ! CC0_P (XEXP (x, 0))))
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          enum rtx_code new_code;

          if (GET_CODE (op0) == COMPARE)
            op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

          /* Simplify our comparison, if possible.  */
          new_code = simplify_comparison (code, &op0, &op1);

          /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
             if only the low-order bit is possibly nonzero in X (such as when
             X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
             (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
             known to be either 0 or -1, NE becomes a NEG and EQ becomes
             (plus X 1).

             Remove any ZERO_EXTRACT we made when thinking this was a
             comparison.  It may now be simpler to use, e.g., an AND.  If a
             ZERO_EXTRACT is indeed appropriate, it will be placed back by
             the call to make_compound_operation in the SET case.

             Don't apply these optimizations if the caller would
             prefer a comparison rather than a value.
             E.g., for the condition in an IF_THEN_ELSE most targets need
             an explicit comparison.  */

          if (in_cond)
            ;

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            return gen_lowpart (mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_binary (XOR, mode,
                                          gen_lowpart (mode, op0),
                                          const1_rtx);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (mode, gen_lowpart (mode, op0), 1);
            }
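
          /* To summarize the four cases above: with STORE_FLAG_VALUE 1 and
             X known to be 0 or 1, (ne X 0) is X and (eq X 0) is (xor X 1);
             with X known to be 0 or -1, (ne X 0) is (neg X) and (eq X 0)
             is (plus X 1).  */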
          /* If STORE_FLAG_VALUE is -1, we have cases similar to
             those above.  */
          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            return gen_lowpart (mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NOT, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          /* If X is 0/1, (eq X 0) is X-1.  */
          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (mode, gen_lowpart (mode, op0), -1);
            }

          /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
             one bit that might be nonzero, we can convert (ne x 0) to
             (ashift x c) where C puts the bit in the sign bit.  Remove any
             AND with STORE_FLAG_VALUE when we are done, since we are only
             going to test the sign bit.  */
          if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
              && HWI_COMPUTABLE_MODE_P (mode)
              && val_signbit_p (mode, STORE_FLAG_VALUE)
              && op1 == const0_rtx
              && mode == GET_MODE (op0)
              && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
            {
              x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                        expand_compound_operation (op0),
                                        GET_MODE_PRECISION (mode) - 1 - i);
              if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
                return XEXP (x, 0);
              else
                return x;
            }
          /* If the code changed, return a whole new comparison.
             We also need to avoid using SUBST in cases where
             simplify_comparison has widened a comparison with a CONST_INT,
             since in that case the wider CONST_INT may fail the sanity
             checks in do_SUBST.  */
          if (new_code != code
              || (CONST_INT_P (op1)
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
            return gen_rtx_fmt_ee (new_code, mode, op0, op1);

          /* Otherwise, keep this operation, but maybe change its operands.
             This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
          SUBST (XEXP (x, 0), op0);
          SUBST (XEXP (x, 1), op1);
        }
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
        return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
        return simplify_shift_const (x, code, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
        SUBST (XEXP (x, 1),
               force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
                              (HOST_WIDE_INT_1U
                               << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
                              - 1, 0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
                                    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
          != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      unsigned HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
        {
          swapped = 1, true_code = EQ, false_code = NE;
          std::swap (true_rtx, false_rtx);
        }

      /* If we are comparing against zero and the expression being tested has
         only a single bit that might be nonzero, that is its value when it is
         not equal to zero.  Similarly if it is known to be -1 or 0.  */

      if (true_code == EQ && true_val == const0_rtx
          && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from))))
        {
          false_code = EQ;
          false_val = gen_int_mode (nzb, GET_MODE (from));
        }
      else if (true_code == EQ && true_val == const0_rtx
               && (num_sign_bit_copies (from, GET_MODE (from))
                   == GET_MODE_PRECISION (GET_MODE (from))))
        {
          false_code = EQ;
          false_val = constm1_rtx;
        }

      /* Now simplify an arm if we know the value of the register in the
         branch and it is used in the arm.  Be careful due to the potential
         of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
        true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
                                      from, true_val),
                          pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
        false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
                                       from, false_val),
                           pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
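
  /* For example, given (if_then_else (eq A (const_int 0)) (plus A B) C),
     A is known to be zero throughout the true arm, so known_cond reduces
     that arm to B.  */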
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
          || (CONSTANT_P (true_rtx)
              && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
          || true_rtx == const0_rtx
          || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
          || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
              && !OBJECT_P (false_rtx))
          || reg_mentioned_p (true_rtx, false_rtx)
          || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
           && !HONOR_NANS (mode)
           && rtx_equal_p (XEXP (cond, 0), true_rtx)
           && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
        return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
        return
          simplify_gen_unary (NEG, mode,
                              simplify_gen_unary (ABS, mode, true_rtx, mode),
                              mode);
      default:
        break;
      }
  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
        return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
        return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
        return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
        return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
        break;
      }
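
  /* E.g. (if_then_else (ge A B) A B) becomes (smax A B), and
     (if_then_else (ltu A B) A B) becomes (umin A B).  */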
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && GET_MODE_CLASS (mode) == MODE_INT
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      machine_mode m = mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
           || GET_CODE (t) == IOR || GET_CODE (t) == XOR
           || GET_CODE (t) == ASHIFT
           || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
          && rtx_equal_p (XEXP (t, 0), f))
        c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
         would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
                || GET_CODE (t) == XOR)
               && rtx_equal_p (XEXP (t, 1), f))
        c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (mode)
                      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == SIGN_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (mode)
                      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }

      if (z)
        {
          temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
                                                 cond_op0, cond_op1),
                        pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (MULT, m, temp,
                                      simplify_gen_binary (MULT, m, c1,
                                                           const_true_rtx));
          temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

          if (extend_op != UNKNOWN)
            temp = simplify_gen_unary (extend_op, mode, temp, m);

          return temp;
        }
    }
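
  /* For instance, with STORE_FLAG_VALUE 1,
     (if_then_else (eq A B) (plus Z (const_int 4)) Z) becomes
     (plus Z (mult (eq A B) (const_int 4))), replacing the conditional
     addition with straight-line arithmetic on the store-flag value.  */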
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), mode)
           && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
          || ((num_sign_bit_copies (XEXP (cond, 0), mode)
               == GET_MODE_PRECISION (mode))
              && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, mode,
                            gen_lowpart (mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE REG 0) (0) (8)) is REG for nonzero_bits (REG) == 8.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && GET_MODE (XEXP (cond, 0)) == mode
      && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
         == nonzero_bits (XEXP (cond, 0), mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
    return XEXP (cond, 0);

  return x;
}
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
        {
          op0 = XEXP (src, 0), op1 = XEXP (src, 1);
          if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
            {
              inner_compare = op0;
              op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
            }
        }
      else
        op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
                                           op0, op1);
      if (!tmp)
        new_code = old_code;
      else if (!CONSTANT_P (tmp))
        {
          new_code = GET_CODE (tmp);
          op0 = XEXP (tmp, 0);
          op1 = XEXP (tmp, 1);
        }
      else
        {
          rtx pat = PATTERN (other_insn);
          undobuf.other_insn = other_insn;
          SUBST (*cc_use, tmp);

          /* Attempt to simplify CC user.  */
          if (GET_CODE (pat) == SET)
            {
              rtx new_rtx = simplify_rtx (SET_SRC (pat));
              if (new_rtx != NULL_RTX)
                SUBST (SET_SRC (pat), new_rtx);
            }

          /* Convert X into a no-op move.  */
          SUBST (SET_DEST (x), pc_rtx);
          SUBST (SET_SRC (x), pc_rtx);
          return x;
        }
      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
         need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
        compare_mode = GET_MODE (op0);
      else if (inner_compare
               && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
               && new_code == old_code
               && op0 == XEXP (inner_compare, 0)
               && op1 == XEXP (inner_compare, 1))
        compare_mode = GET_MODE (inner_compare);
      else
        compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
         compare, and the mode in the place SET_DEST is used.  If SET_DEST is
         a hard register, just build new versions with the proper mode.  If it
         is a pseudo, we lose unless it is the only time we set the pseudo, in
         which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
        {
          if (can_change_dest_mode (dest, 0, compare_mode))
            {
              unsigned int regno = REGNO (dest);
              rtx new_dest;

              if (regno < FIRST_PSEUDO_REGISTER)
                new_dest = gen_rtx_REG (compare_mode, regno);
              else
                {
                  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                  new_dest = regno_reg_rtx[regno];
                }

              SUBST (SET_DEST (x), new_dest);
              SUBST (XEXP (*cc_use, 0), new_dest);
              other_changed = 1;

              dest = new_dest;
            }
        }
#endif /* SELECT_CC_MODE */
      /* If the code changed, we have to build a new comparison in
         undobuf.other_insn.  */
      if (new_code != old_code)
        {
          int other_changed_previously = other_changed;
          unsigned HOST_WIDE_INT mask;
          rtx old_cc_use = *cc_use;

          SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
                                          dest, const0_rtx));
          other_changed = 1;

          /* If the only change we made was to change an EQ into an NE or
             vice versa, OP0 has only one bit that might be nonzero, and OP1
             is zero, check if changing the user of the condition code will
             produce a valid insn.  If it won't, we can keep the original code
             in that insn by surrounding our operation with an XOR.  */

          if (((old_code == NE && new_code == EQ)
               || (old_code == EQ && new_code == NE))
              && ! other_changed_previously && op1 == const0_rtx
              && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
              && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
            {
              rtx pat = PATTERN (other_insn), note = 0;

              if ((recog_for_combine (&pat, other_insn, &note) < 0
                   && ! check_asm_operands (pat)))
                {
                  *cc_use = old_cc_use;
                  other_changed = 0;

                  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
                                             gen_int_mode (mask,
                                                           GET_MODE (op0)));
                }
            }
        }

      if (other_changed)
        undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
        {
          SUBST (SET_SRC (x), op0);
          src = SET_SRC (x);
        }
      /* Otherwise, if we didn't previously have the same COMPARE we
         want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
               || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
        {
          SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
          src = SET_SRC (x);
        }
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
         compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machines where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
           / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
               + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS
          || (GET_MODE_SIZE (GET_MODE (src))
              <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
            && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
                                         GET_MODE (SUBREG_REG (src)),
                                         GET_MODE (src)))
#endif
      && (REG_P (dest)
          || (GET_CODE (dest) == SUBREG
              && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
             gen_lowpart (GET_MODE (SUBREG_REG (src)),
                          dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }
  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_PRECISION (GET_MODE (src))
          < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
                                     nonzero_bits (inner, inner_mode)))
        {
          SUBST (SET_SRC (x), inner);
          src = SET_SRC (x);
        }
    }

  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
             gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead. Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
          || ! can_conditionally_move_p (GET_MODE (src)))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
                               GET_MODE (XEXP (XEXP (src, 0), 0)))
          == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
                      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
                       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
          && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
               && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, GET_MODE (src),
                                   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, GET_MODE (src),
                                   simplify_gen_unary (NOT, GET_MODE (src),
                                                       XEXP (XEXP (src, 0), 0),
                                                       GET_MODE (src)),
                                   false_rtx);

      SUBST (SET_SRC (x),
             simplify_gen_binary (IOR, GET_MODE (src),
                                  simplify_gen_binary (IOR, GET_MODE (src),
                                                       term1, term2),
                                  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
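
/* As an example of the IF_THEN_ELSE conversion above: with A known to be
   0 or -1, (set D (if_then_else (ne A 0) B C)) becomes
   (set D (ior (and A B) (and (not A) C))), which needs no conditional
   execution at all.  */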
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
         any (sign) bits when converting INTVAL (op1) to
         "unsigned HOST_WIDE_INT".  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0))
        {
          x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
          if (GET_CODE (x) != AND)
            return x;

          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);
        }

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
         apply the distributive law and then the inverse distributive
         law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      if (GET_CODE (op1) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */

static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* ... fall through ...  */

    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
         it depends on implicitly extending the value.
         Since we don't know the right way to extend it,
         we can't tell whether the implicit way is right.

         Even for a mode that is no wider than a const_int,
         we can't win, because we need to sign extend one of its bits through
         the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
         (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
         because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
         reloaded. If not for that, MEM's would very rarely be safe.

         Reject MODEs bigger than a word, because we might not be able
         to reference a two-register group starting with an arbitrary register
         (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
        return x;

      /* Reject MODEs that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
        return x;

      len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
      /* If the inner object has VOIDmode (the only way this can happen
         is if it is an ASM_OPERANDS), we can't do anything since we don't
         know how much masking to do.  */
      if (len == 0)
        return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* ... fall through ...  */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
        return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
          || !CONST_INT_P (XEXP (x, 2))
          || GET_MODE (XEXP (x, 0)) == VOIDmode)
        return x;

      /* Reject MODEs that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
        return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
        return x;

      if (BITS_BIG_ENDIAN)
        pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;

      break;

    default:
      return x;
    }

  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
               & ~(((unsigned HOST_WIDE_INT)
                    GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
                   >> 1))
              == 0)))
    {
      machine_mode mode = GET_MODE (x);
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
          > set_src_cost (temp2, mode, optimize_this_for_speed_p))
        return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
               > set_src_cost (temp, mode, optimize_this_for_speed_p))
        return temp;
      else
        return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
         know that the last value didn't have any inappropriate bits
         set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
          && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
          && subreg_lowpart_p (XEXP (x, 0))
          && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
         is a comparison and STORE_FLAG_VALUE permits.  This is like
         the first case, but it works even when GET_MODE (x) is larger
         than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
          && COMPARISON_P (XEXP (XEXP (x, 0), 0))
          && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              <= HOST_BITS_PER_WIDE_INT)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
          && subreg_lowpart_p (XEXP (x, 0))
          && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
          && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              <= HOST_BITS_PER_WIDE_INT)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return SUBREG_REG (XEXP (x, 0));
    }

  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */

  modewidth = GET_MODE_PRECISION (GET_MODE (x));
  if (modewidth >= pos + len)
    {
      machine_mode mode = GET_MODE (x);
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
        return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
                                  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
                                  simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                        GET_MODE (x),
                                                        XEXP (x, 0), pos),
                                  (HOST_WIDE_INT_1U << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
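
/* For instance, in SImode (sign_extend:SI (subreg:QI R 0)) expands to
   (ashiftrt (ashift R (const_int 24)) (const_int 24)); for the unsigned
   (zero_extend:SI (subreg:QI R 0)) the shift pair is normally folded
   into (and R (const_int 255)).  */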
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS). If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  machine_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
          && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
        {
          inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
          len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
          pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
        }
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
               && CONST_INT_P (XEXP (SET_DEST (x), 1)))
        {
          inner = XEXP (SET_DEST (x), 0);
          len = INTVAL (XEXP (SET_DEST (x), 1));
          pos = XEXP (SET_DEST (x), 2);

          /* A constant position should stay within the width of INNER.  */
          if (CONST_INT_P (pos)
              && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
            break;

          if (BITS_BIG_ENDIAN)
            {
              if (CONST_INT_P (pos))
                pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
                               - INTVAL (pos));
              else if (GET_CODE (pos) == MINUS
                       && CONST_INT_P (XEXP (pos, 1))
                       && (INTVAL (XEXP (pos, 1))
                           == GET_MODE_PRECISION (GET_MODE (inner)) - len))
                /* If position is ADJUST - X, new position is X.  */
                pos = XEXP (pos, 0);
              else
                {
                  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
                  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
                                             gen_int_mode (prec - len,
                                                           GET_MODE (pos)),
                                             pos);
                }
            }
        }

      /* A SUBREG between two modes that occupy the same numbers of words
         can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
               /* We need SUBREGs to compute nonzero_bits properly.  */
               && nonzero_sign_valid
               && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
                     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
                        + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
        {
          x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
                           gen_lowpart
                           (GET_MODE (SUBREG_REG (SET_DEST (x))),
                            SET_SRC (x)));
          continue;
        }
      else
        break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
        inner = SUBREG_REG (inner);

      compute_mode = GET_MODE (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (! SCALAR_INT_MODE_P (compute_mode))
        {
          machine_mode imode;

          /* Don't do anything for vector or complex integral types.  */
          if (! FLOAT_MODE_P (compute_mode))
            break;

          /* Try to find an integral mode to pun with.  */
          imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
          if (imode == BLKmode)
            break;

          compute_mode = imode;
          inner = gen_lowpart (imode, inner);
        }

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
        break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
        break;

      /* Now compute the equivalent expression.  Make a copy of INNER
         for the SET_DEST in case it is a MEM into which we will substitute;
         we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
                           compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
                                     simplify_gen_unary (NOT, compute_mode,
                                       simplify_gen_binary (ASHIFT,
                                                            compute_mode,
                                                            mask, pos),
                                       compute_mode),
                                     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
                                    simplify_gen_binary (
                                      AND, compute_mode,
                                      gen_lowpart (compute_mode, SET_SRC (x)),
                                      mask),
                                    pos);

      x = gen_rtx_SET (copy_rtx (inner),
                       simplify_gen_binary (IOR, compute_mode,
                                            cleared, masked));
    }

  return x;
}
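
/* Concretely, storing S into (zero_extract:SI R (const_int 8) (const_int 8))
   becomes R = (R & ~(255 << 8)) | ((S & 255) << 8): the old field is first
   cleared, and the masked, shifted source is then IOR'ed back in.  */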
7301 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7302 it is an RTX that represents the (variable) starting position; otherwise,
7303 POS is the (constant) starting bit position. Both are counted from the LSB.
7305 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7307 IN_DEST is nonzero if this is a reference in the destination of a SET.
7308 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7309 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7312 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7313 ZERO_EXTRACT should be built even for bits starting at bit 0.
7315 MODE is the desired mode of the result (if IN_DEST == 0).
7317 The result is an RTX for the extraction or NULL_RTX if the target
7321 make_extraction (machine_mode mode
, rtx inner
, HOST_WIDE_INT pos
,
7322 rtx pos_rtx
, unsigned HOST_WIDE_INT len
, int unsignedp
,
7323 int in_dest
, int in_compare
)
7325 /* This mode describes the size of the storage area
7326 to fetch the overall value from. Within that, we
7327 ignore the POS lowest bits, etc. */
7328 machine_mode is_mode
= GET_MODE (inner
);
7329 machine_mode inner_mode
;
7330 machine_mode wanted_inner_mode
;
7331 machine_mode wanted_inner_reg_mode
= word_mode
;
7332 machine_mode pos_mode
= word_mode
;
7333 machine_mode extraction_mode
= word_mode
;
7334 machine_mode tmode
= mode_for_size (len
, MODE_INT
, 1);
7336 rtx orig_pos_rtx
= pos_rtx
;
7337 HOST_WIDE_INT orig_pos
;
7339 if (pos_rtx
&& CONST_INT_P (pos_rtx
))
7340 pos
= INTVAL (pos_rtx
), pos_rtx
= 0;
7342 if (GET_CODE (inner
) == SUBREG
&& subreg_lowpart_p (inner
))
7344 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7345 consider just the QI as the memory to extract from.
7346 The subreg adds or removes high bits; its mode is
7347 irrelevant to the meaning of this extraction,
7348 since POS and LEN count from the lsb. */
7349 if (MEM_P (SUBREG_REG (inner
)))
7350 is_mode
= GET_MODE (SUBREG_REG (inner
));
7351 inner
= SUBREG_REG (inner
);
7353 else if (GET_CODE (inner
) == ASHIFT
7354 && CONST_INT_P (XEXP (inner
, 1))
7355 && pos_rtx
== 0 && pos
== 0
7356 && len
> UINTVAL (XEXP (inner
, 1)))
7358 /* We're extracting the least significant bits of an rtx
7359 (ashift X (const_int C)), where LEN > C. Extract the
7360 least significant (LEN - C) bits of X, giving an rtx
7361 whose mode is MODE, then shift it left C times. */
7362 new_rtx
= make_extraction (mode
, XEXP (inner
, 0),
7363 0, 0, len
- INTVAL (XEXP (inner
, 1)),
7364 unsignedp
, in_dest
, in_compare
);
7366 return gen_rtx_ASHIFT (mode
, new_rtx
, XEXP (inner
, 1));
7368 else if (GET_CODE (inner
) == TRUNCATE
)
7369 inner
= XEXP (inner
, 0);
7371 inner_mode
= GET_MODE (inner
);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  if (tmode != BLKmode
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (pos == 0 || REG_P (inner))
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0),
						  MEM_ADDR_SPACE (inner))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  HOST_WIDE_INT offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
	  else
	    offset = pos / BITS_PER_UNIT;

	  new_rtx = adjust_address_nv (inner, tmode, offset);
	}
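      /* Offset example (an editorial illustration): for IS_MODE == SImode,
	 LEN == 8 and POS == 8, the field lives in byte 8 / 8 == 1 on a
	 little-endian target, but in byte (32 - 8 - 8) / 8 == 2 when
	 BYTES_BIG_ENDIAN.  */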
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  HOST_WIDE_INT final_word = pos / BITS_PER_WORD;

		  if (WORDS_BIG_ENDIAN
		      && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
		    final_word = ((GET_MODE_SIZE (inner_mode)
				   - GET_MODE_SIZE (tmode))
				  / UNITS_PER_WORD) - final_word;

		  final_word *= UNITS_PER_WORD;
		  if (BYTES_BIG_ENDIAN &&
		      GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
		    final_word += (GET_MODE_SIZE (inner_mode)
				   - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, final_word))
		    return NULL_RTX;

		  new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
		}
	      else
		new_rtx = gen_lowpart (tmode, inner);
	    }
	  else
	    new_rtx = inner;
	}
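      /* Example of the SUBREG path above (an editorial illustration):
	 extracting the high SImode word (POS == 32, LEN == 32) of
	 (reg:DI r) on a little-endian 32-bit target gives
	 FINAL_WORD == 4 and hence (subreg:SI (reg:DI r) 4).  */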
      else
	new_rtx = force_to_mode (inner, tmode,
				 len >= HOST_BITS_PER_WIDE_INT
				 ? HOST_WIDE_INT_M1U
				 : (HOST_WIDE_INT_1U << len) - 1, 0);

      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new_rtx) ? new_rtx
		: (GET_CODE (new_rtx) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
	return new_rtx;

      if (CONST_SCALAR_INT_P (new_rtx))
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (tmode)
	      && ((nonzero_bits (new_rtx, tmode)
		   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (tmode)) >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new_rtx));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
	  || (pos_rtx != 0 && len != 1)))
    return 0;

  enum extraction_pattern pattern = (in_dest ? EP_insv
				     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  if (get_best_reg_extraction_insn (&insn, pattern,
				    GET_MODE_BITSIZE (inner_mode), mode))
    {
      wanted_inner_reg_mode = insn.struct_mode;
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
    extraction_mode = mode;

  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	{
	  wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
	  gcc_assert (wanted_inner_mode != VOIDmode);
	}
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
      int width = (MEM_P (inner)
		   ? GET_MODE_BITSIZE (is_mode)
		   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
			   pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }
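  /* Conversion example (an editorial illustration): with WIDTH == 32,
     LEN == 8 and POS == 4, the little-endian position 4 becomes
     32 - 8 - 4 == 20 in BITS_BIG_ENDIAN numbering.  */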

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
	 the register unless TRULY_NOOP_TRUNCATION.  */
      if (in_dest
	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
					     wanted_inner_mode))
	return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.  */
  if (pos_rtx != 0
      && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
				     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert extraction to cheaper one - either
	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
	 cases.  */
      if (flag_expensive_optimizations
	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
					  GET_MODE (pos_rtx));

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
	    temp = temp1;
	}
      pos_rtx = temp;
    }

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			     extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}

/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */
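
/* For instance (an editorial illustration, not from the original source):
   extract_left_shift on (plus:SI (ashift:SI Y (const_int 3)) (const_int 8))
   with COUNT == 3 yields (plus:SI Y (const_int 1)), since
   ((Y << 3) + 8) == ((Y + 1) << 3) and 8 has no bits below bit 3.  */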

static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((HOST_WIDE_INT_1U << count) - 1)) == 0
	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}

/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */

static rtx
make_compound_operation_int (machine_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
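	  /* Example (an editorial illustration): inside a MEM address,
	     (ashift:SI (reg:SI r) (const_int 2)) is rewritten here as
	     (mult:SI (reg:SI r) (const_int 4)), the canonical form for
	     address arithmetic.  */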
	}
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  maybe_swap_commutative_operands (x);
	  return x;
	}
      break;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}

    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  machine_mode inner_mode = GET_MODE (inner_x0);
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then
	     we must zero-extend to keep the semantics of the AND.  */
	  if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	    ;
	  else if (SCALAR_INT_MODE_P (inner_mode))
	    new_rtx = simplify_gen_unary (ZERO_EXTEND, mode,
					  new_rtx, inner_mode);
	  else
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation
				     (XEXP (XEXP (x, 0), 0), next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);
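      /* Example (an editorial illustration): in an equality comparison
	 against zero, (and:SI X (const_int 8)) can be rewritten as
	 (zero_extract:SI X (const_int 1) (const_int 3)), testing bit 3
	 of X directly.  */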

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode) < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* Original AND constant with all the known zero bits set.  */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0,
					     GET_MODE_PRECISION (sub_mode),
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}
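      /* Example (an editorial illustration):
	 (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 26))
	 extracts bits 2..7 of X with sign extension, i.e.
	 (sign_extract:SI X (const_int 6) (const_int 2)).  */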

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (new_rtx, next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       mode_width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode))))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded the
	       compound, use gen_lowpart to convert to the desired mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}

/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */
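
/* For instance (an editorial illustration, not from the original source):
   under IN_CODE == SET,
     (and:SI (lshiftrt:SI X (const_int 24)) (const_int 255))
   may be rewritten as
     (zero_extract:SI X (const_int 8) (const_int 24)),
   exposing the field extraction to later simplifications.  */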

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  enum rtx_code next_code;
  rtx new_rtx, tem;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  if (SCALAR_INT_MODE_P (GET_MODE (x)))
    {
      rtx new_rtx = make_compound_operation_int (GET_MODE (x), &x,
						 in_code, &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}

/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
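
/* Example (an editorial illustration): M == 0x70 selects bits 4..6, so
   get_pos_from_mask returns 4 and sets *PLEN to 3; M == 0xff00ff fails
   the power-of-two-minus-1 test and returns -1.  */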

/* If X refers to a register that equals REG in value, replace these
   references with REG.  */

static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}

/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}

/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */
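
/* Example (an editorial illustration, not from the original source):
   forcing (and:SI X (const_int 255)) to SImode under MASK == 15 first
   rewrites the constant to 255 & 15 == 15; the result is then an AND
   with MASK itself, which is redundant, so typically X alone is
   returned (suitably converted to MODE).  */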

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT fuller_mask, nonzero;
  rtx op0, op1, temp;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && ((GET_MODE_SIZE (GET_MODE (x))
	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	  || (0 == (mask
		    & GET_MODE_MASK (GET_MODE (x))
		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  /* The arithmetic simplifications here only work for scalar integer
     modes.  */
  if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
    return gen_lowpart_or_truncate (mode, x);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
		  == mask))
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (GET_MODE (x)) != mask
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1))
		  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
				       gen_int_mode (cval, GET_MODE (x)));
	      if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
		  < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
	{
	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
				  GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, GET_MODE (x),
				  XEXP (x, 1), GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (GET_MODE (x)))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	x = simplify_gen_binary (code, op_mode, op0, op1);
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), op_mode,
						    mask, next_select));

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
	    op_mode = GET_MODE (x);

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (GET_MODE (x)))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (GET_MODE (x)))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
					  - exact_log2 (mask + 1)));

      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (GET_MODE (x), mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (GET_MODE (x));
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (GET_MODE (x));
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
		   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				 XEXP (x, 0), XEXP (x, 1));

    shiftrt:
      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    GET_MODE (x),
					    gen_int_mode (mask, GET_MODE (x)),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, GET_MODE (x),
				     force_to_mode (XEXP (x, 0), GET_MODE (x),
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (GET_MODE (x)))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (XOR, GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_unary (code, op_mode, op0, op_mode);
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (GET_MODE (x),
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (GET_MODE (x),
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}

/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */
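
/* Example (an editorial illustration, not from the original source): for
   X == (ne:SI (reg:SI c) (const_int 0)) this returns (reg:SI c) with
   *PTRUE == const_true_rtx and *PFALSE == const0_rtx; an IF_THEN_ELSE
   whose condition is an NE-against-zero likewise yields the tested value
   directly.  */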

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }

  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
      cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	    }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
	      return cond0;
	    }
	}

      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }

  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }

  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
					       &true0, &false0)))
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }

  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (mode != VOIDmode
	       && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}

/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */
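
/* Example (an editorial illustration, not from the original source): if
   COND is GE with operands (reg:SI r) and (const_int 0), then known_cond
   applied to (abs:SI (reg:SI r)) returns (reg:SI r); under LT it returns
   (neg:SI (reg:SI r)).  */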

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }
9210 operands are the same as REG and VAL. */
9212 else if (COMPARISON_P (x
) || COMMUTATIVE_ARITH_P (x
))
9214 if (rtx_equal_p (XEXP (x
, 0), val
))
9216 std::swap (val
, reg
);
9217 cond
= swap_condition (cond
);
9220 if (rtx_equal_p (XEXP (x
, 0), reg
) && rtx_equal_p (XEXP (x
, 1), val
))
9222 if (COMPARISON_P (x
))
9224 if (comparison_dominates_p (cond
, code
))
9225 return const_true_rtx
;
9227 code
= reversed_comparison_code (x
, NULL
);
9229 && comparison_dominates_p (cond
, code
))
9234 else if (code
== SMAX
|| code
== SMIN
9235 || code
== UMIN
|| code
== UMAX
)
9237 int unsignedp
= (code
== UMIN
|| code
== UMAX
);
9239 /* Do not reverse the condition when it is NE or EQ.
9240 This is because we cannot conclude anything about
9241 the value of 'SMAX (x, y)' when x is not equal to y,
9242 but we can when x equals y. */
9243 if ((code
== SMAX
|| code
== UMAX
)
9244 && ! (cond
== EQ
|| cond
== NE
))
9245 cond
= reverse_condition (cond
);
9250 return unsignedp
? x
: XEXP (x
, 1);
9252 return unsignedp
? x
: XEXP (x
, 0);
9254 return unsignedp
? XEXP (x
, 1) : x
;
9256 return unsignedp
? XEXP (x
, 0) : x
;
9263 else if (code
== SUBREG
)
9265 machine_mode inner_mode
= GET_MODE (SUBREG_REG (x
));
9266 rtx new_rtx
, r
= known_cond (SUBREG_REG (x
), cond
, reg
, val
);
9268 if (SUBREG_REG (x
) != r
)
9270 /* We must simplify subreg here, before we lose track of the
9271 original inner_mode. */
9272 new_rtx
= simplify_subreg (GET_MODE (x
), r
,
9273 inner_mode
, SUBREG_BYTE (x
));
9277 SUBST (SUBREG_REG (x
), r
);
9282 /* We don't have to handle SIGN_EXTEND here, because even in the
9283 case of replacing something with a modeless CONST_INT, a
9284 CONST_INT is already (supposed to be) a valid sign extension for
9285 its narrower mode, which implies it's already properly
9286 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9287 story is different. */
9288 else if (code
== ZERO_EXTEND
)
9290 machine_mode inner_mode
= GET_MODE (XEXP (x
, 0));
9291 rtx new_rtx
, r
= known_cond (XEXP (x
, 0), cond
, reg
, val
);
9293 if (XEXP (x
, 0) != r
)
9295 /* We must simplify the zero_extend here, before we lose
9296 track of the original inner_mode. */
9297 new_rtx
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
9302 SUBST (XEXP (x
, 0), r
);
9308 fmt
= GET_RTX_FORMAT (code
);
9309 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
9312 SUBST (XEXP (x
, i
), known_cond (XEXP (x
, i
), cond
, reg
, val
));
9313 else if (fmt
[i
] == 'E')
9314 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
9315 SUBST (XVECEXP (x
, i
, j
), known_cond (XVECEXP (x
, i
, j
),
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */
static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
	return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
	return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
	x = adjust_address_nv (x, GET_MODE (y),
			       -subreg_lowpart_offset (GET_MODE (x),
						       GET_MODE (y)));
      else
	x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */
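
/* For example (an illustrative sketch, not from the sources): clearing a
   single bit at position POS of DEST reaches this function as
     (set DEST (and (rotate (const_int -2) POS) DEST))
   and is rewritten by the first case below into the field assignment
     (set (zero_extract DEST (const_int 1) POS) (const_int 0)).  */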
static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;
  machine_mode mode;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  mode = GET_MODE (src);
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (dest, src);
	}
    }

  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */
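
  /* A concrete instance of the above (illustrative constants only): in
     SImode with C1 == 0xffffff0f, ~C1 selects a 4-bit field at position 4,
     so (ior (and DEST (const_int 0xffffff0f)) OTHER), where OTHER has
     nonzero bits only within 0xf0, becomes
       (set (zero_extract DEST (const_int 4) (const_int 4))
	    (lshiftrt OTHER (const_int 4)))
     modulo the mode adjustments performed below.  */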
  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
	   && paradoxical_subreg_p (XEXP (rhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
	   && CONST_INT_P (XEXP (rhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
	   && paradoxical_subreg_p (XEXP (lhs, 0))
	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
	   && CONST_INT_P (XEXP (lhs, 1))
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
  if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
      || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  mode = (GET_CODE (assign) == STRICT_LOW_PART
	  ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     GET_MODE (src),
						     other, pos),
			       dest);
  src = force_to_mode (src, mode,
		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
		       ? ~(unsigned HOST_WIDE_INT) 0
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and A (not B)) (and B (not B))),
   which then simplifies to (and A (not B)).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */
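
/* To make the indexing concrete (an illustration, not from the sources):
   for X == (and (ior A B) C) and N == 0, DECOMPOSED is (ior A B) and
   DISTRIBUTED is C, so the code below forms (ior (and A C) (and B C))
   and keeps that form only if it is cheaper than X.  */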
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
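
/* For example (illustrative only): in SImode with VAROP == (plus X
   (const_int 4)) and CONSTOP == 3, the PLUS case below notices that
   (and (const_int 4) (const_int 3)) is zero, so the whole expression can
   be replaced by (and X (const_int 3)).  */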
static rtx
simplify_and_const_int_1 (machine_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
	(mode,
	 apply_distributive_law
	 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
			       simplify_and_const_int (NULL_RTX,
						       GET_MODE (varop),
						       XEXP (varop, 0),
						       constop),
			       simplify_and_const_int (NULL_RTX,
						       GET_MODE (varop),
						       XEXP (varop, 1),
						       constop))));

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */
static rtx
simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   a shift, AND, or zero_extract, we can do better.  */
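
/* For instance (a hedged illustration): if the only assignment to pseudo
   100 visible in this EBB was (set (reg:SI 100) (and (reg:SI 101)
   (const_int 255))), then last_set_nonzero_bits is 0xff and the mask
   below clears the upper 24 bits of *NONZERO.  */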
static rtx
reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
			      const_rtx known_x ATTRIBUTE_UNUSED,
			      machine_mode known_mode ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
	 of mode, record_value_for_reg invoked nonzero_bits on the register
	 with nonzero_bits_mode (because last_set_mode is necessarily integral
	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
	 are all valid, hence in mode too since nonzero_bits_mode is defined
	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
	tem = sign_extend_short_imm (tem, GET_MODE (x),
				     GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));

      *nonzero &= mask;
    }

  return NULL;
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */
static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
				     const_rtx known_x ATTRIBUTE_UNUSED,
				     machine_mode known_mode
				     ATTRIBUTE_UNUSED,
				     unsigned int known_ret ATTRIBUTE_UNUSED,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REGNO (x) < reg_n_sets_max
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
		   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */
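
/* For instance (an illustration under the definition above): an unsigned
   quantity whose nonzero_bits are at most 0xffff, used in a 32-bit MODE,
   yields 31 - 15 == 16 "spare" bits below, so adding two such values
   cannot overflow.  */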
unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  return (unsignedp
	  ? (HWI_COMPUTABLE_MODE_P (mode)
	     ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
			       - floor_log2 (nonzero_bits (x, mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b  */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b  */
	  ;
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b  */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b  */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b  */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b  */
	  *pcomp_p = 1;
	break;

      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  *pop0 = op0;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  return 1;
}
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */
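
/* For example (an illustration, not target-specific): an LSHIFTRT done in
   QImode can be widened to SImode when nonzero_bits shows OP has no bits
   set outside QImode's mask, since the bits brought in from the left are
   then already zero; the LSHIFTRT leg below tests exactly this.  */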
static machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      machine_mode orig_mode, machine_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  if (orig_mode == mode)
    return mode;
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
	 to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
	  > (unsigned) (GET_MODE_PRECISION (mode)
			- GET_MODE_PRECISION (orig_mode)))
	return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
	return mode;

      /* We can also widen if the bits brought in will be masked off.  This
	 operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
	{
	  int care_bits = low_bitmask_len (orig_mode, outer_const);

	  if (care_bits >= 0
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
	    return mode;
	}
      /* fall through */

    case ROTATE:
      return orig_mode;

    /* ROTATERT is converted to ROTATE before we get here, so it is
       never expected.  */
    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode, tmode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;
  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
	  code = ROTATE;
	  count = bitsize - count;
	}

      shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
					 mode, outer_op, outer_const);
      machine_mode shift_unit_mode = GET_MODE_INNER (shift_mode);

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_unit_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      if (shift_mode == shift_unit_mode)
	{
	  /* An arithmetic right shift of a quantity known to be -1 or 0
	     is a no-op.  */
	  if (code == ASHIFTRT
	      && (num_sign_bit_copies (varop, shift_unit_mode)
		  == GET_MODE_PRECISION (shift_unit_mode)))
	    {
	      count = 0;
	      break;
	    }

	  /* If we are doing an arithmetic right shift and discarding all but
	     the sign bit copies, this is equivalent to doing a shift by the
	     bitsize minus one.  Convert it into that shift because it will
	     often allow other simplifications.  */

	  if (code == ASHIFTRT
	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;

	  /* We simplify the tests below and elsewhere by converting
	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	     `make_compound_operation' will convert it to an ASHIFTRT for
	     those machines (such as VAX) that don't have an LSHIFTRT.  */
	  if (code == ASHIFTRT
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	      && val_signbit_known_clear_p (shift_unit_mode,
					    nonzero_bits (varop,
							  shift_unit_mode)))
	    code = LSHIFTRT;

	  if (((code == LSHIFTRT
		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
	       || (code == ASHIFT
		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
		   && !((nonzero_bits (varop, shift_unit_mode) << count)
			& GET_MODE_MASK (shift_unit_mode))))
	      && !side_effects_p (varop))
	    varop = const0_rtx;
	}
      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new_rtx = expand_compound_operation (varop);
	  if (new_rtx != varop)
	    {
	      varop = new_rtx;
	      continue;
	    }
	  break;

	case MEM:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0),
					     MEM_ADDR_SPACE (varop))
	      && ! MEM_VOLATILE_P (varop)
	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
					 MODE_INT, 1)) != BLKmode)
	    {
	      new_rtx = adjust_address_nv (varop, tmode,
					   BYTES_BIG_ENDIAN ? 0
					   : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, mode, new_rtx);
	      count = 0;
	      continue;
	    }
	  break;

	case SUBREG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
		  > GET_MODE_SIZE (GET_MODE (varop)))
	      && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words
	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
	      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
		mode = GET_MODE (varop);
	      continue;
	    }
	  break;

	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						UINTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
	      && (GET_MODE_UNIT_BITSIZE (result_mode)
		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* fall through */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
	      && HWI_COMPUTABLE_MODE_P (result_mode)
	      && HWI_COMPUTABLE_MODE_P (mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_PRECISION (result_mode)
			       - GET_MODE_PRECISION (GET_MODE (varop))))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = GET_MODE_MASK (mode)
			 & ~((HOST_WIDE_INT_1U << first_count) - 1);

		  varop = simplify_and_const_int (NULL_RTX, result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
						varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && GET_MODE (varop) == shift_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (GET_MODE (varop) != result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
		  || (GET_MODE (varop) != result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
				       result_mode);

	      mask_rtx
		= simplify_const_binary_operation (code, result_mode, mask_rtx,
						   GEN_INT (count));

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || !CONST_INT_P (mask_rtx)
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used.  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && CONST_INT_P (XEXP (varop, 0))
		   && !CONST_INT_P (XEXP (varop, 1)))
	    {
	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
		 sure the result will be masked.  See PR70222.  */
	      if (code == LSHIFTRT
		  && mode != result_mode
		  && !merge_outer_ops (&outer_op, &outer_const, AND,
				       GET_MODE_MASK (result_mode)
				       >> orig_count, result_mode,
				       &complement_p))
		break;
	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
		 up outer sign extension (often left and right shift) is
		 hardly more efficient than the original.  See PR70429.  */
	      if (code == ASHIFTRT && mode != result_mode)
		break;

	      rtx new_rtx = simplify_const_binary_operation (code, mode,
							     XEXP (varop, 0),
							     GEN_INT (count));
	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;
	case NOT:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
	  continue;

	case IOR:
	case AND:
	case XOR:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.  If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (CONST_INT_P (XEXP (varop, 1))
	      /* We can't do this if we have (ashiftrt (xor)) and the
		 constant has its sign bit set in shift_mode with shift_mode
		 wider than result_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && result_mode != shift_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode))
	      && (new_rtx = simplify_const_binary_operation
		  (code, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done for
	     (ashiftrt (xor)) where we've widened the shift and the constant
	     changes the sign bit.  */
	  if (CONST_INT_P (XEXP (varop, 1))
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && result_mode != shift_mode
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode)))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;
	case EQ:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
	     says that the sign bit can be tested, FOO has mode MODE, C is
	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
	     that may be nonzero.  */
	  if (code == LSHIFTRT
	      && XEXP (varop, 1) == const0_rtx
	      && GET_MODE (XEXP (varop, 0)) == result_mode
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && HWI_COMPUTABLE_MODE_P (result_mode)
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;
	case NEG:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;
	case PLUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_PRECISION (result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
				  &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), result_mode)
		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && HWI_COMPUTABLE_MODE_P (result_mode)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    >> count)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    & nonzero_bits (XEXP (varop, 1),
					    result_mode)))
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }

	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && CONST_INT_P (XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (ASHIFT, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical).  See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && CONST_INT_P (XEXP (varop, 1))
	      && mode_signbit_p (result_mode, XEXP (varop, 1))
	      && (new_rtx = simplify_const_binary_operation
		  (code, result_mode,
		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
		   GEN_INT (count))) != 0
	      && CONST_INT_P (new_rtx)
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;
	case MINUS:
	  /* The following rules apply only to scalars.  */
	  if (shift_mode != shift_unit_mode)
	    break;

	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }
	  break;
	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);

	      varop_inner
		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
				    XEXP (varop_inner, 0),
				    GEN_INT
				    (count + INTVAL (XEXP (varop_inner, 1))));
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
				     outer_op, outer_const);

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    x = simplify_and_const_int (NULL_RTX, shift_mode, x,
				GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
	outer_const = trunc_int_for_mode (outer_const, result_mode);

      if (outer_op == AND)
	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
      else if (outer_op == SET)
	{
	  /* This means that we have determined that the result is
	     equivalent to a constant.  This should be rare.  */
	  if (!side_effects_p (x))
	    x = GEN_INT (outer_const);
	}
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
      else
	x = simplify_gen_binary (outer_op, result_mode, x,
				 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
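
/* As a usage sketch (hypothetical operands): simplify_shift_const
   (NULL_RTX, LSHIFTRT, SImode, VAROP, 31) extracts the most significant
   bit of VAROP; passing a null X forces construction of the (lshiftrt ...)
   rtx even when simplify_shift_const_1 finds nothing to simplify.  */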
static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  int old_icode;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
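
/* Note that combine marks an impossible match by embedding
   (clobber (const_int 0)) in a PARALLEL, so a pattern such as
   (hypothetical operands)

     (parallel [(set (reg:SI 0) (reg:SI 1))
		(clobber (const_int 0))])

   is rejected by the scan at the top of recog_for_combine_1 before
   recog is ever called.  */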
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */

static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      machine_mode mode = GET_MODE (x);
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && GET_MODE (XEXP (x, 0)) == mode)
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (mode) - size - start;

	  if (start != 0)
	    x = gen_rtx_LSHIFTRT (mode, XEXP (x, 0), GEN_INT (start));
	  else
	    x = XEXP (x, 0);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && SCALAR_INT_MODE_P (mode)
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = SUBREG_REG (XEXP (x, 0));
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && SCALAR_INT_MODE_P (mode)
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0)))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
      x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      machine_mode mode = GET_MODE (reg);
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
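
/* An illustrative example (hypothetical operands): with BITS_BIG_ENDIAN
   clear,

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))

   is rewritten by the code above into

     (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255))

   which is the AND/LSHIFTRT formulation named in the function comment.  */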
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

  if (insn_code_number < 0)
    undo_to_marker (marker);

  return insn_code_number;
}
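
/* Sketch of the intended use (a minimal sketch, assuming the caller is
   try_combine): the caller passes the combined pattern by address, as in
   recog_for_combine (&newpat, i3, &new_notes); if the ZERO_EXTEND /
   ZERO_EXTRACT rewrite above still fails to match, undo_to_marker puts
   the pattern back exactly as it was before change_zero_ext ran.  */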
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (isize < osize)
	return gen_rtx_SUBREG (omode, x, 0);

      if (WORDS_BIG_ENDIAN)
	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
	 unchanged.  */
      if (BYTES_BIG_ENDIAN)
	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode);
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
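
/* An illustrative example (hypothetical operands): on a little-endian
   target, asking for the QImode lowpart of (mem:SI (reg:SI 101)) yields
   (mem:QI (reg:SI 101)) via adjust_address_nv with offset 0, while an
   operand this function cannot analyze falls through to the
   (clobber (const_int 0)) failure value.  */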
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */

static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  unsigned int mode_width = GET_MODE_PRECISION (mode);
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && mode_width - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (mode))
      && (nonzero_bits (op0, mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && num_sign_bit_copies (op0, mode) == mode_width)
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == HOST_WIDE_INT_1U << (mode_width - 1))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == HOST_WIDE_INT_1U << (mode_width - 1))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
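
/* Worked examples: in a 32-bit mode, (ltu X (const_int 0x80000000)) is
   rewritten by the LTU arm above into (ge X (const_int 0)), and
   (gtu X (const_int 0)) becomes (ne X (const_int 0)).  */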
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  machine_mode mode, tmode;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
	      == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (GET_MODE (op0))
		  - (GET_MODE_PRECISION
		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}

      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}

      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
		  <= HOST_BITS_PER_WIDE_INT)
	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					      GET_MODE (SUBREG_REG (inner_op0)))))
	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					      GET_MODE (SUBREG_REG (inner_op1))))))
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    for (tmode = GET_CLASS_NARROWEST_MODE
		 (GET_MODE_CLASS (GET_MODE (op0)));
		 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (! changed)
	    break;
	}

      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }
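
  /* An illustrative example (hypothetical operands): by the NOT/NOT rule
     above, (lt (not (reg:SI 100)) (not (reg:SI 101))) becomes
     (gt (reg:SI 100) (reg:SI 101)), since bitwise complement reverses
     signed order.  */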
  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we only are concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */

  while (CONST_INT_P (op1))
    {
      machine_mode mode = GET_MODE (op0);
      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (mode) != MODE_INT
	  && ! (mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
	op0 = force_to_mode (op0, mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (mode) - 1),
			     0);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */
	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
			 == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;
, 0);
11971 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11972 (compare FOO CONST) if CONST fits in FOO's mode and we
11973 are either testing inequality or have an unsigned
11974 comparison with ZERO_EXTEND or a signed comparison with
11975 SIGN_EXTEND. But don't do it if we don't have a compare
11976 insn of the given mode, since we'd have to revert it
11977 later on, and then we wouldn't know whether to sign- or
11979 mode
= GET_MODE (XEXP (op0
, 0));
11980 if (GET_MODE_CLASS (mode
) == MODE_INT
11981 && ! unsigned_comparison_p
11982 && HWI_COMPUTABLE_MODE_P (mode
)
11983 && trunc_int_for_mode (const_op
, mode
) == const_op
11984 && have_insn_for (COMPARE
, mode
))
11986 op0
= XEXP (op0
, 0);
11992 /* Check for the case where we are comparing A - C1 with C2, that is
11994 (subreg:MODE (plus (A) (-C1))) op (C2)
11996 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11997 comparison in the wider mode. One of the following two conditions
11998 must be true in order for this to be valid:
12000 1. The mode extension results in the same bit pattern being added
12001 on both sides and the comparison is equality or unsigned. As
12002 C2 has been truncated to fit in MODE, the pattern can only be
12005 2. The mode extension results in the sign bit being copied on
12008 The difficulty here is that we have predicates for A but not for
12009 (A - C1) so we need to check that C1 is within proper bounds so
12010 as to perturbate A as little as possible. */
12012 if (mode_width
<= HOST_BITS_PER_WIDE_INT
12013 && subreg_lowpart_p (op0
)
12014 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0
))) > mode_width
12015 && GET_CODE (SUBREG_REG (op0
)) == PLUS
12016 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1)))
12018 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
12019 rtx a
= XEXP (SUBREG_REG (op0
), 0);
12020 HOST_WIDE_INT c1
= -INTVAL (XEXP (SUBREG_REG (op0
), 1));
12023 && (unsigned HOST_WIDE_INT
) c1
12024 < HOST_WIDE_INT_1U
<< (mode_width
- 1)
12025 && (equality_comparison_p
|| unsigned_comparison_p
)
12026 /* (A - C1) zero-extends if it is positive and sign-extends
12027 if it is negative, C2 both zero- and sign-extends. */
12028 && ((0 == (nonzero_bits (a
, inner_mode
)
12029 & ~GET_MODE_MASK (mode
))
12031 /* (A - C1) sign-extends if it is positive and 1-extends
12032 if it is negative, C2 both sign- and 1-extends. */
12033 || (num_sign_bit_copies (a
, inner_mode
)
12034 > (unsigned int) (GET_MODE_PRECISION (inner_mode
)
12037 || ((unsigned HOST_WIDE_INT
) c1
12038 < HOST_WIDE_INT_1U
<< (mode_width
- 2)
12039 /* (A - C1) always sign-extends, like C2. */
12040 && num_sign_bit_copies (a
, inner_mode
)
12041 > (unsigned int) (GET_MODE_PRECISION (inner_mode
)
12042 - (mode_width
- 1))))
12044 op0
= SUBREG_REG (op0
);
12049 /* If the inner mode is narrower and we are extracting the low part,
12050 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12051 if (subreg_lowpart_p (op0
)
12052 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0
))) < mode_width
)
12054 else if (subreg_lowpart_p (op0
)
12055 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_INT
12056 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0
))) == MODE_INT
12057 && (code
== NE
|| code
== EQ
)
12058 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0
)))
12059 <= HOST_BITS_PER_WIDE_INT
)
12060 && !paradoxical_subreg_p (op0
)
12061 && (nonzero_bits (SUBREG_REG (op0
),
12062 GET_MODE (SUBREG_REG (op0
)))
12063 & ~GET_MODE_MASK (GET_MODE (op0
))) == 0)
12065 /* Remove outer subregs that don't do anything. */
12066 tem
= gen_lowpart (GET_MODE (SUBREG_REG (op0
)), op1
);
12068 if ((nonzero_bits (tem
, GET_MODE (SUBREG_REG (op0
)))
12069 & ~GET_MODE_MASK (GET_MODE (op0
))) == 0)
12071 op0
= SUBREG_REG (op0
);
	case ZERO_EXTEND:
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;

	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;

	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;
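
	  /* An illustrative example (hypothetical operands): by the PLUS
	     arm above, (eq (plus (reg:SI 100) (const_int 4)) (const_int 9))
	     becomes (eq (reg:SI 100) (const_int 5)).  */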
	case EQ:  case NE:
	case UNEQ:  case LTGT:
	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
	case UNORDERED:  case ORDERED:
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;
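
	  /* An illustrative example (hypothetical operands): by the IOR
	     rule above, (lt (ior (plus (reg:SI 100) (const_int -1))
	     (reg:SI 100)) (const_int 0)) becomes
	     (le (reg:SI 100) (const_int 0)).  */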
	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }

	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	       (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	       -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if (SCALAR_INT_MODE_P (tmode)
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND no longer masks the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			     << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;
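
	  /* An illustrative example (hypothetical operands): in SImode,
	     (eq (and (reg:SI 100) (const_int 0x80000000)) (const_int 0))
	     is rewritten by the sign-bit rule above into
	     (ge (reg:SI 100) (const_int 0)).  */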
	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
					 MODE_INT, 1)) != BLKmode
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
					 MODE_INT, 1)) != BLKmode
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* Fall through.  */

	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op <<= INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
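
  /* An illustrative example (hypothetical operands): in SImode,
     (ne (lshiftrt (reg:SI 100) (const_int 31)) (const_int 0)) becomes
     (lt (reg:SI 100) (const_int 0)); the shift isolates the sign bit,
     so only LT/GE information remains.  */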
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outmost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (GET_MODE (op0), op1);
	    }
	}
      else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
		<= HOST_BITS_PER_WIDE_INT)
	       && (nonzero_bits (SUBREG_REG (op0),
				 GET_MODE (SUBREG_REG (op0)))
		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	{
	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);

	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }

  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  mode = GET_MODE (op0);
  if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    for (tmode = GET_MODE_WIDER_MODE (mode);
	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
	 tmode = GET_MODE_WIDER_MODE (tmode))
      if (have_insn_for (COMPARE, tmode))
	{
	  int zero_extended;

	  /* If this is a test for negative, we can make an explicit
	     test of the sign bit.  Test this first so we can use
	     a paradoxical subreg to extend OP0.  */

	  if (op1 == const0_rtx && (code == LT || code == GE)
	      && HWI_COMPUTABLE_MODE_P (mode))
	    {
	      unsigned HOST_WIDE_INT sign
		= HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
	      op0 = simplify_gen_binary (AND, tmode,
					 gen_lowpart (tmode, op0),
					 gen_int_mode (sign, tmode));
	      code = (code == LT) ? NE : EQ;
	      break;
	    }

	  /* If the only nonzero bits in OP0 and OP1 are those in the
	     narrower mode and this is an equality or unsigned comparison,
	     we can use the wider mode.  Similarly for sign-extended
	     values, in which case it is true for all comparisons.  */
	  zero_extended = ((code == EQ || code == NE
			    || code == GEU || code == GTU
			    || code == LEU || code == LTU)
			   && (nonzero_bits (op0, tmode)
			       & ~GET_MODE_MASK (mode)) == 0
			   && ((CONST_INT_P (op1)
				|| (nonzero_bits (op1, tmode)
				    & ~GET_MODE_MASK (mode)) == 0)));

	  if (zero_extended
	      || ((num_sign_bit_copies (op0, tmode)
		   > (unsigned int) (GET_MODE_PRECISION (tmode)
				     - GET_MODE_PRECISION (mode)))
		  && (num_sign_bit_copies (op1, tmode)
		      > (unsigned int) (GET_MODE_PRECISION (tmode)
					- GET_MODE_PRECISION (mode)))))
	    {
	      /* If OP0 is an AND and we don't have an AND in MODE either,
		 make a new AND in the proper mode.  */
	      if (GET_CODE (op0) == AND
		  && !have_insn_for (AND, mode))
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode,
							XEXP (op0, 0)),
					   gen_lowpart (tmode,
							XEXP (op0, 1)));

	      if (zero_extended)
		{
		  op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
		  op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
		}
	      else
		{
		  op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
		  op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
		}
	      break;
	    }
	}

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
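
/* A worked example (hypothetical operands): for
   x = (plus (reg:SI 100) (reg:SI 100)) where both operands are the same
   shared rtx, the shortcut above returns 1 + 2 * count_rtxs (reg) = 3
   without walking the second, identical subexpression.  */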
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpression we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */
      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
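
/* An illustrative example (hypothetical operands): for "x++"-style RTL,
   (set (reg:SI 100) (plus (reg:SI 100) (const_int 1))), VALUE mentions
   REG, so the code above substitutes the previous value of reg 100 (or a
   CLOBBER) into VALUE before recording it.  */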
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle SUBREG in
	 some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      gen_lowpart (GET_MODE (dest),
					   SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
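
/* An illustrative example (hypothetical operands, 64-bit target): for
   (set (subreg:SI (reg:DI 100) 0) (reg:SI 101)) the whole of reg 100 is
   not assigned, but because the store is to the lowpart and DImode fits
   in a word, the recorded value is gen_lowpart (DImode, (reg:SI 101))
   rather than "unknown".  */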
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
          || REGNO (SET_DEST (set)) != regno
          || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
        {
          links = links->next;
          continue;
        }

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
        {
          if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
            rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
        }

      if (REG_P (SET_SRC (set)))
        {
          regno = REGNO (SET_SRC (set));
          links = LOG_LINKS (insn);
        }
      else
        break;
    }
}
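/* Illustrative example (not part of the pass): a promoted SUBREG as
   handled above typically prints as

       (subreg/s/u:SI (reg:DI 100) 0)

   where /s marks SUBREG_PROMOTED_VAR_P and /u records that the target
   keeps the inner DImode register zero-extended from SImode.  For such a
   subreg, the code above masks last_set_nonzero_bits of register 100
   with GET_MODE_MASK (SImode), i.e. 0xffffffff.  */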
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
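/* Illustrative sketch (hypothetical helper, not part of the pass): a
   caller that wants the low part of X in MODE could use the predicate
   above to choose between a cheap subreg and an explicit truncation on
   non-TRULY_NOOP_TRUNCATION targets.  */
#if 0
static rtx
example_truncate_or_subreg (machine_mode mode, rtx x)
{
  /* If X is a register already known to hold a value truncated to MODE,
     a lowpart subreg is safe; otherwise emit a real TRUNCATE.  */
  if (REG_P (x) && reg_truncated_to_mode (mode, x))
    return gen_lowpart (mode, x);
  return gen_rtx_TRUNCATE (mode, x);
}
#endif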
/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
   to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
        return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || (GET_MODE_SIZE (truncated_mode)
          < GET_MODE_SIZE (rsp->truncated_to_mode)))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
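/* Illustrative usage (a sketch of how the combiner's main scan invokes
   the callback above for each insn it records):  */
#if 0
  note_uses (&PATTERN (insn), record_truncated_values, NULL);
#endif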
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
        {
          reg_stat_type *rsp = &reg_stat[j];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                         (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                          regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpressions we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
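/* Illustrative example (not part of the pass): with REPLACE nonzero, a
   cached value whose register has since become invalid, say

       (plus:SI (reg:SI 100) (const_int 4))

   is rewritten in place to

       (plus:SI (clobber:SI (const_int 0)) (const_int 4))

   preserving the shape of the expression (still useful to nonzero_bits
   and num_sign_bit_copies) while marking the unknown operand.  */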
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (GET_MODE_PRECISION (rsp->last_set_mode)
      < GET_MODE_PRECISION (GET_MODE (x)))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
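/* Illustrative sketch (hypothetical, not part of the pass): a typical
   query of the machinery above.  */
#if 0
  rtx prev = get_last_value (reg);
  if (prev && CONST_INT_P (prev))
    {
      /* REG is known to have held this constant at its most recent
         recorded set; it may only be substituted at points that do not
         cross another set (see use_crosses_set_p below).  */
    }
#endif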
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
         because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
        return 1;
#endif
      for (; regno < endreg; regno++)
        {
          reg_stat_type *rsp = &reg_stat[regno];
          if (rsp->last_set
              && rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) > from_luid)
            return 1;
        }
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
              return 1;
        }
      else if (fmt[i] == 'e'
               && use_crosses_set_p (XEXP (x, i), from_luid))
        return 1;
    }

  return 0;
}
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
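/* Illustrative sketch (hypothetical; FLAGS_REGNUM stands in for a
   target's condition-code hard register): a combination that must
   clobber the flags could be guarded like this.  */
#if 0
  if (!reg_dead_at_p (gen_rtx_REG (CCmode, FLAGS_REGNUM), i3))
    return 0;  /* The flags are live here; adding a clobber is unsafe.  */
#endif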
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
      /* CC0 must die in the insn after it is set, so we don't need to take
         special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
                  > GET_MODE_SIZE (GET_MODE (x))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
                            < GET_MODE_SIZE (GET_MODE (x)))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some
         piece of every register in the expression is used by
         this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && (((GET_MODE_SIZE (GET_MODE (dest))
                    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
                  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
                       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
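/* Illustrative example (not part of the pass): X = (reg:SI 100) is a
   bit-field target of

       (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
            (reg:SI 101))

   because only eight bits of the destination are written; a REG_DEAD
   note for register 100 therefore must not be treated as if the whole
   register were stored when distribute_notes places death notes below.  */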
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              int old_size, args_size = INTVAL (XEXP (note, 0));
              /* fixup_args_size_notes looks at REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
                 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
              gcc_assert (old_size != args_size
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;

        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if it is for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for registers which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }
          break;

        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They must
             be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             a JUMP_LABEL.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;

              place = 0;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;

              place2 = 0;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;

        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;

              /* If the new I2 sets the same register that is marked dead
                 in the note, we do not know where to put the note.
                 Give up.  */
              if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                break;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is all
                     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
                     into a REG_UNUSED note instead.  Don't delete sets to
                     global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn, NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;

                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not if it is totally replaced,
             which is what `dead_or_set_p' checks, so also check for it being
             set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.]  */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs[i][reg_raw_mode[i]])
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;
        }

      if (place2)
        add_shallow_copy_of_reg_note (place2, note);
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
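/* Illustrative usage (a sketch mirroring the REG_DEAD handling in
   distribute_notes above): when an insn is deleted, its links are
   detached and then redistributed.  */
#if 0
  struct insn_link *links = LOG_LINKS (insn);
  LOG_LINKS (insn) = NULL;
  distribute_links (links);
#endif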
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
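/* Illustrative example (not part of the pass): with

       EXPR  = (plus:SI (reg:SI 100) (reg:SI 101))
       EQUIV = (plus:SI (reg:SI 100) (const_int 8))

   unmentioned_reg_p (EQUIV, EXPR) is false, since every register of
   EQUIV appears in EXPR; EQUIV is an acceptable specialization in which
   (reg:SI 101) was replaced by a constant.  With the arguments swapped it
   would be true, because (reg:SI 101) is not mentioned in EQUIV.  */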
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

DEBUG_FUNCTION void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}