/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.
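   As a hypothetical illustration (insn and register numbers invented),
   given the linked pair

	(set (reg 100) (plus (reg 101) (const_int 4)))
	(set (reg 102) (mult (reg 100) (reg 103)))

   we would substitute the first SET into the second and try the merged
   pattern

	(set (reg 102) (mult (plus (reg 101) (const_int 4)) (reg 103)))

   keeping it only if the machine description recognizes it.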
   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.
   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "params.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */
  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */
  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;
  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */
  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */
  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};

static vec<reg_stat_type> reg_stat;
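
/* A hypothetical illustration of how this information pays off (register
   number invented): if reg_stat[100].nonzero_bits == 0xff, say because
   reg 100 is only ever loaded with a zero-extending byte load, then a
   later (and (reg 100) (const_int 255)) is known to be redundant and
   can be simplified to just (reg 100).  */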
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;
/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;
#define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
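
/* A minimal usage sketch of the iterator above (DEST and INSN are assumed
   to be in scope):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, insn)
       if (link->regno == REGNO (dest))
	 break;

   On exit LINK is the matching link, or null if none was found.  */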
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;
/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;
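
/* An illustrative sketch of how the undo machinery is used (not a
   definitive trace): SUBST (XEXP (x, 0), new_rtx) stores new_rtx through
   &XEXP (x, 0) and pushes an UNDO_RTX record holding the old value and
   its address onto undobuf.undos.  If the tentative combination fails,
   undo_all walks that list and writes each old_contents back through
   where, restoring the original rtl; records end up on undobuf.frees
   for reuse by later attempts.  */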
/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
						unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, machine_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */

static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
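
/* A minimal usage sketch: SUBST (XEXP (x, 0), const0_rtx) replaces the
   first operand of X with (const_int 0) while recording the old operand,
   so undo_all can restore it if the tentative combination is rejected.  */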
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */
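
/* A worked illustration with invented numbers: combining I2 (cost 4) and
   I3 (cost 5) into a single replacement pattern of cost 8 is allowed,
   since 8 <= 4 + 5; a replacement of cost 10 would be rejected, because
   both totals are known (> 0) and 10 > 9.  */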
static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}
/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use, *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
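
/* A hypothetical illustration (insn and register numbers invented): in a
   block containing

	insn 10: (set (reg 100) ...)
	insn 11: (set (reg 101) (plus (reg 100) (const_int 1)))

   the backward walk above first records insn 11 as the pending use of
   reg 100, then, on reaching the def in insn 10, adds a LOG_LINK
   (insn 10, reg 100) to insn 11's LOG_LINKS.  */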
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
	      if (REG_NOTE_KIND (links) == REG_INC)
		set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						  insn);

	    /* Record the current insn_rtx_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
						optimize_this_for_speed_p);
	    if (dump_file)
	      fprintf (dump_file, "insn_cost %d: %d\n",
		       INSN_UID (insn), INSN_COST (insn));
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && last_combined_insn->deleted ())
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BARRIER_P (last_combined_insn)
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */

	  if (HAVE_cc0)
	    FOR_EACH_LOG_LINK (links, insn)
	      if (NONJUMP_INSN_P (links->insn)
		  && GET_CODE (PATTERN (links->insn)) == SET
		  && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		  && (prev = prev_nonnote_insn (links->insn)) != 0
		  && NONJUMP_INSN_P (prev)
		  && sets_cc0_p (PATTERN (prev))
		  && (next = try_combine (insn, links->insn,
					  prev, NULL, &new_direct_jump_p,
					  last_combined_insn)) != 0)
		goto retry;

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may have already been marked
		     dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun, "insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed; */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  if (GET_MODE_PRECISION (mode) < prec
      && CONST_INT_P (src)
      && INTVAL (src) > 0
      && val_signbit_known_set_p (mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));

  return src;
}
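
/* A worked example of the above, assuming 8-bit QImode and prec == 32:
   src == (const_int 0x80) has the QImode sign bit set, so it becomes
   0x80 | ~0xff == (const_int -128); nonzero_bits then conservatively
   assumes the machine sign-extends the immediate.  */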
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      bits = nonzero_bits (src, nonzero_bits_mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	  rsp->sign_bit_copies = 1;
	}
    }
}
1767 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1768 optionally insns that were previously combined into I3 or that will be
1769 combined into the merger of INSN and I3. The order is PRED, PRED2,
1770 INSN, SUCC, SUCC2, I3.
1772 Return 0 if the combination is not allowed for any reason.
1774 If the combination is allowed, *PDEST will be set to the single
1775 destination of INSN and *PSRC to the single source, and this function
1779 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1780 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1781 rtx
*pdest
, rtx
*psrc
)
1788 bool all_adjacent
= true;
1789 int (*is_volatile_p
) (const_rtx
);
1795 if (next_active_insn (succ2
) != i3
)
1796 all_adjacent
= false;
1797 if (next_active_insn (succ
) != succ2
)
1798 all_adjacent
= false;
1800 else if (next_active_insn (succ
) != i3
)
1801 all_adjacent
= false;
1802 if (next_active_insn (insn
) != succ
)
1803 all_adjacent
= false;
1805 else if (next_active_insn (insn
) != i3
)
1806 all_adjacent
= false;
1808 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1809 or a PARALLEL consisting of such a SET and CLOBBERs.
1811 If INSN has CLOBBER parallel parts, ignore them for our processing.
1812 By definition, these happen during the execution of the insn. When it
1813 is merged with another insn, all bets are off. If they are, in fact,
1814 needed and aren't also supplied in I3, they may be added by
1815 recog_for_combine. Otherwise, it won't match.
1817 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1820 Get the source and destination of INSN. If more than one, can't
1823 if (GET_CODE (PATTERN (insn
)) == SET
)
1824 set
= PATTERN (insn
);
1825 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1826 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1828 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1830 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1832 switch (GET_CODE (elt
))
1834 /* This is important to combine floating point insns
1835 for the SH4 port. */
1837 /* Combining an isolated USE doesn't make sense.
1838 We depend here on combinable_i3pat to reject them. */
1839 /* The code below this loop only verifies that the inputs of
1840 the SET in INSN do not change. We call reg_set_between_p
1841 to verify that the REG in the USE does not change between
1843 If the USE in INSN was for a pseudo register, the matching
1844 insn pattern will likely match any register; combining this
1845 with any other USE would only be safe if we knew that the
1846 used registers have identical values, or if there was
1847 something to tell them apart, e.g. different modes. For
1848 now, we forgo such complicated tests and simply disallow
1849 combining of USES of pseudo registers with any other USE. */
1850 if (REG_P (XEXP (elt
, 0))
1851 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1853 rtx i3pat
= PATTERN (i3
);
1854 int i
= XVECLEN (i3pat
, 0) - 1;
1855 unsigned int regno
= REGNO (XEXP (elt
, 0));
1859 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1861 if (GET_CODE (i3elt
) == USE
1862 && REG_P (XEXP (i3elt
, 0))
1863 && (REGNO (XEXP (i3elt
, 0)) == regno
1864 ? reg_set_between_p (XEXP (elt
, 0),
1865 PREV_INSN (insn
), i3
)
1866 : regno
>= FIRST_PSEUDO_REGISTER
))
1873 /* We can ignore CLOBBERs. */
1878 /* Ignore SETs whose result isn't used but not those that
1879 have side-effects. */
1880 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1881 && insn_nothrow_p (insn
)
1882 && !side_effects_p (elt
))
1885 /* If we have already found a SET, this is a second one and
1886 so we cannot combine with this insn. */
1894 /* Anything else means we can't combine. */
1900 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1901 so don't do anything with it. */
1902 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1911 /* The simplification in expand_field_assignment may call back to
1912 get_last_value, so set safe guard here. */
1913 subst_low_luid
= DF_INSN_LUID (insn
);
1915 set
= expand_field_assignment (set
);
1916 src
= SET_SRC (set
), dest
= SET_DEST (set
);
1918 /* Do not eliminate user-specified register if it is in an
1919 asm input because we may break the register asm usage defined
1920 in GCC manual if allow to do so.
1921 Be aware that this may cover more cases than we expect but this
1922 should be harmless. */
1923 if (REG_P (dest
) && REG_USERVAR_P (dest
) && HARD_REGISTER_P (dest
)
1924 && extract_asm_operands (PATTERN (i3
)))
  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after SUCC but before I3.  */
      || (succ2
	  && (reg_used_between_p (dest, succ2, i3)
	      || reg_used_between_p (dest, succ, succ2)))
      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;
  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;
  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
	return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
		  ? volatile_refs_p
		  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (JUMP_P (i3)
	      || reg_used_between_p (XEXP (link, 0), insn, i3)
	      || (pred != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	      || (pred2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	      || (succ != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	      || (succ2 != NULL_RTX
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
	return 0;
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
	  && ! all_adjacent)
	return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */
static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = expand_field_assignment (x);
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
					GET_MODE (inner_dest))))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
	return 0;
      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (subdest))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
	      || (REGNO (subdest) != ARG_POINTER_REGNUM
		  || ! fixed_regs [REGNO (subdest)]))
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }

  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}
/* Return 1 if X is an arithmetic expression that contains a multiplication
   and division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	  || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}
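
/* For illustration (an editorial sketch, not from the original sources):
   contains_muldiv returns 1 for an expression such as

	(plus:SI (mult:SI (reg:SI 100) (const_int 3)) (reg:SI 101))

   but returns 0 for

	(plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101))

   because a multiplication by the power of two 4 is really a shift.  */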
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (! INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}
struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  if (regno > info->regno)
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}
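
/* A worked example (illustrative only, not from the original sources):
   if INFO tracks regno 8 with nregs 4 (mask 0b1111) and X is a store to
   hard regs 9..10, then new_mask = (2U << 1) - 1 = 0b11, shifted left by
   9 - 8 = 1 giving 0b0110; clearing those bits leaves info->mask = 0b1001,
   i.e. only parts 8 and 11 of the return value are still live.  */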
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
	    && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !added_sets
	  && !REG_USERVAR_P (x));
}
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}
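
/* For example (an illustrative sketch, not from the original sources),
   with REG being (reg:DI 100), reg_subword_p accepts destinations such as

	(subreg:SI (reg:DI 100) 0)
	(strict_low_part (subreg:SI (reg:DI 100) 0))

   but rejects (reg:DI 100) itself and subregs of other registers.  */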
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
	  break;
    }
}
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}
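
/* For instance (an illustrative sketch, not from the original sources),
   with N == 2 this accepts

	(parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
		   (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))
		   (clobber (reg:CC 17))])

   but rejects a PARALLEL whose second element sets a MEM instead of a
   REG, or one with fewer than two SETs.  */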
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
	return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
	  return false;
    }

  return true;
}
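
/* The order matters: a PARALLEL such as (illustrative only)

	(parallel [(set (reg:SI 100) (reg:SI 101))
		   (set (reg:SI 101) (reg:SI 100))])

   performs both moves in parallel, so executing the SETs one after the
   other would change semantics; the reg_referenced_p test above rejects
   exactly this kind of cross-reference between the SETs.  */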
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;
  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
	return 0;

      for (i = 0; i < 4; i++)
	{
	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);
	  rtx src;
	  if (!set)
	    continue;
	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	    {
	      ngood += 2;
	      break;
	    }
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	    ngood++;
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
	    nshift++;
	}

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	     an extension.  */
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
	ngood += 2;

      if (ngood < 2 && nshift < 2)
	return 0;
    }
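
  /* A typical profitable four-insn shape looks like this sketch (an
     editorial illustration, not from the original sources):

	I0: (set (reg:SI 100) (mem:SI (reg:SI 99)))
	I1: (set (reg:SI 101) (and:SI (reg:SI 100) (const_int -256)))
	I2: (set (reg:SI 102) (ior:SI (reg:SI 101) (reg:SI 103)))
	I3: (set (mem:SI (reg:SI 99)) (reg:SI 102))

     I0 loads the memory that I3 stores back, and I1/I2 mask and merge a
     new field into it, so the whole sequence may match a bitfield-insert
     pattern on the target.  */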
  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));
    }
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
	if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	     || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	    && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					SET_DEST (XVECEXP (p2, 0, i))))
	  break;

      /* Make sure this PARALLEL is not an asm.  We do not allow combining
	 that usually (see can_combine_p), so do not here either.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
	if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	    && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
	  break;

      if (i == XVECLEN (p2, 0))
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      combine_merges++;

	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
	      i2src = SET_SRC (XVECEXP (p2, 0, i));
	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we validate
		 the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }
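
  /* Concretely (an illustrative sketch, not from the original sources),
     with

	I2: (parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
		       (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])
	I3: (set (mem:SI (reg:SI 105)) (reg:SI 100))

     and reg 100 dead after I3, the SUBST above rewrites the first output
     of I2 to store directly into the structure slot:

	(parallel [(set (mem:SI (reg:SI 105))
			(div:SI (reg:SI 98) (reg:SI 99)))
		   (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])  */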
  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (CONST_INT_P (XEXP (dest, 1))
	      && CONST_INT_P (XEXP (dest, 2)))
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  width = GET_MODE_PRECISION (GET_MODE (dest));
	  offset = 0;
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
	    offset += GET_MODE_PRECISION (GET_MODE (dest));
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0)
	{
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp_expr);

	  wide_int o
	    = wi::insert (rtx_mode_t (outer, GET_MODE (SET_DEST (temp_expr))),
			  rtx_mode_t (inner, GET_MODE (dest)),
			  offset, width);

	  combine_merges++;
	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
	  i2dest = SET_DEST (temp_expr);
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  /* Replace the source in I2 with the new constant and make the
	     resulting insn the new pattern for I3.  Then skip to where we
	     validate the pattern.  Everything was set up above.  */
	  SUBST (SET_SRC (temp_expr),
		 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));

	  newpat = PATTERN (i2);

	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
	  goto validate_replacement;
	}
    }
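
  /* A worked example (illustrative only, not from the original sources):
     if I2 is (set (reg:SI 100) (const_int 0x12345678)) and I3 is
     (set (subreg:HI (reg:SI 100) 0) (const_int 0xabcd)), then on a
     little-endian target offset = 0 and width = 16, and wi::insert
     produces 0x1234abcd, so I3 becomes

	(set (reg:SI 100) (const_int 0x1234abcd))

     and both original constants are merged into a single load.  */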
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
	     SET_DEST (PATTERN (i1)));

      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }
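
  /* For example (an illustrative sketch, not from the original sources),
     an I2 of the form

	(parallel [(set (reg:CC 17)
			(compare:CC (plus:SI (reg:SI 100) (const_int -1))
				    (const_int 0)))
		   (set (reg:SI 100)
			(plus:SI (reg:SI 100) (const_int -1)))])

     is rewritten as the dummy I1

	(set (reg:SI 100) (plus:SI (reg:SI 100) (const_int -1)))

     followed by the new I2

	(set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))

     which can then combine with a following conditional branch into a
     decrement-and-branch insn.  */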
  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
	 never appear in the insn stream so giving it the same INSN_UID
	 as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }
  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
				 &i1dest, &i1src))
      || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
				 &i0dest, &i0src)))
    {
      undo_all ();
      return 0;
    }
  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
  i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
  i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
  i0dest_killed = i0 && dead_or_set_p (i0, i0dest);

  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
			     && reg_overlap_mentioned_p (i0dest, i2src))));
  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
			  &i3dest_killed))
    {
      undo_all ();
      return 0;
    }
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;
  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like

	mov r3,(r3)+

     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
    if (AUTO_INC_DEC)
      for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == REG_INC
	    && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
		|| (i1 != 0
		    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
	  {
	    undo_all ();
	    return 0;
	  }
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
		     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
		     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			 && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;
  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }
  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
	i0pat = copy_rtx (PATTERN (i0));
    }
  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;
  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
      rtx newpat_dest;
      rtx *cc_use_loc = NULL;
      rtx_insn *cc_use_insn = NULL;
      rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
      machine_mode compare_mode, orig_compare_mode;
      enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;

      newpat = PATTERN (i3);
      newpat_dest = SET_DEST (newpat);
      compare_mode = orig_compare_mode = GET_MODE (newpat_dest);

      if (undobuf.other_insn == 0
	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
					    &cc_use_insn)))
	{
	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
	  compare_code = simplify_compare_const (compare_code,
						 GET_MODE (i2dest), op0, &op1);
	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
	}

      /* Do the rest only if op1 is const0_rtx, which may be the
	 result of simplification.  */
      if (op1 == const0_rtx)
	{
	  /* If a single use of the CC is found, prepare to modify it
	     when SELECT_CC_MODE returns a new CC-class mode, or when
	     the above simplify_compare_const() returned a new comparison
	     operator.  undobuf.other_insn is assigned the CC use insn
	     when modifying it.  */
	  if (cc_use_loc)
	    {
#ifdef SELECT_CC_MODE
	      machine_mode new_mode
		= SELECT_CC_MODE (compare_code, op0, op1);
	      if (new_mode != orig_compare_mode
		  && can_change_dest_mode (SET_DEST (newpat),
					   added_sets_2, new_mode))
		{
		  unsigned int regno = REGNO (newpat_dest);
		  compare_mode = new_mode;
		  if (regno < FIRST_PSEUDO_REGISTER)
		    newpat_dest = gen_rtx_REG (compare_mode, regno);
		  else
		    {
		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		      newpat_dest = regno_reg_rtx[regno];
		    }
		}
#endif
	      /* Cases for modifying the CC-using comparison.  */
	      if (compare_code != orig_compare_code
		  /* ??? Do we need to verify the zero rtx?  */
		  && XEXP (*cc_use_loc, 1) == const0_rtx)
		{
		  /* Replace cc_use_loc with entire new RTX.  */
		  SUBST (*cc_use_loc,
			 gen_rtx_fmt_ee (compare_code, compare_mode,
					 newpat_dest, const0_rtx));
		  undobuf.other_insn = cc_use_insn;
		}
	      else if (compare_mode != orig_compare_mode)
		{
		  /* Just replace the CC reg with a new mode.  */
		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
		  undobuf.other_insn = cc_use_insn;
		}
	    }

	  /* Now we modify the current newpat:
	     First, SET_DEST(newpat) is updated if the CC mode has been
	     altered.  For targets without SELECT_CC_MODE, this should be
	     optimized away.  */
	  if (compare_mode != orig_compare_mode)
	    SUBST (SET_DEST (newpat), newpat_dest);
	  /* This is always done to propagate i2src into newpat.  */
	  SUBST (SET_SRC (newpat),
		 gen_rtx_COMPARE (compare_mode, op0, op1));
	  /* Create new version of i2pat if needed; the below PARALLEL
	     creation needs this to work correctly.  */
	  if (! rtx_equal_p (i2src, op0))
	    i2pat = gen_rtx_SET (i2dest, op0);
	  i2_is_used = 1;
	}
    }
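
  /* Schematically (an illustrative sketch, not from the original sources),
     when I2 is (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102))) and
     I3 compares reg 100 with zero, the code above turns I3 into

	(set (reg:CC 17) (compare:CC (plus:SI (reg:SI 101) (reg:SI 102))
				     (const_int 0)))

     and the PARALLEL built later adds back the arithmetic SET, matching
     the add-and-set-flags patterns many targets provide.  */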
  if (i2_is_used == 0)
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
	    }

	  subst_low_luid = DF_INSN_LUID (i2);
	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
	}

      n_occurrences = 0;		/* `subst' counts here */
      subst_low_luid = DF_INSN_LUID (i2);

      /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
	 copy of I2SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I1SRC for I1DEST
	 later.  Likewise if I0 feeds into I2, either directly or indirectly
	 through I1, and I0DEST is in I0SRC.  */
      newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
		      (i1_feeds_i2_n && i1dest_in_i1src)
		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
			  && i0dest_in_i0src));
      substed_i2 = 1;

      /* Record whether I2's body now appears within I3's body.  */
      i2_is_used = n_occurrences;
    }
  /* If we already got a failure, don't try to do more.  Otherwise, try to
     substitute I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
	 This happens if I1DEST is mentioned in I2 and dies there, and
	 has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	   && i1_feeds_i2_n
	   && dead_or_set_p (i2, i1dest)
	   && !reg_overlap_mentioned_p (i1dest, newpat))
	  /* Before we can do this substitution, we must redo the test done
	     above (see detailed comments there) that ensures I1DEST isn't
	     mentioned in any SETs in NEWPAT that are field assignments.  */
	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);

      /* If the following substitution will modify I1SRC, make a copy of it
	 for the case where it is substituted for I1DEST in I2PAT later.  */
      if (added_sets_2 && i1_feeds_i2_n)
	i1src_copy = copy_rtx (i1src);

      /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
	 copy of I1SRC each time we substitute it, in order to avoid creating
	 self-referential RTL when we will be substituting I0SRC for I0DEST
	 later.  */
      newpat = subst (newpat, i1dest, i1src, 0, 0,
		      i0_feeds_i1_n && i0dest_in_i0src);
      substed_i1 = 1;

      /* Record whether I1's body now appears within I3's body.  */
      i1_is_used = n_occurrences;
    }
  /* Likewise for I0 if we have it.  */

  if (i0 && GET_CODE (newpat) != CLOBBER)
    {
      if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
	   && !reg_overlap_mentioned_p (i0dest, newpat))
	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
				0, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      /* If the following substitution will modify I0SRC, make a copy of it
	 for the case where it is substituted for I0DEST in I1PAT later.  */
      if (added_sets_1 && i0_feeds_i1_n)
	i0src_copy = copy_rtx (i0src);
      /* And a copy for I0DEST in I2PAT substitution.  */
      if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
			   || (i0_feeds_i2_n)))
	i0src_copy2 = copy_rtx (i0src);

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i0);
      newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
      substed_i0 = 1;
    }
  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
	      > 1))
      || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
	  && (n_occurrences + added_sets_0
	      + (added_sets_1 && i0_feeds_i1_n)
	      + (added_sets_2 && i0_feeds_i2_n)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }
  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */

  if (added_sets_0 || added_sets_1 || added_sets_2)
    {
      int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
      combine_extras++;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + extra_sets;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_0)
	XVECEXP (newpat, 0, --total_sets) = i0pat;

      if (added_sets_1)
	{
	  rtx t = i1pat;
	  if (i0_feeds_i1_n)
	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}

      if (added_sets_2)
	{
	  rtx t = i2pat;
	  if (i1_feeds_i2_n)
	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
		       i0_feeds_i1_n && i0dest_in_i0src);
	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);

	  XVECEXP (newpat, 0, --total_sets) = t;
	}
    }
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }

  /* We have recognized nothing yet.  */
  insn_code_number = -1;
  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.  */

  if (!(added_sets_2 && i1 == 0)
      && is_parallel_of_n_reg_sets (newpat, 2)
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx oldpat = newpat;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
	  && insn_nothrow_p (i3)
	  && !side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && insn_nothrow_p (i3)
	       && !side_effects_p (SET_SRC (set0)))
	{
	  newpat = set1;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    changed_i3_dest = 1;
	}

      if (insn_code_number < 0)
	newpat = oldpat;
    }
  /* Is the result of combination a valid instruction?  */
  if (insn_code_number < 0)
    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, *split;
      rtx_insn *m_split_insn;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split_insn = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
	     (temporarily, until we are committed to this instruction
	     combination) does not work: for example, any call to nonzero_bits
	     on the register (from a splitter in the MD file, for example)
	     will get the old information, which is invalid.

	     Since nowadays we can create registers during combine just fine,
	     we should just create a new one here, not reuse i2dest.  */

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split_insn = combine_split_insns (parallel, i3);
	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split_insn == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split_insn = combine_split_insns (parallel, i3);

	      if (m_split_insn == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }

	  i2scratch = m_split_insn != 0;
	}
      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split_insn == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split_insn = combine_split_insns (parallel, i3);
	}

      if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
	{
	  rtx m_split_pat = PATTERN (m_split_insn);
	  insn_code_number = recog_for_combine (&m_split_pat, i3,
						&new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split_pat;
	}
      else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
	       && (next_nonnote_nondebug_insn (i2) == i3
		   || ! use_crosses_set_p (PATTERN (m_split_insn),
					   DF_INSN_LUID (i2))))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
	  newi2pat = PATTERN (m_split_insn);

	  i3set = single_set (NEXT_INSN (m_split_insn));
	  i2set = single_set (m_split_insn);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_nonnote_nondebug_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
		  && REGNO (new_i2_dest) < reg_n_sets_max)
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}
      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
	  && (!HAVE_cc0 || REG_P (i2dest))
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_nonnote_nondebug_insn (i2) == i3
	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  i2scratch = true;

	  /* *SPLIT may be part of I2SRC, so make sure we have the
	     original expression around for later debug processing.
	     We should not need I2SRC any more in other cases.  */
	  if (MAY_HAVE_DEBUG_INSNS)
	    i2src = copy_rtx (i2src);
	  else
	    i2src = NULL;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }
3705 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3706 an ASHIFT. This can occur if it was inside a PLUS and hence
3707 appeared to be a memory address. This is a kludge. */
3708 if (split_code
== MULT
3709 && CONST_INT_P (XEXP (*split
, 1))
3710 && INTVAL (XEXP (*split
, 1)) > 0
3711 && (i
= exact_log2 (UINTVAL (XEXP (*split
, 1)))) >= 0)
3713 SUBST (*split
, gen_rtx_ASHIFT (split_mode
,
3714 XEXP (*split
, 0), GEN_INT (i
)));
3715 /* Update split_code because we may not have a multiply
3717 split_code
= GET_CODE (*split
);
3720 /* Similarly for (plus (mult FOO (const_int pow2))). */
3721 if (split_code
== PLUS
3722 && GET_CODE (XEXP (*split
, 0)) == MULT
3723 && CONST_INT_P (XEXP (XEXP (*split
, 0), 1))
3724 && INTVAL (XEXP (XEXP (*split
, 0), 1)) > 0
3725 && (i
= exact_log2 (UINTVAL (XEXP (XEXP (*split
, 0), 1)))) >= 0)
3727 rtx nsplit
= XEXP (*split
, 0);
3728 SUBST (XEXP (*split
, 0), gen_rtx_ASHIFT (GET_MODE (nsplit
),
3729 XEXP (nsplit
, 0), GEN_INT (i
)));
3730 /* Update split_code because we may not have a multiply
3732 split_code
= GET_CODE (*split
);
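
	  /* An illustrative instance of the kludge above (register number
	     invented for the sketch): if *SPLIT is
	     (mult:SI (reg:SI 100) (const_int 8)), then exact_log2 (8) is 3
	     and the expression is rewritten as
	     (ashift:SI (reg:SI 100) (const_int 3)) before splitting.  */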
#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif
	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
		      || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
		    {
		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }
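
	  /* An illustrative sketch of the identity splits above (register
	     number invented): if NEWPAT's source is
	     (plus:SI (reg:SI 90) (reg:SI 90)), NEWI2PAT becomes
	     (set NEWDEST (reg:SI 90)) and the source is rewritten as
	     (plus:SI NEWDEST NEWDEST), a shape a doubling or shift-add
	     pattern may recognize where the original form did not.  */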
	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3,
						  &new_i3_notes);
	}
      /* Check for a case where we loaded from memory in a narrow mode and
	 then sign extended it, but we need both registers.  In that case,
	 we have a PARALLEL with both loads from the same memory location.
	 We can split this into a load from memory followed by a
	 register-register copy.  This saves at least one insn, more if
	 register allocation can eliminate the copy.

	 We cannot do this if the destination of the first assignment is a
	 condition code register or cc0.  We eliminate this case by making
	 sure the SET_DEST and SET_SRC have the same mode.

	 We cannot do this if the destination of the second assignment is
	 a register that we have already assumed is zero-extended.  Similarly
	 for a SUBREG of such a register.  */

      else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	       && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
		   == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			       XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	       && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				       DF_INSN_LUID (i2))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode))))
	       && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		     && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
			 (REG_P (temp_expr)
			  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
			  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
			  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
			  && (reg_stat[REGNO (temp_expr)].nonzero_bits
			      != GET_MODE_MASK (word_mode)))))
	       && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					     SET_SRC (XVECEXP (newpat, 0, 1)))
	       && ! find_reg_note (i3, REG_UNUSED,
				   SET_DEST (XVECEXP (newpat, 0, 0))))
	{
	  rtx ni2dest;

	  newi2pat = XVECEXP (newpat, 0, 0);
	  ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
	  newpat = XVECEXP (newpat, 0, 1);
	  SUBST (SET_SRC (newpat),
		 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    swap_i2i3 = 1;
	}
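
      /* A sketch of the shape this case handles (register numbers
	 invented):
	     (parallel [(set (reg:SI 101) (sign_extend:SI (mem:HI A)))
			(set (reg:HI 102) (mem:HI A))])
	 is split so the first SET (the extending load) becomes NEWI2PAT
	 and the second SET's source is replaced by the low part of
	 reg 101, leaving a plain register-register copy for I3.  */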
      /* Similarly, check for a case where we have a PARALLEL of two
	 independent SETs but we started with three insns.  In this case,
	 we can do the sets as two separate insns.  This case occurs when
	 some SET allows two other insns to combine, but the destination
	 of that SET is still live.

	 Also do this if we started with two insns and (at least) one of the
	 resulting sets is a noop; this noop will be deleted later.  */

      else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	       && GET_CODE (newpat) == PARALLEL
	       && XVECLEN (newpat, 0) == 2
	       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	       && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
		      || set_noop_p (XVECEXP (newpat, 0, 1)))
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	       && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				      XVECEXP (newpat, 0, 0))
	       && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				      XVECEXP (newpat, 0, 1))
	       && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		     && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
	{
	  rtx set0 = XVECEXP (newpat, 0, 0);
	  rtx set1 = XVECEXP (newpat, 0, 1);

	  /* Normally, it doesn't matter which of the two is done first,
	     but the one that references cc0 can't be the second, and
	     one which uses any regs/memory set in between i2 and i3 can't
	     be first.  The PARALLEL might also have been pre-existing in i3,
	     so we need to make sure that we won't wrongly hoist a SET to i2
	     that would conflict with a death note present in there.  */
	  if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	      && !(REG_P (SET_DEST (set1))
		   && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	      && !(GET_CODE (SET_DEST (set1)) == SUBREG
		   && find_reg_note (i2, REG_DEAD,
				     SUBREG_REG (SET_DEST (set1))))
	      && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	      /* If I3 is a jump, ensure that set0 is a jump so that
		 we do not create invalid RTL.  */
	      && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
	    {
	      newi2pat = set1;
	      newpat = set0;
	    }
	  else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
		   && !(REG_P (SET_DEST (set0))
			&& find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
		   && !(GET_CODE (SET_DEST (set0)) == SUBREG
			&& find_reg_note (i2, REG_DEAD,
					  SUBREG_REG (SET_DEST (set0))))
		   && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
		   /* If I3 is a jump, ensure that set1 is a jump so that
		      we do not create invalid RTL.  */
		   && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
	    {
	      newi2pat = set0;
	      newpat = set1;
	    }
	  else
	    {
	      undo_all ();
	      return 0;
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  if (i2_code_number >= 0)
	    {
	      /* recog_for_combine might have added CLOBBERs to newi2pat.
		 Make sure NEWPAT does not depend on the clobbered regs.  */
	      if (GET_CODE (newi2pat) == PARALLEL)
		{
		  for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		    if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		      {
			rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
			if (reg_overlap_mentioned_p (reg, newpat))
			  {
			    undo_all ();
			    return 0;
			  }
		      }
		}

	      insn_code_number = recog_for_combine (&newpat, i3,
						    &new_i3_notes);
	    }
	}
  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }

  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }
  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }

  if (MAY_HAVE_DEBUG_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    gcc_assert (last);
		    if (DF_INSN_LUID (last)
			< DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }
  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn))))
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }

  if (swap_i2i3)
    {
      rtx_insn *insn;
      struct insn_link *link;
      rtx ni2dest;

      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);

      /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	 so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We must remove this link.
	 The simplest way to remove the link is to point it at I1,
	 which we know will be a NOTE.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
      else
	ni2dest = SET_DEST (newi2pat);

      for (insn = NEXT_INSN (i3);
	   insn
	   && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
	       || insn != BB_HEAD (this_basic_block->next_bb));
	   insn = NEXT_INSN (insn))
	if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
	  {
	    FOR_EACH_LOG_LINK (link, insn)
	      if (link->insn == i3)
		link->insn = i1;

	    break;
	  }
    }
  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be
       setting either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:

	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		       REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
	   ix: using r0

       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything that is
       shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));
    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);

	reset_used_flags (call_usage);
	call_usage = copy_rtx (call_usage);

	if (substed_i2)
	  {
	    /* I2SRC must still be meaningful at this point.  Some
	       splitting operations can invalidate I2SRC, but those
	       operations do not apply to calls.  */
	    gcc_assert (i2src);
	    replace_rtx (call_usage, i2dest, i2src);
	  }

	if (substed_i1)
	  replace_rtx (call_usage, i1dest, i1src);
	if (substed_i0)
	  replace_rtx (call_usage, i0dest, i0src);

	CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = NULL_RTX;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;
    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed
       I3 in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, things
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }
    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }

  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  if (GET_CODE (PATTERN (i3)) == TRAP_IF
      && XEXP (PATTERN (i3), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (i3);
      gcc_assert (bb);
      remove_edge (split_block (bb, i3));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  if (undobuf.other_insn
      && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
      && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
    {
      basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
      gcc_assert (bb);
      remove_edge (split_block (bb, undobuf.other_insn));
      emit_barrier_after_bb (bb);
      *new_direct_jump_p = 1;
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}
/* Get a marker for undoing to the current state.  */

static void *
get_undo_marker (void)
{
  return undobuf.undos;
}

/* Undo the modifications up to the marker.  */

static void
undo_to_marker (void *marker)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo != marker; undo = next)
    {
      gcc_assert (undo);

      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = (struct undo *) marker;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  undo_to_marker (0);
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
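
      /* For example, on a target with HAVE_lo_sum,
	 (mem (symbol_ref "sym")) is rewritten as
	     (mem (lo_sum (high (symbol_ref "sym")) (symbol_ref "sym")))
	 and the returned split point is the HIGH subexpression, so the
	 high part of the address can be computed by a separate insn.  */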
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;
    case SET:
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  machine_mode mode = GET_MODE (dest);
	  unsigned HOST_WIDE_INT mask
	    = (HOST_WIDE_INT_1U << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, mode,
					  simplify_gen_binary (AND, mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}
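
      /* A worked instance of the bitfield rewrite above, with made-up
	 numbers: storing the constant 5 into a 3-bit field at bit 4
	 (LEN = 3, POS = 4, MASK = 7) of register R becomes
	     R = (R & ~(7 << 4)) | (5 << 4)
	 i.e. an AND with the inverted field mask IORed with the shifted
	 source; when SRC == MASK the AND is unnecessary and only the
	 IOR is generated.  */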
      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;
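
	  /* For instance (register numbers invented): if r1 is set by
	     (and:SI (reg:SI r0) (const_int 0x1000)) and its only use is
	     in (ne r1 (const_int 0)), then exact_log2 (0x1000) is 12 and
	     the AND is replaced by a one-bit ZERO_EXTRACT of bit 12 of
	     r0, avoiding a constant that may have needed extra insns to
	     load.  */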
	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit
	     of X is known to be on, this can be converted into a NEG of a
	     shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;
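
	  /* Sketch of the transformation: if nonzero_bits says only bit 3
	     of X can be set and STORE_FLAG_VALUE is -1, then (ne X 0)
	     yields either 0 or -1, which is exactly
	     (neg (lshiftrt X (const_int 3))).  */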
	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (GET_MODE (inner));
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0
	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
	{
	  machine_mode mode = GET_MODE (SET_SRC (x));

	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= (HOST_WIDE_INT_1U << len) - 1;
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_PRECISION (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_PRECISION (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
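
      /* A worked instance of the two forms above, assuming 32-bit SImode:
	 an unsigned 4-bit field at bit 3 can be extracted either as
	     (x >> 3) & 0xf
	 or, in the two-shift form (also used for the signed case, with an
	 arithmetic instead of logical right shift),
	     (x << 25) >> 28
	 since 32 - 4 - 3 = 25 and 32 - 4 = 28.  */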
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;
    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}
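
      /* This is just De Morgan's law: (and (not A) (not B)) becomes
	 (not (ior A B)) and (ior (not A) (not B)) becomes
	 (not (and A B)), exposing the inner binary operation as a
	 potential split point.  */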
      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;
    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}
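
      /* For example, with an invented constant:
	 (minus A (mult B (const_int 3))) becomes
	 (plus (mult B (const_int -3)) A), a form that may expose a
	 multiply-add split which the MINUS shape hides.  */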
      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (! set_src
	  && (GET_CODE (XEXP (x, 0)) == MULT
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* fall through */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;
  /* Two expressions are equal if they are identical copies of a shared
     RTX or if they are both registers with the same register number
     and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  */
  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new_rtx) == CLOBBER
	  && XEXP (new_rtx, 0) == const0_rtx)
	return new_rtx;

      SUBST (XVECEXP (x, 0, 0), new_rtx);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Trying to simplify the operands of a widening MULT is not likely
	 to create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0))
	  && from == to)
	return x;

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new_rtx = (unique_copy && n_occurrences
				 ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
				       unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new_rtx) == CLOBBER
			  && XEXP (new_rtx, 0) == const0_rtx)
			return new_rtx;
		    }

		  SUBST (XVECEXP (x, i, j), new_rtx);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
		      && (!HAVE_cc0
			  || ! (code == SET && i == 1
				&& XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG
					|| code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather
		 than an expression containing it.  This will speed things
		 up as well as prevent accidents where two CLOBBERs are
		 considered to be equal, thus producing an incorrect
		 simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER
		  && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx,
						GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* If X is a commutative operation whose operands are not in the canonical
   order, use substitutions to swap them.  */

static void
maybe_swap_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }
}
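
/* For example, (plus:SI (const_int 4) (reg:SI 100)) is not canonical:
   swap_commutative_operands_p wants the constant last, so the operands
   are exchanged via SUBST (and hence undoably) to give
   (plus:SI (reg:SI 100) (const_int 4)).  */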
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top
   level of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  maybe_swap_commutative_operands (x);

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an
	     if_then_else is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					 (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					 (cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode,
								 VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }
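
  /* Illustrative example (not from the original source):
     apply_distributive_law performs the inverse distributive
     transformation, e.g.
         (ior:SI (and:SI (reg:SI 100) (reg:SI 102))
                 (and:SI (reg:SI 101) (reg:SI 102)))
     becomes
         (and:SI (ior:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102))
     which needs one operation fewer.  */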
  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
          || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
        {
          rtx other = XEXP (XEXP (x, 0), 0);
          rtx inner_op0 = XEXP (XEXP (x, 0), 1);
          rtx inner_op1 = XEXP (x, 1);
          rtx inner;

          /* Make sure we pass the constant operand if any as the second
             one if this is a commutative operation.  */
          if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
            std::swap (inner_op0, inner_op1);
          inner = simplify_binary_operation (code == MINUS ? PLUS
                                             : code == DIV ? MULT
                                             : code,
                                             mode, inner_op0, inner_op1);

          /* For commutative operations, try the other pair if that one
             didn't simplify.  */
          if (inner == 0 && COMMUTATIVE_ARITH_P (x))
            {
              other = XEXP (XEXP (x, 0), 1);
              inner = simplify_binary_operation (code, mode,
                                                 XEXP (XEXP (x, 0), 0),
                                                 XEXP (x, 1));
            }

          if (inner)
            return simplify_gen_binary (code, mode, other, inner);
        }
    }
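
  /* Worked example (added for illustration): reassociation lets constants
     fold, e.g. in
         (plus:SI (plus:SI (reg:SI 100) (const_int 2)) (const_int 3))
     inner_op0 = (const_int 2) and inner_op1 = (const_int 3) fold to
     (const_int 5), giving (plus:SI (reg:SI 100) (const_int 5)).  */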
  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
         address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;
    case SUBREG:
      if (op0_mode == VOIDmode)
        op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
          && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
          /* Don't call gen_lowpart if the inner mode
             is VOIDmode and we cannot simplify it, as SUBREG without
             inner mode is invalid.  */
          && (GET_MODE (SUBREG_REG (x)) != VOIDmode
              || gen_lowpart_common (mode, SUBREG_REG (x))))
        return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
        break;

      temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
                              SUBREG_BYTE (x));
      if (temp)
        return temp;

      /* If op is known to have all lower bits zero, the result is zero.  */
      if (!in_dest
          && SCALAR_INT_MODE_P (mode)
          && SCALAR_INT_MODE_P (op0_mode)
          && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
          && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
          && HWI_COMPUTABLE_MODE_P (op0_mode)
          && (nonzero_bits (SUBREG_REG (x), op0_mode)
              & GET_MODE_MASK (mode)) == 0)
        return CONST0_RTX (mode);

      /* Don't change the mode of the MEM if that would change the meaning
         of the address.  */
      if (MEM_P (SUBREG_REG (x))
          && (MEM_VOLATILE_P (SUBREG_REG (x))
              || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
                                           MEM_ADDR_SPACE (SUBREG_REG (x)))))
        return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
         we might have been counting on using the fact that some bits were
         zero.  We now do this in the SET.  */

      break;
    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
         replaced by (lshiftrt X C).  This will convert
         (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
          && CONST_INT_P (XEXP (temp, 1))
          && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
                                     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
         (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
         MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
         (sign_extract X 1 Y).  But only do this if TEMP isn't a register
         or a SUBREG of one since we'd be making the expression more
         complex if it was just a register.  */

      if (!REG_P (temp)
          && ! (GET_CODE (temp) == SUBREG
                && REG_P (SUBREG_REG (temp)))
          && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
        {
          rtx temp1 = simplify_shift_const
            (NULL_RTX, ASHIFTRT, mode,
             simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
                                   GET_MODE_PRECISION (mode) - 1 - i),
             GET_MODE_PRECISION (mode) - 1 - i);

          /* If all we did was surround TEMP with the two shifts, we
             haven't improved anything, so don't use it.  Otherwise,
             we are better off with TEMP1.  */
          if (GET_CODE (temp1) != ASHIFTRT
              || GET_CODE (XEXP (temp1, 0)) != ASHIFT
              || XEXP (XEXP (temp1, 0), 0) != temp)
            return temp1;
        }
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      if (HWI_COMPUTABLE_MODE_P (mode))
        SUBST (XEXP (x, 0),
               force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
                              GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
         whose value is a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
          && (temp = get_last_value (XEXP (x, 0)))
          && COMPARISON_P (temp))
        return gen_lowpart (mode, XEXP (x, 0));
      break;
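
      /* Worked example (added for illustration): the constant fold above
         maps (truncate:QI (const_int 300)) to (const_int 44), since
         300 & 0xff == 44 and gen_int_mode re-canonicalizes the value
         for QImode.  */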
    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
         returning the inner CONST since CONST can be shared with a
         REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
        SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
         can add in an offset.  find_split_point will split this address up
         again if it doesn't match.  */
      if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
          && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
        return XEXP (x, 1);
      break;
    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
         when c is (const_int (pow2 + 1) / 2) is a sign extension of a
         bit-field and can be replaced by either a sign_extend or a
         sign_extract.  The `and' may be a zero_extend and the two
         <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
          && CONST_INT_P (XEXP (x, 1))
          && CONST_INT_P (XEXP (XEXP (x, 0), 1))
          && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
          && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
              || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
               && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
               && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
                   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
              || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
                  && (GET_MODE_PRECISION
                        (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
                      == (unsigned int) i + 1))))
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, mode,
           simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                 XEXP (XEXP (XEXP (x, 0), 0), 0),
                                 GET_MODE_PRECISION (mode) - (i + 1)),
           GET_MODE_PRECISION (mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
         can become (ashiftrt (ashift (xor x 1) C) C) where C is
         the bitsize of the mode - 1.  This allows simplification of
         "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
          && !REG_P (XEXP (x, 0))
          && ! (GET_CODE (XEXP (x, 0)) == SUBREG
                && REG_P (SUBREG_REG (XEXP (x, 0))))
          && nonzero_bits (XEXP (x, 0), mode) == 1)
        return simplify_shift_const
          (NULL_RTX, ASHIFTRT, mode,
           simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
                                 GET_MODE_PRECISION (mode) - 1),
           GET_MODE_PRECISION (mode) - 1);

      /* If we are adding two things that have no bits in common, convert
         the addition into an IOR.  This will often be further simplified,
         for example in cases like ((a & 1) + (a & 2)), which can
         become a & 3.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (XEXP (x, 0), mode)
              & nonzero_bits (XEXP (x, 1), mode)) == 0)
        {
          /* Try to simplify the expression further.  */
          rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
          temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

          /* If we could, great.  If not, do not go ahead with the IOR
             replacement, since PLUS appears in many special purpose
             address arithmetic instructions.  */
          if (GET_CODE (temp) != CLOBBER
              && (GET_CODE (temp) != IOR
                  || ((XEXP (temp, 0) != XEXP (x, 0)
                       || XEXP (temp, 1) != XEXP (x, 1))
                      && (XEXP (temp, 0) != XEXP (x, 1)
                          || XEXP (temp, 1) != XEXP (x, 0)))))
            return temp;
        }

      /* Canonicalize x + x into x << 1.  */
      if (GET_MODE_CLASS (mode) == MODE_INT
          && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
          && !side_effects_p (XEXP (x, 0)))
        return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);

      break;
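
      /* Worked example (added for illustration): in
             (plus:SI (and:SI (reg:SI 100) (const_int 1))
                      (and:SI (reg:SI 100) (const_int 2)))
         the operands have disjoint nonzero bits, so the sum above is
         rewritten as an IOR, which the recursive call then folds to
             (and:SI (reg:SI 100) (const_int 3)).  */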
    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
         (and <foo> (const_int pow2-1))  */
      if (GET_CODE (XEXP (x, 1)) == AND
          && CONST_INT_P (XEXP (XEXP (x, 1), 1))
          && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
          && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
        return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
                                       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;
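
      /* Worked example (added for illustration): with <foo> = (reg:SI 100)
         and pow2 = 8,
             (minus:SI (reg:SI 100) (and:SI (reg:SI 100) (const_int -8)))
         becomes (and:SI (reg:SI 100) (const_int 7)), the remainder of an
         alignment computation.  */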
    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  This
         occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
          && GET_CODE (XEXP (x, 0)) == DIV)
        {
          rtx tem = simplify_binary_operation (MULT, mode,
                                               XEXP (XEXP (x, 0), 0),
                                               XEXP (x, 1));
          if (tem)
            return simplify_gen_binary (DIV, mode, tem,
                                        XEXP (XEXP (x, 0), 1));
        }
      break;
    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
         its first operand is a shift.  */
      if (CONST_INT_P (XEXP (x, 1))
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
          && (GET_CODE (XEXP (x, 0)) == ASHIFT
              || GET_CODE (XEXP (x, 0)) == LSHIFTRT
              || GET_CODE (XEXP (x, 0)) == ASHIFTRT
              || GET_CODE (XEXP (x, 0)) == ROTATE
              || GET_CODE (XEXP (x, 0)) == ROTATERT))
        return simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
                                     XEXP (x, 0), i);
      break;
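
      /* Worked example (added for illustration):
             (udiv:SI (lshiftrt:SI (reg:SI 100) (const_int 2)) (const_int 4))
         is handed to simplify_shift_const as a logical right shift by 2,
         which merges with the inner shift to give
             (lshiftrt:SI (reg:SI 100) (const_int 4)).  */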
    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
         with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
          || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
              && ! CC0_P (XEXP (x, 0))))
        {
          rtx op0 = XEXP (x, 0);
          rtx op1 = XEXP (x, 1);
          enum rtx_code new_code;

          if (GET_CODE (op0) == COMPARE)
            op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

          /* Simplify our comparison, if possible.  */
          new_code = simplify_comparison (code, &op0, &op1);

          /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
             if only the low-order bit is possibly nonzero in X (such as when
             X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
             (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
             known to be either 0 or -1, NE becomes a NEG and EQ becomes
             (plus X 1).

             Remove any ZERO_EXTRACT we made when thinking this was a
             comparison.  It may now be simpler to use, e.g., an AND.  If a
             ZERO_EXTRACT is indeed appropriate, it will be placed back by
             the call to make_compound_operation in the SET case.

             Don't apply these optimizations if the caller would
             prefer a comparison rather than a value.
             E.g., for the condition in an IF_THEN_ELSE most targets need
             an explicit comparison.  */

          if (in_cond)
            ;

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            return gen_lowpart (mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == 1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_binary (XOR, mode,
                                          gen_lowpart (mode, op0),
                                          const1_rtx);
            }

          else if (STORE_FLAG_VALUE == 1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (mode, gen_lowpart (mode, op0), 1);
            }

          /* If STORE_FLAG_VALUE is -1, we have cases similar to
             those above.  */

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            return gen_lowpart (mode,
                                expand_compound_operation (op0));

          else if (STORE_FLAG_VALUE == -1
                   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NEG, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && (num_sign_bit_copies (op0, mode)
                       == GET_MODE_PRECISION (mode)))
            {
              op0 = expand_compound_operation (op0);
              return simplify_gen_unary (NOT, mode,
                                         gen_lowpart (mode, op0),
                                         mode);
            }

          /* If X is 0/1, (eq X 0) is X-1.  */
          else if (STORE_FLAG_VALUE == -1
                   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
                   && op1 == const0_rtx
                   && mode == GET_MODE (op0)
                   && nonzero_bits (op0, mode) == 1)
            {
              op0 = expand_compound_operation (op0);
              return plus_constant (mode, gen_lowpart (mode, op0), -1);
            }

          /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
             one bit that might be nonzero, we can convert (ne x 0) to
             (ashift x c) where C puts the bit in the sign bit.  Remove any
             AND with STORE_FLAG_VALUE when we are done, since we are only
             going to test the sign bit.  */
          if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
              && HWI_COMPUTABLE_MODE_P (mode)
              && val_signbit_p (mode, STORE_FLAG_VALUE)
              && op1 == const0_rtx
              && mode == GET_MODE (op0)
              && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
            {
              x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                        expand_compound_operation (op0),
                                        GET_MODE_PRECISION (mode) - 1 - i);
              if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
                return XEXP (x, 0);
              else
                return x;
            }

          /* If the code changed, return a whole new comparison.
             We also need to avoid using SUBST in cases where
             simplify_comparison has widened a comparison with a CONST_INT,
             since in that case the wider CONST_INT may fail the sanity
             checks in do_SUBST.  */
          if (new_code != code
              || (CONST_INT_P (op1)
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
                  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
            return gen_rtx_fmt_ee (new_code, mode, op0, op1);

          /* Otherwise, keep this operation, but maybe change its operands.
             This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
          SUBST (XEXP (x, 0), op0);
          SUBST (XEXP (x, 1), op1);
        }
      break;
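
      /* Worked examples for the STORE_FLAG_VALUE == 1 cases above (added
         for illustration).  When OP0 is known to have only its low bit
         possibly nonzero:
             (ne:SI (reg:SI 100) (const_int 0)) -> (reg:SI 100)
             (eq:SI (reg:SI 100) (const_int 0)) -> (xor:SI (reg:SI 100)
                                                           (const_int 1))
         and when OP0 is known to be either 0 or -1:
             (ne:SI (reg:SI 100) (const_int 0)) -> (neg:SI (reg:SI 100)).  */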
    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
        return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
        return simplify_shift_const (x, code, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
        SUBST (XEXP (x, 1),
               force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
                              (HOST_WIDE_INT_1U
                               << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
                              - 1,
                              0));
      break;

    default:
      break;
    }

  return x;
}
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
                                    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
          != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
        {
          swapped = 1, true_code = EQ, false_code = NE;
          std::swap (true_rtx, false_rtx);
        }

      /* If we are comparing against zero and the expression being tested has
         only a single bit that might be nonzero, that is its value when it is
         not equal to zero.  Similarly if it is known to be -1 or 0.  */

      if (true_code == EQ && true_val == const0_rtx
          && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from))))
        {
          false_code = EQ;
          false_val = gen_int_mode (nzb, GET_MODE (from));
        }
      else if (true_code == EQ && true_val == const0_rtx
               && (num_sign_bit_copies (from, GET_MODE (from))
                   == GET_MODE_PRECISION (GET_MODE (from))))
        {
          false_code = EQ;
          false_val = constm1_rtx;
        }

      /* Now simplify an arm if we know the value of the register in the
         branch and it is used in the arm.  Be careful due to the potential
         of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
        true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
                                      from, true_val),
                          pc_rtx, pc_rtx, 0, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
        false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
                                       from, false_val),
                           pc_rtx, pc_rtx, 0, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
          || (CONSTANT_P (true_rtx)
              && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
          || true_rtx == const0_rtx
          || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
          || (GET_CODE (true_rtx) == SUBREG
              && OBJECT_P (SUBREG_REG (true_rtx))
              && !OBJECT_P (false_rtx))
          || reg_mentioned_p (true_rtx, false_rtx)
          || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      std::swap (true_rtx, false_rtx);
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
           && !HONOR_NANS (mode)
           && rtx_equal_p (XEXP (cond, 0), true_rtx)
           && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;
  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
        return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
        return
          simplify_gen_unary (NEG, mode,
                              simplify_gen_unary (ABS, mode, true_rtx, mode),
                              mode);
      default:
        break;
      }
  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
        return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
        return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
        return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
        return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
        break;
      }
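
  /* Worked example (added for illustration):
         (if_then_else (ge:SI (reg:SI 100) (reg:SI 101))
                       (reg:SI 100) (reg:SI 101))
     matches the GE arm above and becomes
         (smax:SI (reg:SI 100) (reg:SI 101)).  */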
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && GET_MODE_CLASS (mode) == MODE_INT
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      machine_mode m = mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
           || GET_CODE (t) == IOR || GET_CODE (t) == XOR
           || GET_CODE (t) == ASHIFT
           || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
          && rtx_equal_p (XEXP (t, 0), f))
        c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
         would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
                || GET_CODE (t) == XOR)
               && rtx_equal_p (XEXP (t, 1), f))
        c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (mode)
                      - GET_MODE_PRECISION
                          (GET_MODE (XEXP (XEXP (t, 0), 0))))))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == SIGN_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && (num_sign_bit_copies (f, GET_MODE (f))
                   > (unsigned int)
                     (GET_MODE_PRECISION (mode)
                      - GET_MODE_PRECISION
                          (GET_MODE (XEXP (XEXP (t, 0), 1))))))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = SIGN_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == MINUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR
                   || GET_CODE (XEXP (t, 0)) == ASHIFT
                   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
                   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
               && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }
      else if (GET_CODE (t) == ZERO_EXTEND
               && (GET_CODE (XEXP (t, 0)) == PLUS
                   || GET_CODE (XEXP (t, 0)) == IOR
                   || GET_CODE (XEXP (t, 0)) == XOR)
               && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
               && HWI_COMPUTABLE_MODE_P (mode)
               && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
               && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
               && ((nonzero_bits (f, GET_MODE (f))
                    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
                   == 0))
        {
          c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
          extend_op = ZERO_EXTEND;
          m = GET_MODE (XEXP (t, 0));
        }

      if (z)
        {
          temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
                                                 cond_op0, cond_op1),
                        pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (MULT, m, temp,
                                      simplify_gen_binary (MULT, m, c1,
                                                           const_true_rtx));
          temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
          temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

          if (extend_op != UNKNOWN)
            temp = simplify_gen_unary (extend_op, mode, temp, m);

          return temp;
        }
    }
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), mode)
           && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
          || ((num_sign_bit_copies (XEXP (cond, 0), mode)
               == GET_MODE_PRECISION (mode))
              && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, mode,
                            gen_lowpart (mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
     non-zero bit in A is C1.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && INTEGRAL_MODE_P (GET_MODE (XEXP (cond, 0)))
      && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
         == nonzero_bits (XEXP (cond, 0), GET_MODE (XEXP (cond, 0)))
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
    {
      rtx val = XEXP (cond, 0);
      enum machine_mode val_mode = GET_MODE (val);
      if (val_mode == mode)
        return val;
      else if (GET_MODE_PRECISION (val_mode) < GET_MODE_PRECISION (mode))
        return simplify_gen_unary (ZERO_EXTEND, mode, val, val_mode);
    }

  return x;
}
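
/* Worked example for the shift conversion in simplify_if_then_else (added
   for illustration): if (reg:SI 100) is known to be 0 or 1, then
       (if_then_else (ne (reg:SI 100) (const_int 0))
                     (const_int 8) (const_int 0))
   becomes (ashift:SI (reg:SI 100) (const_int 3)), since
   i = exact_log2 (8) = 3.  */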
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx_insn *other_insn;
  rtx *cc_use;
  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
    {
      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
      SUBST (SET_SRC (x), src);
    }
  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      rtx inner_compare = NULL_RTX;
      machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
        {
          op0 = XEXP (src, 0), op1 = XEXP (src, 1);
          if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
            {
              inner_compare = op0;
              op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
            }
        }
      else
        op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
                                           op0, op1);
      if (!tmp)
        new_code = old_code;
      else if (!CONSTANT_P (tmp))
        {
          new_code = GET_CODE (tmp);
          op0 = XEXP (tmp, 0);
          op1 = XEXP (tmp, 1);
        }
      else
        {
          rtx pat = PATTERN (other_insn);
          undobuf.other_insn = other_insn;
          SUBST (*cc_use, tmp);

          /* Attempt to simplify CC user.  */
          if (GET_CODE (pat) == SET)
            {
              rtx new_rtx = simplify_rtx (SET_SRC (pat));
              if (new_rtx != NULL_RTX)
                SUBST (SET_SRC (pat), new_rtx);
            }

          /* Convert X into a no-op move.  */
          SUBST (SET_DEST (x), pc_rtx);
          SUBST (SET_SRC (x), pc_rtx);
          return x;
        }

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);
#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
         need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
        compare_mode = GET_MODE (op0);
      else if (inner_compare
               && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
               && new_code == old_code
               && op0 == XEXP (inner_compare, 0)
               && op1 == XEXP (inner_compare, 1))
        compare_mode = GET_MODE (inner_compare);
      else
        compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
         compare, and the mode in the place SET_DEST is used.  If SET_DEST is
         a hard register, just build new versions with the proper mode.  If it
         is a pseudo, we lose unless this is the only time we set the pseudo,
         in which case we can safely change its mode.  */
      if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
        {
          if (can_change_dest_mode (dest, 0, compare_mode))
            {
              unsigned int regno = REGNO (dest);
              rtx new_dest;

              if (regno < FIRST_PSEUDO_REGISTER)
                new_dest = gen_rtx_REG (compare_mode, regno);
              else
                {
                  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
                  new_dest = regno_reg_rtx[regno];
                }

              SUBST (SET_DEST (x), new_dest);
              SUBST (XEXP (*cc_use, 0), new_dest);
              other_changed = 1;

              dest = new_dest;
            }
        }
#endif /* SELECT_CC_MODE */
      /* If the code changed, we have to build a new comparison in
         undobuf.other_insn.  */
      if (new_code != old_code)
        {
          int other_changed_previously = other_changed;
          unsigned HOST_WIDE_INT mask;
          rtx old_cc_use = *cc_use;

          SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
                                          dest, const0_rtx));
          other_changed = 1;

          /* If the only change we made was to change an EQ into an NE or
             vice versa, OP0 has only one bit that might be nonzero, and OP1
             is zero, check if changing the user of the condition code will
             produce a valid insn.  If it won't, we can keep the original code
             in that insn by surrounding our operation with an XOR.  */

          if (((old_code == NE && new_code == EQ)
               || (old_code == EQ && new_code == NE))
              && ! other_changed_previously && op1 == const0_rtx
              && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
              && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
            {
              rtx pat = PATTERN (other_insn), note = 0;

              if ((recog_for_combine (&pat, other_insn, &note) < 0
                   && ! check_asm_operands (pat)))
                {
                  *cc_use = old_cc_use;
                  other_changed = 0;

                  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
                                             gen_int_mode (mask,
                                                           GET_MODE (op0)));
                }
            }
        }

      if (other_changed)
        undobuf.other_insn = other_insn;

      /* Don't generate a compare of a CC with 0, just use that CC.  */
      if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
        {
          SUBST (SET_SRC (x), op0);
          src = SET_SRC (x);
        }
      /* Otherwise, if we didn't previously have the same COMPARE we
         want, create it from scratch.  */
      else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
               || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
        {
          SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
          src = SET_SRC (x);
        }
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
         compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits
     will be undefined.  On machines where it is defined, this transformation
     is safe as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
           / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
               + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS
          || (GET_MODE_SIZE (GET_MODE (src))
              <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
            && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
                                         GET_MODE (SUBREG_REG (src)),
                                         GET_MODE (src)))
#endif
      && (REG_P (dest)
          || (GET_CODE (dest) == SUBREG
              && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
             gen_lowpart (GET_MODE (SUBREG_REG (src)),
                          dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }
  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (HAVE_cc0 && dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_PRECISION (GET_MODE (src))
          < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (val_signbit_known_clear_p (GET_MODE (src),
                                     nonzero_bits (inner, inner_mode)))
        {
          SUBST (SET_SRC (x), inner);
          src = SET_SRC (x);
        }
    }
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  enum rtx_code extend_op;
  if (paradoxical_subreg_p (src)
      && MEM_P (SUBREG_REG (src))
      && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
    {
      SUBST (SET_SRC (x),
             gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
      && (!HAVE_conditional_move
          || ! can_conditionally_move_p (GET_MODE (src)))
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
                               GET_MODE (XEXP (XEXP (src, 0), 0)))
          == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
                      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
                       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
          && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 1),
        false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
               && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
        term1 = false_rtx, true_rtx = XEXP (true_rtx, 0),
        false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 1),
        true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
               && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
        term1 = true_rtx, false_rtx = XEXP (false_rtx, 0),
        true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, GET_MODE (src),
                                   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, GET_MODE (src),
                                   simplify_gen_unary (NOT, GET_MODE (src),
                                                       XEXP (XEXP (src, 0), 0),
                                                       GET_MODE (src)),
                                   false_rtx);

      SUBST (SET_SRC (x),
             simplify_gen_binary (IOR, GET_MODE (src),
                                  simplify_gen_binary (IOR, GET_MODE (src),
                                                       term1, term2),
                                  term3));

      src = SET_SRC (x);
    }
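
  /* Worked example (added for illustration): if (reg:SI 100) is known to
     be 0 or -1, then
         (if_then_else (ne (reg:SI 100) (const_int 0))
                       (reg:SI 101) (reg:SI 102))
     becomes the branch-free form
         (ior:SI (and:SI (reg:SI 100) (reg:SI 101))
                 (and:SI (not:SI (reg:SI 100)) (reg:SI 102))).  */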
  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  machine_mode mode = GET_MODE (x);
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
         any (sign) bits when converting INTVAL (op1) to
         "unsigned HOST_WIDE_INT".  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0))
        {
          x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
          if (GET_CODE (x) != AND)
            return x;

          op0 = XEXP (x, 0);
          op1 = XEXP (x, 1);
        }

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
         apply the distributive law and then the inverse distributive
         law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
         the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 0);
          if (result)
            return result;
        }

      if (GET_CODE (op1) == AND)
        {
          rtx result = distribute_and_simplify_rtx (x, 1);
          if (result)
            return result;
        }
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
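
/* Worked example of the correspondence described above (added for
   illustration), in 32-bit SImode: the compound operation
       (sign_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
   expands to the basic pair of shifts
       (ashiftrt:SI (ashift:SI (reg:SI 100) (const_int 24)) (const_int 24))
   while (zero_extend:SI (subreg:QI (reg:SI 100) 0)) ends up as the single
   AND (and:SI (reg:SI 100) (const_int 255)).  */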
static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
      /* FALLTHRU */
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
         it depends on implicitly extending the value.
         Since we don't know the right way to extend it,
         we can't tell whether the implicit way is right.

         Even for a mode that is no wider than a const_int,
         we can't win, because we need to sign extend one of its bits through
         the rest of it, and we don't know which bit.  */
      if (CONST_INT_P (XEXP (x, 0)))
        return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
         (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
         because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
         reloaded.  If not for that, MEM's would very rarely be safe.

         Reject MODEs bigger than a word, because we might not be able
         to reference a two-register group starting with an arbitrary register
         (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
        return x;

      /* Reject MODEs that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
        return x;

      len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
      /* If the inner object has VOIDmode (the only way this can happen
         is if it is an ASM_OPERANDS), we can't do anything since we don't
         know how much masking to do.  */
      if (len == 0)
        return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* fall through */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
        return XEXP (x, 0);

      if (!CONST_INT_P (XEXP (x, 1))
          || !CONST_INT_P (XEXP (x, 2))
          || GET_MODE (XEXP (x, 0)) == VOIDmode)
        return x;

      /* Reject MODEs that aren't scalar integers because turning vector
         or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
        return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
        return x;

      if (BITS_BIG_ENDIAN)
        pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;

      break;

    default:
      return x;
    }
  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
               & ~(((unsigned HOST_WIDE_INT)
                    GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
                   >> 1))
              == 0)))
    {
      machine_mode mode = GET_MODE (x);
      rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
          > set_src_cost (temp2, mode, optimize_this_for_speed_p))
        return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
               > set_src_cost (temp, mode, optimize_this_for_speed_p))
        return temp;
      else
        return x;
    }
  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
         know that the last value didn't have any inappropriate bits
         set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
          && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
          && subreg_lowpart_p (XEXP (x, 0))
          && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
          && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
              & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
         is a comparison and STORE_FLAG_VALUE permits.  This is like
         the first case, but it works even when GET_MODE (x) is larger
         than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
          && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
          && COMPARISON_P (XEXP (XEXP (x, 0), 0))
          && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              <= HOST_BITS_PER_WIDE_INT)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
          && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
          && subreg_lowpart_p (XEXP (x, 0))
          && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
          && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
              <= HOST_BITS_PER_WIDE_INT)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
        return SUBREG_REG (XEXP (x, 0));
    }
  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in
     to produce such a position.  */

  modewidth = GET_MODE_PRECISION (GET_MODE (x));
  if (modewidth >= pos + len)
    {
      machine_mode mode = GET_MODE (x);
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
        return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
                                  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
                                  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
                                  simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                        GET_MODE (x),
                                                        XEXP (x, 0), pos),
                                  (HOST_WIDE_INT_1U << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}
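
/* Worked example (added for illustration): in SImode the extraction
       (zero_extract:SI (reg:SI 100) (const_int 4) (const_int 8))
   (LEN = 4 bits starting at POS = 8) first becomes the shift pair
       (lshiftrt:SI (ashift:SI (reg:SI 100) (const_int 20)) (const_int 28))
   which simplify_shift_const then turns into an AND of a shift:
       (and:SI (lshiftrt:SI (reg:SI 100) (const_int 8)) (const_int 15)).  */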
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;                      /* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  machine_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
          && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
        {
          inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
          len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
          pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
        }
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
               && CONST_INT_P (XEXP (SET_DEST (x), 1)))
        {
          inner = XEXP (SET_DEST (x), 0);
          len = INTVAL (XEXP (SET_DEST (x), 1));
          pos = XEXP (SET_DEST (x), 2);

          /* A constant position should stay within the width of INNER.  */
          if (CONST_INT_P (pos)
              && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
            break;

          if (BITS_BIG_ENDIAN)
            {
              if (CONST_INT_P (pos))
                pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
                               - INTVAL (pos));
              else if (GET_CODE (pos) == MINUS
                       && CONST_INT_P (XEXP (pos, 1))
                       && (INTVAL (XEXP (pos, 1))
                           == GET_MODE_PRECISION (GET_MODE (inner)) - len))
                /* If position is ADJUST - X, new position is X.  */
                pos = XEXP (pos, 0);
              else
                {
                  HOST_WIDE_INT prec
                    = GET_MODE_PRECISION (GET_MODE (inner));
                  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
                                             gen_int_mode (prec - len,
                                                           GET_MODE (pos)),
                                             pos);
                }
            }
        }

      /* A SUBREG between two modes that occupy the same numbers of words
         can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
               /* We need SUBREGs to compute nonzero_bits properly.  */
               && nonzero_sign_valid
               && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
                     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
                        + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
        {
          x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
                           gen_lowpart
                           (GET_MODE (SUBREG_REG (SET_DEST (x))),
                            SET_SRC (x)));
          continue;
        }
      else
        break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
        inner = SUBREG_REG (inner);

      compute_mode = GET_MODE (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (! SCALAR_INT_MODE_P (compute_mode))
        {
          machine_mode imode;

          /* Don't do anything for vector or complex integral types.  */
          if (! FLOAT_MODE_P (compute_mode))
            break;

          /* Try to find an integral mode to pun with.  */
          imode = mode_for_size (GET_MODE_BITSIZE (compute_mode),
                                 MODE_INT, 0);
          if (imode == BLKmode)
            break;

          compute_mode = imode;
          inner = gen_lowpart (imode, inner);
        }

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
        break;

      /* Don't try to compute in too wide unsupported modes.  */
      if (!targetm.scalar_mode_supported_p (compute_mode))
        break;

      /* Now compute the equivalent expression.  Make a copy of INNER
         for the SET_DEST in case it is a MEM into which we will substitute;
         we don't want shared RTL in that case.  */
      mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
                           compute_mode);
      cleared = simplify_gen_binary (AND, compute_mode,
                                     simplify_gen_unary (NOT, compute_mode,
                                       simplify_gen_binary (ASHIFT,
                                                            compute_mode,
                                                            mask, pos),
                                       compute_mode),
                                     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
                                    simplify_gen_binary (
                                      AND, compute_mode,
                                      gen_lowpart (compute_mode, SET_SRC (x)),
                                      mask),
                                    pos);

      x = gen_rtx_SET (copy_rtx (inner),
                       simplify_gen_binary (IOR, compute_mode,
                                            cleared, masked));
    }

  return x;
}
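
/* Worked example (added for illustration): storing (reg:SI 101) into an
   8-bit field at bit 8 of (reg:SI 100), i.e.
       (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 8))
            (reg:SI 101))
   becomes, with MASK = (const_int 255),
       (set (reg:SI 100)
            (ior:SI (and:SI (not:SI (ashift:SI (const_int 255) (const_int 8)))
                            (reg:SI 100))
                    (ashift:SI (and:SI (reg:SI 101) (const_int 255))
                               (const_int 8)))).  */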
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
                 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
                 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  machine_mode is_mode = GET_MODE (inner);
  machine_mode inner_mode;
  machine_mode wanted_inner_mode;
  machine_mode wanted_inner_reg_mode = word_mode;
  machine_mode pos_mode = word_mode;
  machine_mode extraction_mode = word_mode;
  machine_mode tmode = mode_for_size (len, MODE_INT, 1);
  rtx new_rtx = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (pos_rtx && CONST_INT_P (pos_rtx))
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
         consider just the QI as the memory to extract from.
         The subreg adds or removes high bits; its mode is
         irrelevant to the meaning of this extraction,
         since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
        is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
           && CONST_INT_P (XEXP (inner, 1))
           && pos_rtx == 0 && pos == 0
           && len > UINTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
         (ashift X (const_int C)), where LEN > C.  Extract the
         least significant (LEN - C) bits of X, giving an rtx
         whose mode is MODE, then shift it left C times.  */
      new_rtx = make_extraction (mode, XEXP (inner, 0),
                                 0, 0, len - INTVAL (XEXP (inner, 1)),
                                 unsignedp, in_dest, in_compare);
      if (new_rtx != 0)
        return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
    }
  else if (GET_CODE (inner) == TRUNCATE)
    inner = XEXP (inner, 0);

  inner_mode = GET_MODE (inner);

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode.  For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  if (tmode != BLKmode
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
           && !MEM_P (inner)
           && (pos == 0 || REG_P (inner))
           && (inner_mode == tmode
               || !REG_P (inner)
               || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
               || reg_truncated_to_mode (tmode, inner))
           && (! in_dest
               || (REG_P (inner)
                   && have_insn_for (STRICT_LOW_PART, tmode))))
          || (MEM_P (inner) && pos_rtx == 0
              && (pos
                  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
                     : BITS_PER_UNIT)) == 0
              /* We can't do this if we are widening INNER_MODE (it
                 may not be aligned, for one thing).  */
              && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
              && (inner_mode == tmode
                  || (! mode_dependent_address_p (XEXP (inner, 0),
                                                  MEM_ADDR_SPACE (inner))
                      && ! MEM_VOLATILE_P (inner))))))
    {
7433 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7434 field. If the original and current mode are the same, we need not
7435 adjust the offset. Otherwise, we do if bytes big endian.
7437 If INNER is not a MEM, get a piece consisting of just the field
7438 of interest (in this case POS % BITS_PER_WORD must be 0). */
7442 HOST_WIDE_INT offset
;
7444 /* POS counts from lsb, but make OFFSET count in memory order. */
7445 if (BYTES_BIG_ENDIAN
)
7446 offset
= (GET_MODE_PRECISION (is_mode
) - len
- pos
) / BITS_PER_UNIT
;
7448 offset
= pos
/ BITS_PER_UNIT
;
7450 new_rtx
= adjust_address_nv (inner
, tmode
, offset
);
7452 else if (REG_P (inner
))
7454 if (tmode
!= inner_mode
)
7456 /* We can't call gen_lowpart in a DEST since we
7457 always want a SUBREG (see below) and it would sometimes
7458 return a new hard register. */
7461 HOST_WIDE_INT final_word
= pos
/ BITS_PER_WORD
;
7463 if (WORDS_BIG_ENDIAN
7464 && GET_MODE_SIZE (inner_mode
) > UNITS_PER_WORD
)
7465 final_word
= ((GET_MODE_SIZE (inner_mode
)
7466 - GET_MODE_SIZE (tmode
))
7467 / UNITS_PER_WORD
) - final_word
;
7469 final_word
*= UNITS_PER_WORD
;
7470 if (BYTES_BIG_ENDIAN
&&
7471 GET_MODE_SIZE (inner_mode
) > GET_MODE_SIZE (tmode
))
7472 final_word
+= (GET_MODE_SIZE (inner_mode
)
7473 - GET_MODE_SIZE (tmode
)) % UNITS_PER_WORD
;
7475 /* Avoid creating invalid subregs, for example when
7476 simplifying (x>>32)&255. */
7477 if (!validate_subreg (tmode
, inner_mode
, inner
, final_word
))
7480 new_rtx
= gen_rtx_SUBREG (tmode
, inner
, final_word
);
7483 new_rtx
= gen_lowpart (tmode
, inner
);
7489 new_rtx
= force_to_mode (inner
, tmode
,
7490 len
>= HOST_BITS_PER_WIDE_INT
7492 : (HOST_WIDE_INT_1U
<< len
) - 1, 0);
7494 /* If this extraction is going into the destination of a SET,
7495 make a STRICT_LOW_PART unless we made a MEM. */
7498 return (MEM_P (new_rtx
) ? new_rtx
7499 : (GET_CODE (new_rtx
) != SUBREG
7500 ? gen_rtx_CLOBBER (tmode
, const0_rtx
)
7501 : gen_rtx_STRICT_LOW_PART (VOIDmode
, new_rtx
)));
7506 if (CONST_SCALAR_INT_P (new_rtx
))
7507 return simplify_unary_operation (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
,
7508 mode
, new_rtx
, tmode
);
7510 /* If we know that no extraneous bits are set, and that the high
7511 bit is not set, convert the extraction to the cheaper of
7512 sign and zero extension, that are equivalent in these cases. */
7513 if (flag_expensive_optimizations
7514 && (HWI_COMPUTABLE_MODE_P (tmode
)
7515 && ((nonzero_bits (new_rtx
, tmode
)
7516 & ~(((unsigned HOST_WIDE_INT
)GET_MODE_MASK (tmode
)) >> 1))
7519 rtx temp
= gen_rtx_ZERO_EXTEND (mode
, new_rtx
);
7520 rtx temp1
= gen_rtx_SIGN_EXTEND (mode
, new_rtx
);
7522 /* Prefer ZERO_EXTENSION, since it gives more information to
7524 if (set_src_cost (temp
, mode
, optimize_this_for_speed_p
)
7525 <= set_src_cost (temp1
, mode
, optimize_this_for_speed_p
))
7530 /* Otherwise, sign- or zero-extend unless we already are in the
7533 return (gen_rtx_fmt_e (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
,
7537 /* Unless this is a COMPARE or we have a funny memory reference,
7538 don't do anything with zero-extending field extracts starting at
7539 the low-order bit since they are simple AND operations. */
7540 if (pos_rtx
== 0 && pos
== 0 && ! in_dest
7541 && ! in_compare
&& unsignedp
)
7544 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7545 if the position is not a constant and the length is not 1. In all
7546 other cases, we would only be going outside our object in cases when
7547 an original shift would have been undefined. */
7549 && ((pos_rtx
== 0 && pos
+ len
> GET_MODE_PRECISION (is_mode
))
7550 || (pos_rtx
!= 0 && len
!= 1)))
7553 enum extraction_pattern pattern
= (in_dest
? EP_insv
7554 : unsignedp
? EP_extzv
: EP_extv
);
7556 /* If INNER is not from memory, we want it to have the mode of a register
7557 extraction pattern's structure operand, or word_mode if there is no
7558 such pattern. The same applies to extraction_mode and pos_mode
7559 and their respective operands.
7561 For memory, assume that the desired extraction_mode and pos_mode
7562 are the same as for a register operation, since at present we don't
7563 have named patterns for aligned memory structures. */
7564 struct extraction_insn insn
;
7565 if (get_best_reg_extraction_insn (&insn
, pattern
,
7566 GET_MODE_BITSIZE (inner_mode
), mode
))
7568 wanted_inner_reg_mode
= insn
.struct_mode
;
7569 pos_mode
= insn
.pos_mode
;
7570 extraction_mode
= insn
.field_mode
;
7573 /* Never narrow an object, since that might not be safe. */
7575 if (mode
!= VOIDmode
7576 && GET_MODE_SIZE (extraction_mode
) < GET_MODE_SIZE (mode
))
7577 extraction_mode
= mode
;
7580 wanted_inner_mode
= wanted_inner_reg_mode
;
7583 /* Be careful not to go beyond the extracted object and maintain the
7584 natural alignment of the memory. */
7585 wanted_inner_mode
= smallest_mode_for_size (len
, MODE_INT
);
7586 while (pos
% GET_MODE_BITSIZE (wanted_inner_mode
) + len
7587 > GET_MODE_BITSIZE (wanted_inner_mode
))
7589 wanted_inner_mode
= GET_MODE_WIDER_MODE (wanted_inner_mode
);
7590 gcc_assert (wanted_inner_mode
!= VOIDmode
);
7596 if (BITS_BIG_ENDIAN
)
7598 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7599 BITS_BIG_ENDIAN style. If position is constant, compute new
7600 position. Otherwise, build subtraction.
7601 Note that POS is relative to the mode of the original argument.
7602 If it's a MEM we need to recompute POS relative to that.
7603 However, if we're extracting from (or inserting into) a register,
7604 we want to recompute POS relative to wanted_inner_mode. */
7605 int width
= (MEM_P (inner
)
7606 ? GET_MODE_BITSIZE (is_mode
)
7607 : GET_MODE_BITSIZE (wanted_inner_mode
));
7610 pos
= width
- len
- pos
;
7613 = gen_rtx_MINUS (GET_MODE (pos_rtx
),
7614 gen_int_mode (width
- len
, GET_MODE (pos_rtx
)),
7616 /* POS may be less than 0 now, but we check for that below.
7617 Note that it can only be less than 0 if !MEM_P (inner). */
7620 /* If INNER has a wider mode, and this is a constant extraction, try to
7621 make it smaller and adjust the byte to point to the byte containing
7623 if (wanted_inner_mode
!= VOIDmode
7624 && inner_mode
!= wanted_inner_mode
7626 && GET_MODE_SIZE (wanted_inner_mode
) < GET_MODE_SIZE (is_mode
)
7628 && ! mode_dependent_address_p (XEXP (inner
, 0), MEM_ADDR_SPACE (inner
))
7629 && ! MEM_VOLATILE_P (inner
))
7633 /* The computations below will be correct if the machine is big
7634 endian in both bits and bytes or little endian in bits and bytes.
7635 If it is mixed, we must adjust. */
7637 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7638 adjust OFFSET to compensate. */
7639 if (BYTES_BIG_ENDIAN
7640 && GET_MODE_SIZE (inner_mode
) < GET_MODE_SIZE (is_mode
))
7641 offset
-= GET_MODE_SIZE (is_mode
) - GET_MODE_SIZE (inner_mode
);
7643 /* We can now move to the desired byte. */
7644 offset
+= (pos
/ GET_MODE_BITSIZE (wanted_inner_mode
))
7645 * GET_MODE_SIZE (wanted_inner_mode
);
7646 pos
%= GET_MODE_BITSIZE (wanted_inner_mode
);
7648 if (BYTES_BIG_ENDIAN
!= BITS_BIG_ENDIAN
7649 && is_mode
!= wanted_inner_mode
)
7650 offset
= (GET_MODE_SIZE (is_mode
)
7651 - GET_MODE_SIZE (wanted_inner_mode
) - offset
);
7653 inner
= adjust_address_nv (inner
, wanted_inner_mode
, offset
);
7656 /* If INNER is not memory, get it into the proper mode. If we are changing
7657 its mode, POS must be a constant and smaller than the size of the new
7659 else if (!MEM_P (inner
))
7661 /* On the LHS, don't create paradoxical subregs implicitely truncating
7662 the register unless TRULY_NOOP_TRUNCATION. */
7664 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner
),
7668 if (GET_MODE (inner
) != wanted_inner_mode
7670 || orig_pos
+ len
> GET_MODE_BITSIZE (wanted_inner_mode
)))
7676 inner
= force_to_mode (inner
, wanted_inner_mode
,
7678 || len
+ orig_pos
>= HOST_BITS_PER_WIDE_INT
7680 : (((HOST_WIDE_INT_1U
<< len
) - 1)
7685 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7686 have to zero extend. Otherwise, we can just use a SUBREG. */
7688 && GET_MODE_SIZE (pos_mode
) > GET_MODE_SIZE (GET_MODE (pos_rtx
)))
7690 rtx temp
= simplify_gen_unary (ZERO_EXTEND
, pos_mode
, pos_rtx
,
7691 GET_MODE (pos_rtx
));
7693 /* If we know that no extraneous bits are set, and that the high
7694 bit is not set, convert extraction to cheaper one - either
7695 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7697 if (flag_expensive_optimizations
7698 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx
))
7699 && ((nonzero_bits (pos_rtx
, GET_MODE (pos_rtx
))
7700 & ~(((unsigned HOST_WIDE_INT
)
7701 GET_MODE_MASK (GET_MODE (pos_rtx
)))
7705 rtx temp1
= simplify_gen_unary (SIGN_EXTEND
, pos_mode
, pos_rtx
,
7706 GET_MODE (pos_rtx
));
7708 /* Prefer ZERO_EXTENSION, since it gives more information to
7710 if (set_src_cost (temp1
, pos_mode
, optimize_this_for_speed_p
)
7711 < set_src_cost (temp
, pos_mode
, optimize_this_for_speed_p
))
7717 /* Make POS_RTX unless we already have it and it is correct. If we don't
7718 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7720 if (pos_rtx
== 0 && orig_pos_rtx
!= 0 && INTVAL (orig_pos_rtx
) == pos
)
7721 pos_rtx
= orig_pos_rtx
;
7723 else if (pos_rtx
== 0)
7724 pos_rtx
= GEN_INT (pos
);
7726 /* Make the required operation. See if we can use existing rtx. */
7727 new_rtx
= gen_rtx_fmt_eee (unsignedp
? ZERO_EXTRACT
: SIGN_EXTRACT
,
7728 extraction_mode
, inner
, GEN_INT (len
), pos_rtx
);
7730 new_rtx
= gen_lowpart (mode
, new_rtx
);
/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */
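
/* Illustrative example (not from the original source): with COUNT == 2,
   (plus:SI (ashift:SI X (const_int 2)) (const_int 12)) commutes the
   shift outward, since 12 has its low two bits clear; the function
   returns (plus:SI X (const_int 3)).  */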
static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	{
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
	}
      break;

    default:
      break;
    }

  return 0;
}
/* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
   level of the expression and MODE is its mode.  IN_CODE is as for
   make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
   that should be used when recursing on operands of *X_PTR.

   There are two possible actions:

   - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
     equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.

   - Return a new rtx, which the caller returns directly.  */
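
/* For example (illustrative): (and:SI (lshiftrt:SI X (const_int 8))
   (const_int 255)) is turned back into the compound form
   (zero_extract:SI X (const_int 8) (const_int 8)), which
   make_extraction may then match against an extzv-style pattern.  */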
static rtx
make_compound_operation_int (machine_mode mode, rtx *x_ptr,
			     enum rtx_code in_code,
			     enum rtx_code *next_code_ptr)
{
  rtx x = *x_ptr;
  enum rtx_code next_code = *next_code_ptr;
  enum rtx_code code = GET_CODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  rtx new_rtx = 0;
  int i;
  rtx tem;
  bool equality_comparison = false;

  if (in_code == EQ)
    {
      equality_comparison = true;
      in_code = COMPARE;
    }

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;

	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	  if (GET_CODE (new_rtx) == NEG)
	    {
	      new_rtx = XEXP (new_rtx, 0);
	      multval = -multval;
	    }
	  multval = trunc_int_for_mode (multval, mode);
	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
	}
      break;

    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else if (GET_CODE (lhs) == MULT
	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	}
      maybe_swap_commutative_operands (x);
      return x;

    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else if (GET_CODE (rhs) == MULT
	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
	{
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
	}
      else
	{
	  SUBST (XEXP (x, 0), lhs);
	  SUBST (XEXP (x, 1), rhs);
	  return x;
	}

    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, 1, 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
	  machine_mode inner_mode = GET_MODE (inner_x0);
	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, 1, 0, in_code == COMPARE);

	  /* If we narrowed the mode when dropping the subreg, then
	     we must zero-extend to keep the semantics of the AND.  */
	  if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	    ;
	  else if (SCALAR_INT_MODE_P (inner_mode))
	    new_rtx = simplify_gen_unary (ZERO_EXTEND, mode,
					  new_rtx, inner_mode);
	  else
	    new_rtx = NULL;

	  /* If that didn't give anything, see if the AND simplifies on
	     its own.  */
	  if (!new_rtx && i >= 0)
	    {
	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
					 0, in_code == COMPARE);
	    }
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1)),
				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
						 XEXP (x, 1)));
	  new_rtx = make_compound_operation (new_rtx, in_code);
	}

      /* If we have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation
				     (XEXP (XEXP (x, 0), 0), next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, 1, 0, 1);

      /* If the one operand is a paradoxical subreg of a register or memory and
	 the constant (limited to the smaller mode) has only zero bits where
	 the sub expression has known zero bits, this can be expressed as
	 a zero_extend.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG)
	{
	  rtx sub;

	  sub = XEXP (XEXP (x, 0), 0);
	  machine_mode sub_mode = GET_MODE (sub);
	  if ((REG_P (sub) || MEM_P (sub))
	      && GET_MODE_PRECISION (sub_mode) < mode_width)
	    {
	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
	      unsigned HOST_WIDE_INT mask;

	      /* original AND constant with all the known zero bits set */
	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
	      if ((mask & mode_mask) == mode_mask)
		{
		  new_rtx = make_compound_operation (sub, next_code);
		  new_rtx = make_extraction (mode, new_rtx, 0, 0,
					     GET_MODE_PRECISION (sub_mode),
					     1, 0, in_code == COMPARE);
		}
	    }
	}

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new_rtx = gen_rtx_ASHIFTRT (mode,
				      make_compound_operation (XEXP (x, 0),
							       next_code),
				      XEXP (x, 1));
	  break;
	}

      /* fall through */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
	  && GET_CODE (lhs) == ASHIFT
	  && CONST_INT_P (XEXP (lhs, 1))
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
	  && INTVAL (XEXP (lhs, 1)) >= 0
	  && INTVAL (rhs) < mode_width)
	{
	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && CONST_INT_P (rhs)
	  && INTVAL (rhs) >= 0
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (rhs) < mode_width
	  && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								  next_code),
				   0, NULL_RTX, mode_width - INTVAL (rhs),
				   code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */
      {
	rtx inner = SUBREG_REG (x), simplified;
	enum rtx_code subreg_code = in_code;

	/* If the SUBREG is masking of a logical right shift,
	   make an extraction.  */
	if (GET_CODE (inner) == LSHIFTRT
	    && CONST_INT_P (XEXP (inner, 1))
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
	    && (UINTVAL (XEXP (inner, 1))
		< GET_MODE_PRECISION (GET_MODE (inner)))
	    && subreg_lowpart_p (x))
	  {
	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
	    int width = GET_MODE_PRECISION (GET_MODE (inner))
			- INTVAL (XEXP (inner, 1));
	    if (width > mode_width)
	      width = mode_width;
	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				       width, 1, 0, in_code == COMPARE);
	    break;
	  }

	/* If in_code is COMPARE, it isn't always safe to pass it through
	   to the recursive make_compound_operation call.  */
	if (subreg_code == COMPARE
	    && (!subreg_lowpart_p (x)
		|| GET_CODE (inner) == SUBREG
		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
		   is (const_int 0), rather than
		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).  */
		|| (GET_CODE (inner) == AND
		    && CONST_INT_P (XEXP (inner, 1))
		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
		       >= GET_MODE_BITSIZE (mode))))
	  subreg_code = SET;

	tem = make_compound_operation (inner, subreg_code);

	simplified
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (inner)
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
	    && subreg_lowpart_p (x))
	  {
	    rtx newer
	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    /* force_to_mode can expand compounds.  If it just re-expanded the
	       compound, use gen_lowpart to convert to the desired mode.  */
	    if (rtx_equal_p (newer, x)
		/* Likewise if it re-expanded the compound only partially.
		   This happens for SUBREG of ZERO_EXTRACT if they extract
		   the same number of bits.  */
		|| (GET_CODE (newer) == SUBREG
		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
		    && GET_CODE (inner) == AND
		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
	      return gen_lowpart (GET_MODE (x), tem);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    *x_ptr = gen_lowpart (mode, new_rtx);
  *next_code_ptr = next_code;
  return NULL_RTX;
}
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address it is MEM.  When processing the arguments of
   a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
   precisely it is an equality comparison against zero.  */
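
/* For instance (illustrative): when called on the address inside
   (mem:SI (plus:SI (ashift:SI (reg:SI I) (const_int 2)) (reg:SI B))),
   IN_CODE is MEM, so the shift is rewritten as
   (plus:SI (mult:SI (reg:SI I) (const_int 4)) (reg:SI B)),
   the canonical form for address arithmetic.  */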
rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx new_rtx = 0;
  rtx tem;
  enum rtx_code next_code;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE || in_code == EQ ? SET : in_code);

  if (SCALAR_INT_MODE_P (GET_MODE (x)))
    {
      rtx new_rtx = make_compound_operation_int (GET_MODE (x), &x,
						 in_code, &next_code);
      if (new_rtx)
	return new_rtx;
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (code == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
					    new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
	return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	{
	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
	  SUBST (XVECEXP (x, i, j), new_rtx);
	}

  maybe_swap_commutative_operands (x);
  return x;
}
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */
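
/* Example (illustrative): M == 0x0ff0 selects an 8-bit field starting
   at bit 4, so this returns 4 and sets *PLEN to 8.  M == 0, or a mask
   with scattered bits such as 0x5, returns -1.  */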
static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */
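
/* Illustrative example (not from the original source): narrowing
   (reg:SI R) to QImode yields (subreg:QI (reg:SI R) 0) when the
   truncation is known to be a no-op; otherwise an explicit
   (truncate:QI ...) is generated first.  */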
static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
			      x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */
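
/* Two illustrative uses (not from the original source):
   - force_to_mode ((neg:SI X), SImode, 1, 0) returns X, since the low
     bit of a negation equals the low bit of its operand.
   - force_to_mode ((and:SI X (const_int 255)), SImode, 0xff, 0) drops
     the AND, which is redundant under that mask, and returns X.  */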
static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT fuller_mask, nonzero;
  rtx op0, op1, temp;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = HOST_WIDE_INT_M1U;
  else
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
		   - 1);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (CONST_INT_P (x))
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  /* We can ignore the effect of a SUBREG if it narrows the mode or
     if the constant masks to zero all the bits the mode doesn't have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && ((GET_MODE_SIZE (GET_MODE (x))
	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	  || (0 == (mask
		    & GET_MODE_MASK (GET_MODE (x))
		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);

  /* The arithmetic simplifications here only work for scalar integer modes.  */
  if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
    return gen_lowpart_or_truncate (mode, x);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case TRUNCATE:
      /* Similarly for a truncate.  */
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (CONST_INT_P (XEXP (x, 1)))
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
		  == mask))
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
	      && GET_MODE_MASK (GET_MODE (x)) != mask
	      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
	    {
	      unsigned HOST_WIDE_INT cval
		= UINTVAL (XEXP (x, 1))
		  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
	      rtx y;

	      y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
				       gen_int_mode (cval, GET_MODE (x)));
	      if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
		  < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_PRECISION (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
	  smask |= HOST_WIDE_INT_M1U << width;

	if (CONST_INT_P (XEXP (x, 1))
	    && pow2p_hwi (- smask)
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* fall through */

    case MULT:
      /* Substituting into the operands of a widening MULT is not likely to
	 create RTL matching a machine insn.  */
      if (code == MULT
	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && REG_P (XEXP (XEXP (x, 1), 0)))
	return gen_lowpart_or_truncate (mode, x);

      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
	{
	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
				  GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (CONST_INT_P (XEXP (x, 0))
	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, GET_MODE (x),
				  XEXP (x, 1), GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_PRECISION (GET_MODE (x)))
	  && (UINTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
	{
	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
			       << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);

      /* If we ended up truncating both operands, truncate the result of the
	 operation instead.  */
      if (GET_CODE (op0) == TRUNCATE
	  && GET_CODE (op1) == TRUNCATE)
	{
	  op0 = XEXP (op0, 0);
	  op1 = XEXP (op1, 0);
	}

      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	x = simplify_gen_binary (code, op_mode, op0, op1);
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (CONST_INT_P (XEXP (x, 1))
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), op_mode,
						    mask, next_select));

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && HWI_COMPUTABLE_MODE_P (op_mode))
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
	    op_mode = GET_MODE (x);

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && CONST_INT_P (XEXP (x, 1))
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_PRECISION (GET_MODE (x)))
	  && pow2p_hwi (mask + 1)
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_PRECISION (GET_MODE (x)))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
					  - exact_log2 (mask + 1)));

      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (val_signbit_p (GET_MODE (x), mask))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = HOST_WIDE_INT_M1U;

	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_PRECISION (GET_MODE (x)) ;
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (GET_MODE (x));
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
		   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				 XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    GET_MODE (x),
					    gen_int_mode (mask, GET_MODE (x)),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, GET_MODE (x),
				     force_to_mode (XEXP (x, 0), GET_MODE (x),
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (GET_MODE (x)))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (XOR, GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_unary (code, op_mode, op0, op_mode);
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0
	  && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      op0 = gen_lowpart_or_truncate (GET_MODE (x),
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (GET_MODE (x),
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}
/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */
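
/* For example (illustrative): if X is
   (if_then_else (ne (reg:SI C) (const_int 0)) (const_int 4) (const_int 9)),
   this returns (reg:SI C) with *PTRUE = (const_int 4) and
   *PFALSE = (const_int 9); the NE-against-zero test is canonicalized
   away.  */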
static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }

  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
      cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	    }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
	      return cond0;
	    }
	}

      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }

  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }

  /* If X is a SUBREG, we can narrow both the true and false values
     if the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
					       &true0, &false0)))
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }

  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (mode != VOIDmode && mode != BLKmode
	       && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (HWI_COMPUTABLE_MODE_P (mode)
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */
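
/* For instance (illustrative): known_cond ((abs:SI (reg:SI R)), GE,
   (reg:SI R), (const_int 0)) returns (reg:SI R), since the absolute
   value of a register known to be non-negative is the register
   itself.  */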
static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }

  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	{
	  std::swap (val, reg);
	  cond = swap_condition (cond);
	}

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
	  if (new_rtx)
	    return new_rtx;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */
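/* For instance, on a little-endian target (mem:SI A) compares equal here
   to (subreg:SI (mem:QI A) 0) even though the two are not rtx_equal_p:
   any SUBREG of a MEM is paradoxical, and its low part refers to the same
   bytes.  (A stands for an arbitrary address; illustration only.)  */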
static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
        return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
        return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
        x = adjust_address_nv (x, GET_MODE (y),
                               -subreg_lowpart_offset (GET_MODE (x),
                                                       GET_MODE (y)));
      else
        x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
                      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
                      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
  return 0;
}
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */
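/* For instance, the one-bit set case below rewrites

     (set (reg:SI R) (ior:SI (ashift:SI (const_int 1) (reg:SI P)) (reg:SI R)))

   as

     (set (zero_extract:SI (reg:SI R) (const_int 1) (reg:SI P)) (const_int 1))

   where R and P are illustrative registers.  */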
static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;
  machine_mode mode;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
                                XEXP (SUBREG_REG (XEXP (src, 0)), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (assign, const1_rtx);
      return x;
    }
  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
        ze_mask = -1;
      else
        ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
        return gen_rtx_SET (dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
        {
          mode = GET_MODE (src);
          src = gen_rtx_AND (mode, XEXP (src, 0),
                             gen_int_mode (and_mask & ze_mask, mode));
          return gen_rtx_SET (dest, src);
        }
    }
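  /* A worked example for the masks above, assuming a field width of 8 so
     that ZE_MASK is 0xff: an AND_MASK of 0x1ff covers the whole field
     ((0x1ff & 0xff) == 0xff), so the AND is removed entirely; an AND_MASK
     of 0xf0f0 overlaps it only partially ((0xf0f0 & 0xff) == 0xf0), so the
     AND is narrowed to the constant 0xf0.  */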
  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
           && paradoxical_subreg_p (XEXP (rhs, 0))
           && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
           && CONST_INT_P (XEXP (rhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
           && paradoxical_subreg_p (XEXP (lhs, 0))
           && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
  if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
      || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  mode = (GET_CODE (assign) == STRICT_LOW_PART
          ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                     GET_MODE (src),
                                                     other, pos),
                               dest);
  src = force_to_mode (src, mode,
                       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
                       ? HOST_WIDE_INT_M1U
                       : (HOST_WIDE_INT_1U << len) - 1,
                       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
         == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (assign, src);
}
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */
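/* For instance, (ior (and A C) (and B C)) becomes (and (ior A B) C) here,
   and, via the XOR/IOR exception noted in the code below,
   (xor (ior A C) (ior B C)) becomes (and (xor A B) (not C)).  */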
static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
        return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
        return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    /* This used to handle SUBREG, but this turned out to be counter-
       productive, since (subreg (op ...)) usually is not handled by
       insn patterns, and this "optimization" therefore transformed
       recognizable patterns into unrecognizable ones.  Therefore the
       SUBREG case was removed from here.

       It is possible that distributing SUBREG over arithmetic operations
       leads to an intermediate result that can then be optimized further,
       e.g. by moving the outer SUBREG to the other side of a SET as done
       in simplify_set.  This seems to have been the original intent of
       handling SUBREGs here.

       However, with current GCC this does not appear to actually happen,
       at least on major platforms.  If some case is found where removing
       the SUBREG case here prevents follow-on optimizations, distributing
       SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
                              apply_distributive_law (tem), other);
}
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and A (not B)) (and B (not B))),
   which then simplifies to (and A (not B)).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
                                                     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
          < set_src_cost (x, mode, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
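/* For instance, assuming X is known to be either 0 or 1, the NEG rule
   below turns (and:SI (neg:SI X) (const_int 4)) into
   (ashift:SI X (const_int 2)): (neg X) is 0 or -1, so masking it with a
   single bit is the same as shifting X up to that bit.  */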
static rtx
simplify_and_const_int_1 (machine_mode mode, rtx varop,
                          unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
        (mode,
         apply_distributive_law
         (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
                               simplify_and_const_int (NULL_RTX,
                                                       GET_MODE (varop),
                                                       XEXP (varop, 0),
                                                       constop),
                               simplify_and_const_int (NULL_RTX,
                                                       GET_MODE (varop),
                                                       XEXP (varop, 1),
                                                       constop))));

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && pow2p_hwi (constop + 1))
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
        return o1;
      if (o1 == const0_rtx)
        return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */
static rtx
simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
                        unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
                             gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   a shift, AND, or zero_extract, we can do better.  */
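/* For instance, if the only recorded assignment to pseudo 100 is
   (set (reg:SI 100) (and:SI ... (const_int 255))), then *NONZERO is
   narrowed here to 0xff instead of the full GET_MODE_MASK (SImode).
   (Register number chosen for illustration only.)  */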
static rtx
reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
                              const_rtx known_x ATTRIBUTE_UNUSED,
                              machine_mode known_mode ATTRIBUTE_UNUSED,
                              unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
                              unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
          || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
              && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      /* Note that, even if the precision of last_set_mode is lower than that
         of mode, record_value_for_reg invoked nonzero_bits on the register
         with nonzero_bits_mode (because last_set_mode is necessarily integral
         and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
         are all valid, hence in mode too since nonzero_bits_mode is defined
         to the largest HWI_COMPUTABLE_MODE_P mode.  */
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem)
    {
      if (SHORT_IMMEDIATES_SIGN_EXTEND)
        tem = sign_extend_short_imm (tem, GET_MODE (x),
                                     GET_MODE_PRECISION (mode));

      return tem;
    }

  if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
        /* We don't know anything about the upper bits.  */
        mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));

      *nonzero &= mask;
    }

  return NULL;
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */
static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
                                     const_rtx known_x ATTRIBUTE_UNUSED,
                                     machine_mode known_mode
                                     ATTRIBUTE_UNUSED,
                                     unsigned int known_ret ATTRIBUTE_UNUSED,
                                     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */
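/* For instance, for an unsigned X in SImode (32 bits) with
   nonzero_bits (x, mode) == 0xff, this returns
   31 - floor_log2 (0xff) == 31 - 7 == 24 spare bits.  */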
unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  return (unsignedp
          ? (HWI_COMPUTABLE_MODE_P (mode)
             ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
                               - floor_log2 (nonzero_bits (x, mode)))
             : 0)
          : num_sign_bit_copies (x, mode) - 1);
}
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only
   NEG, PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if
   the proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
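/* For instance, merging an inner IOR with constant 0xf0 into an outer IOR
   with constant 0x0f yields a single IOR with constant 0xff.  Merging an
   inner AND into an outer XOR of the same constant uses the identity
   (a & b) ^ b == (~a) & b, which is why *PCOMP_P may need to be set.  */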
static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
                 enum rtx_code op1, HOST_WIDE_INT const1,
                 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
        {
        case AND:
          const0 &= const1;
          break;
        case IOR:
          const0 |= const1;
          break;
        case XOR:
          const0 ^= const1;
          break;
        case PLUS:
          const0 += const1;
          break;
        case NEG:
          op0 = UNKNOWN;
          break;
        default:
          break;
        }
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
        if (op1 == AND)
          /* (a & b) | b == b */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) | b == a | b */
          {;}
        break;

      case XOR:
        if (op1 == AND)
          /* (a & b) ^ b == (~a) & b */
          op0 = AND, *pcomp_p = 1;
        else /* op1 == IOR */
          /* (a | b) ^ b == a & ~b */
          op0 = AND, const0 = ~const0;
        break;

      case AND:
        if (op1 == IOR)
          /* (a | b) & b == b */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) & b == (~a) & b */
          *pcomp_p = 1;
        break;
      default:
        break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
           && op0 == AND)
    op0 = UNKNOWN;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  *pop0 = op0;
  return 1;
}
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */
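/* For instance, (lshiftrt:QI X C) can be performed in SImode when the
   bits of X above the low QImode part are known to be zero in SImode,
   since the bits shifted in from the left are then zero either way.  */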
static machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
                      machine_mode orig_mode, machine_mode mode,
                      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  if (orig_mode == mode)
    return mode;
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
         to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
          > (unsigned) (GET_MODE_PRECISION (mode)
                        - GET_MODE_PRECISION (orig_mode)))
        return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
        return mode;

      /* We can also widen if the bits brought in will be masked off.  This
         operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
        {
          int care_bits = low_bitmask_len (orig_mode, outer_const);

          if (care_bits >= 0
              && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
            return mode;
        }
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
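/* Among the simplifications done below: two shifts of the same kind merge
   into one, e.g. (ashift:SI (ashift:SI X (const_int 2)) (const_int 3))
   becomes (ashift:SI X (const_int 5)); and opposite shifts become a mask,
   e.g. (lshiftrt:SI (ashift:SI X (const_int 3)) (const_int 3)) becomes
   (and:SI X (const_int 0x1fffffff)).  */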
static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
                        rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode, tmode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure to truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
        return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
        {
          unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
          code = ROTATE;
          count = bitsize - count;
        }

      shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
                                         mode, outer_op, outer_const);
      machine_mode shift_unit_mode = GET_MODE_INNER (shift_mode);

      /* Handle cases where the count is greater than the size of the mode
         minus 1.  For ASHIFT, use the size minus one as the count (this can
         occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
         take the count modulo the size.  For other shifts, the result is
         zero.

         Since these shifts are being produced by the compiler by combining
         multiple operations, each of which are defined, we know what the
         result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
        {
          if (code == ASHIFTRT)
            count = GET_MODE_PRECISION (shift_unit_mode) - 1;
          else if (code == ROTATE || code == ROTATERT)
            count %= GET_MODE_PRECISION (shift_unit_mode);
          else
            {
              /* We can't simply return zero because there may be an
                 outer op.  */
              varop = const0_rtx;
              count = 0;
              break;
            }
        }

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
         here would cause an infinite loop.  */
      if (complement_p)
        break;

      if (shift_mode == shift_unit_mode)
        {
          /* An arithmetic right shift of a quantity known to be -1 or 0
             is a no-op.  */
          if (code == ASHIFTRT
              && (num_sign_bit_copies (varop, shift_unit_mode)
                  == GET_MODE_PRECISION (shift_unit_mode)))
            {
              count = 0;
              break;
            }

          /* If we are doing an arithmetic right shift and discarding all but
             the sign bit copies, this is equivalent to doing a shift by the
             bitsize minus one.  Convert it into that shift because it will
             often allow other simplifications.  */

          if (code == ASHIFTRT
              && (count + num_sign_bit_copies (varop, shift_unit_mode)
                  >= GET_MODE_PRECISION (shift_unit_mode)))
            count = GET_MODE_PRECISION (shift_unit_mode) - 1;

          /* We simplify the tests below and elsewhere by converting
             ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
             `make_compound_operation' will convert it to an ASHIFTRT for
             those machines (such as VAX) that don't have an LSHIFTRT.  */
          if (code == ASHIFTRT
              && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
              && val_signbit_known_clear_p (shift_unit_mode,
                                            nonzero_bits (varop,
                                                          shift_unit_mode)))
            code = LSHIFTRT;

          if (((code == LSHIFTRT
                && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
                && !(nonzero_bits (varop, shift_unit_mode) >> count))
               || (code == ASHIFT
                   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
                   && !((nonzero_bits (varop, shift_unit_mode) << count)
                        & GET_MODE_MASK (shift_unit_mode))))
              && !side_effects_p (varop))
            varop = const0_rtx;
        }
      switch (GET_CODE (varop))
        {
        case SIGN_EXTEND:
        case ZERO_EXTEND:
        case SIGN_EXTRACT:
        case ZERO_EXTRACT:
          new_rtx = expand_compound_operation (varop);
          if (new_rtx != varop)
            {
              varop = new_rtx;
              continue;
            }
          break;

        case MEM:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
             minus the width of a smaller mode, we can do this with a
             SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
          if ((code == ASHIFTRT || code == LSHIFTRT)
              && ! mode_dependent_address_p (XEXP (varop, 0),
                                             MEM_ADDR_SPACE (varop))
              && ! MEM_VOLATILE_P (varop)
              && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
                                         MODE_INT, 1)) != BLKmode)
            {
              new_rtx = adjust_address_nv (varop, tmode,
                                           BYTES_BIG_ENDIAN ? 0
                                           : count / BITS_PER_UNIT);

              varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
                                     : ZERO_EXTEND, mode, new_rtx);
              count = 0;
              continue;
            }
          break;

        case SUBREG:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* If VAROP is a SUBREG, strip it as long as the inner operand has
             the same number of words as what we've seen so far.  Then store
             the widest mode in MODE.  */
          if (subreg_lowpart_p (varop)
              && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
                  > GET_MODE_SIZE (GET_MODE (varop)))
              && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
                                  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                 == mode_words
              && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
              && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
            {
              varop = SUBREG_REG (varop);
              if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
                mode = GET_MODE (varop);
              continue;
            }
          break;

        case MULT:
          /* Some machines use MULT instead of ASHIFT because MULT
             is cheaper.  But it is still better on those machines to
             merge two shifts into one.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
            {
              varop
                = simplify_gen_binary (ASHIFT, GET_MODE (varop),
                                       XEXP (varop, 0),
                                       GEN_INT (exact_log2 (
                                                UINTVAL (XEXP (varop, 1)))));
              continue;
            }
          break;

        case UDIV:
          /* Similar, for when divides are cheaper.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
            {
              varop
                = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
                                       XEXP (varop, 0),
                                       GEN_INT (exact_log2 (
                                                UINTVAL (XEXP (varop, 1)))));
              continue;
            }
          break;
        case ASHIFTRT:
          /* If we are extracting just the sign bit of an arithmetic
             right shift, that shift is not needed.  However, the sign
             bit of a wider mode may be different from what would be
             interpreted as the sign bit in a narrower mode, so, if
             the result is narrower, don't discard the shift.  */
          if (code == LSHIFTRT
              && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
              && (GET_MODE_UNIT_BITSIZE (result_mode)
                  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* fall through */

        case LSHIFTRT:
        case ASHIFT:
        case ROTATE:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* Here we have two nested shifts.  The result is usually the
             AND of a new shift with a mask.  We compute the result below.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && INTVAL (XEXP (varop, 1)) >= 0
              && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
              && HWI_COMPUTABLE_MODE_P (result_mode)
              && HWI_COMPUTABLE_MODE_P (mode))
            {
              enum rtx_code first_code = GET_CODE (varop);
              unsigned int first_count = INTVAL (XEXP (varop, 1));
              unsigned HOST_WIDE_INT mask;
              rtx mask_rtx;

              /* We have one common special case.  We can't do any merging if
                 the inner code is an ASHIFTRT of a smaller mode.  However, if
                 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
                 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
                 we can convert it to
                 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
                 This simplifies certain SIGN_EXTEND operations.  */
              if (code == ASHIFT && first_code == ASHIFTRT
                  && count == (GET_MODE_PRECISION (result_mode)
                               - GET_MODE_PRECISION (GET_MODE (varop))))
                {
                  /* C3 has the low-order C1 bits zero.  */

                  mask = GET_MODE_MASK (mode)
                         & ~((HOST_WIDE_INT_1U << first_count) - 1);

                  varop = simplify_and_const_int (NULL_RTX, result_mode,
                                                  XEXP (varop, 0), mask);
                  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
                                                varop, count);
                  count = first_count;
                  code = ASHIFTRT;
                  continue;
                }

              /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
                 than C1 high-order bits equal to the sign bit, we can convert
                 this to either an ASHIFT or an ASHIFTRT depending on the
                 two counts.

                 We cannot do this if VAROP's mode is not SHIFT_MODE.  */

              if (code == ASHIFTRT && first_code == ASHIFT
                  && GET_MODE (varop) == shift_mode
                  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
                      > first_count))
                {
                  varop = XEXP (varop, 0);
                  count -= first_count;
                  if (count < 0)
                    {
                      count = -count;
                      code = ASHIFT;
                    }

                  continue;
                }

              /* There are some cases we can't do.  If CODE is ASHIFTRT,
                 we can only do this if FIRST_CODE is also ASHIFTRT.

                 We can't do the case when CODE is ROTATE and FIRST_CODE is
                 ASHIFTRT.

                 If the mode of this shift is not the mode of the outer shift,
                 we can't do this if either shift is a right shift or ROTATE.

                 Finally, we can't do any of these if the mode is too wide
                 unless the codes are the same.

                 Handle the case where the shift codes are the same
                 first.  */

              if (code == first_code)
                {
                  if (GET_MODE (varop) != result_mode
                      && (code == ASHIFTRT || code == LSHIFTRT
                          || code == ROTATE))
                    break;

                  count += first_count;
                  varop = XEXP (varop, 0);
                  continue;
                }

              if (code == ASHIFTRT
                  || (code == ROTATE && first_code == ASHIFTRT)
                  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
                  || (GET_MODE (varop) != result_mode
                      && (first_code == ASHIFTRT || first_code == LSHIFTRT
                          || first_code == ROTATE
                          || code == ROTATE)))
                break;

              /* To compute the mask to apply after the shift, shift the
                 nonzero bits of the inner shift the same way the
                 outer shift will.  */

              mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
                                       result_mode);
              mask_rtx
                = simplify_const_binary_operation (code, result_mode, mask_rtx,
                                                   GEN_INT (count));

              /* Give up if we can't compute an outer operation to use.  */
              if (mask_rtx == 0
                  || !CONST_INT_P (mask_rtx)
                  || ! merge_outer_ops (&outer_op, &outer_const, AND,
                                        INTVAL (mask_rtx),
                                        result_mode, &complement_p))
                break;

              /* If the shifts are in the same direction, we add the
                 counts.  Otherwise, we subtract them.  */
              if ((code == ASHIFTRT || code == LSHIFTRT)
                  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
                count += first_count;
              else
                count -= first_count;

              /* If COUNT is positive, the new shift is usually CODE,
                 except for the two exceptions below, in which case it is
                 FIRST_CODE.  If the count is negative, FIRST_CODE should
                 always be used.  */
              if (count > 0
                  && ((first_code == ROTATE && code == ASHIFT)
                      || (first_code == ASHIFTRT && code == LSHIFTRT)))
                code = first_code;
              else if (count < 0)
                code = first_code, count = -count;

              varop = XEXP (varop, 0);
              continue;
            }

          /* If we have (A << B << C) for any shift, we can convert this to
             (A << C << B).  This wins if A is a constant.  Only try this if
             B is not a constant.  */

          else if (GET_CODE (varop) == code
                   && CONST_INT_P (XEXP (varop, 0))
                   && !CONST_INT_P (XEXP (varop, 1)))
            {
              /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
                 sure the result will be masked.  See PR70222.  */
              if (code == LSHIFTRT
                  && mode != result_mode
                  && !merge_outer_ops (&outer_op, &outer_const, AND,
                                       GET_MODE_MASK (result_mode)
                                       >> orig_count, result_mode,
                                       &complement_p))
                break;
              /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
                 up outer sign extension (often left and right shift) is
                 hardly more efficient than the original.  See PR70429.  */
              if (code == ASHIFTRT && mode != result_mode)
                break;

              rtx new_rtx = simplify_const_binary_operation (code, mode,
                                                             XEXP (varop, 0),
                                                             GEN_INT (count));
              varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
              count = 0;
              continue;
            }
          break;
        case NOT:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* Make this fit the case below.  */
          varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
          continue;

        case IOR:
        case AND:
        case XOR:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have an (le X 0) operation.  If we have an arithmetic shift
             and STORE_FLAG_VALUE is 1 or we have a logical shift with
             STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

          if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
              && XEXP (XEXP (varop, 0), 1) == constm1_rtx
              && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (GET_MODE (varop), varop);

              continue;
            }

          /* If we have (shift (logical)), move the logical to the outside
             to allow it to possibly combine with another logical and the
             shift to combine with another shift.  This also canonicalizes to
             what a ZERO_EXTRACT looks like.  Also, some machines have
             (and (shift)) insns.  */

          if (CONST_INT_P (XEXP (varop, 1))
              /* We can't do this if we have (ashiftrt (xor)) and the
                 constant has its sign bit set in shift_mode with shift_mode
                 wider than result_mode.  */
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && result_mode != shift_mode
                   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                              shift_mode))
              && (new_rtx = simplify_const_binary_operation
                  (code, result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
                   GEN_INT (count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
                                  INTVAL (new_rtx), result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* If we can't do that, try to simplify the shift in each arm of the
             logical expression, make a new logical expression, and apply
             the inverse distributive law.  This also can't be done for
             (ashiftrt (xor)) where we've widened the shift and the constant
             changes the sign bit.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && result_mode != shift_mode
                   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                              shift_mode)))
            {
              rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
                                              XEXP (varop, 0), count);
              rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
                                              XEXP (varop, 1), count);

              varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
                                           lhs, rhs);
              varop = apply_distributive_law (varop);

              count = 0;
              continue;
            }
          break;
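        /* For instance, the logical-op leg above rewrites
           (lshiftrt:SI (and:SI X (const_int 0xff0)) (const_int 4)) as
           (and:SI (lshiftrt:SI X (const_int 4)) (const_int 0xff)): the AND
           moves outside the shift with its constant shifted the same way.  */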
        case EQ:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
             says that the sign bit can be tested, FOO has mode MODE, C is
             GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
             that may be nonzero.  */
          if (code == LSHIFTRT
              && XEXP (varop, 1) == const0_rtx
              && GET_MODE (XEXP (varop, 0)) == result_mode
              && count == (GET_MODE_PRECISION (result_mode) - 1)
              && HWI_COMPUTABLE_MODE_P (result_mode)
              && STORE_FLAG_VALUE == -1
              && nonzero_bits (XEXP (varop, 0), result_mode) == 1
              && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              count = 0;
              continue;
            }
          break;

        case NEG:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
             than the number of bits in the mode is equivalent to A.  */
          if (code == LSHIFTRT
              && count == (GET_MODE_PRECISION (result_mode) - 1)
              && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
            {
              varop = XEXP (varop, 0);
              count = 0;
              continue;
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Move the
             NEG outside to allow shifts to combine.  */
          if (code == ASHIFT
              && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }
          break;

        case PLUS:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
             is one less than the number of bits in the mode is
             equivalent to (xor A 1).  */
          if (code == LSHIFTRT
              && count == (GET_MODE_PRECISION (result_mode) - 1)
              && XEXP (varop, 1) == constm1_rtx
              && nonzero_bits (XEXP (varop, 0), result_mode) == 1
              && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
                                  &complement_p))
            {
              count = 0;
              varop = XEXP (varop, 0);
              continue;
            }

          /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
             that might be nonzero in BAR are those being shifted out and those
             bits are known zero in FOO, we can replace the PLUS with FOO.
             Similarly in the other operand order.  This code occurs when
             we are computing the size of a variable-size array.  */

          if ((code == ASHIFTRT || code == LSHIFTRT)
              && count < HOST_BITS_PER_WIDE_INT
              && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
              && (nonzero_bits (XEXP (varop, 1), result_mode)
                  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
            {
              varop = XEXP (varop, 0);
              continue;
            }
          else if ((code == ASHIFTRT || code == LSHIFTRT)
                   && count < HOST_BITS_PER_WIDE_INT
                   && HWI_COMPUTABLE_MODE_P (result_mode)
                   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
                            >> count)
                   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
                            & nonzero_bits (XEXP (varop, 1),
                                            result_mode)))
            {
              varop = XEXP (varop, 1);
              continue;
            }

          /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
          if (code == ASHIFT
              && CONST_INT_P (XEXP (varop, 1))
              && (new_rtx = simplify_const_binary_operation
                  (ASHIFT, result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
                   GEN_INT (count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, PLUS,
                                  INTVAL (new_rtx), result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* Check for 'PLUS signbit', which is the canonical form of 'XOR
             signbit', and attempt to change the PLUS to an XOR and move it to
             the outer operation as is done above in the AND/IOR/XOR case
             leg for shift(logical).  See details in logical handling above
             for reasoning in doing so.  */
          if (code == LSHIFTRT
              && CONST_INT_P (XEXP (varop, 1))
              && mode_signbit_p (result_mode, XEXP (varop, 1))
              && (new_rtx = simplify_const_binary_operation
                  (code, result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
                   GEN_INT (count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, XOR,
                                  INTVAL (new_rtx), result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          break;
        case MINUS:
          /* The following rules apply only to scalars.  */
          if (shift_mode != shift_unit_mode)
            break;

          /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have a (gt X 0) operation.  If the shift is arithmetic with
             STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
             we have a (neg (gt X 0)) operation.  */

          if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
              && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
              && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (GET_MODE (varop), varop);

              continue;
            }
          break;

        case TRUNCATE:
          /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
             if the truncate does not affect the value.  */
          if (code == LSHIFTRT
              && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
              && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
              && (INTVAL (XEXP (XEXP (varop, 0), 1))
                  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
                      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
            {
              rtx varop_inner = XEXP (varop, 0);

              varop_inner
                = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
                                    XEXP (varop_inner, 0),
                                    GEN_INT
                                    (count + INTVAL (XEXP (varop_inner, 1))));
              varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
              count = 0;
              continue;
            }
          break;

        default:
          break;
        }

      break;
    }
  shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
                                     outer_op, outer_const);

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    x = simplify_and_const_int (NULL_RTX, shift_mode, x,
                                GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
          && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
        outer_const = trunc_int_for_mode (outer_const, result_mode);

      if (outer_op == AND)
        x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
      else if (outer_op == SET)
        {
          /* This means that we have determined that the result is
             equivalent to a constant.  This should be rare.  */
          if (!side_effects_p (x))
            x = GEN_INT (outer_const);
        }
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
        x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
      else
        x = simplify_gen_binary (outer_op, result_mode, x,
                                 GEN_INT (outer_const));
    }

  return x;
}
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
                      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* A subroutine of recog_for_combine.  See there for arguments and
   return value.  */

static int
recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
	return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
	  {
	    if (i != pos)
	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
	    pos++;
	  }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
	pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (insn_code_number < 0)
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
	  print_rtl_single (dump_file, pat);
	}
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets; these will be killed by a followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
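  /* E.g., on machines where arithmetic sets a condition-code register, a
     combined pattern may only match with an extra (clobber (reg:CC ...));
     recog reports how many such clobbers it wants through its third
     argument, and they are accepted below only when the clobbered register
     is dead at INSN.  */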
  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
				     rtvec_alloc (GET_CODE (pat) == PARALLEL
						  ? (XVECLEN (pat, 0)
						     + num_clobbers_to_add)
						  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
	for (i = 0; i < XVECLEN (pat, 0); i++)
	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
	XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
	   i < XVECLEN (newpat, 0); i++)
	{
	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
	    return -1;
	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
	    {
	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
	      notes = alloc_reg_note (REG_UNUSED,
				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
	    }
	}
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;
      INSN_CODE (insn) = insn_code_number;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fputs ("Instruction not appropriate for target.",
		   dump_file);

	  /* Callers expect recog_for_combine to strip
	     clobbers from the pattern on failure.  */
	  pat = pat_without_clobbers;
	  notes = NULL_RTX;

	  insn_code_number = -1;
	}

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
   expressed as an AND and maybe an LSHIFTRT, to that formulation.
   Return whether anything was so changed.  */
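/* E.g., with !BITS_BIG_ENDIAN, (zero_extract:SI (reg:SI 1) (const_int 8)
   (const_int 4)) becomes (and:SI (lshiftrt:SI (reg:SI 1) (const_int 4))
   (const_int 255)), and (zero_extend:SI (subreg:QI (reg:SI 2) 0)) becomes
   (and:SI (reg:SI 2) (const_int 255)).  */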
static bool
change_zero_ext (rtx pat)
{
  bool changed = false;
  rtx *src = &SET_SRC (pat);

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
    {
      rtx x = **iter;
      machine_mode mode = GET_MODE (x);
      int size;

      if (GET_CODE (x) == ZERO_EXTRACT
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (x, 2))
	  && GET_MODE (XEXP (x, 0)) == mode)
	{
	  size = INTVAL (XEXP (x, 1));

	  int start = INTVAL (XEXP (x, 2));
	  if (BITS_BIG_ENDIAN)
	    start = GET_MODE_PRECISION (mode) - size - start;

	  if (start)
	    x = gen_rtx_LSHIFTRT (mode, XEXP (x, 0), GEN_INT (start));
	  else
	    x = XEXP (x, 0);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && SCALAR_INT_MODE_P (mode)
	       && GET_CODE (XEXP (x, 0)) == SUBREG
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
	       && !paradoxical_subreg_p (XEXP (x, 0))
	       && subreg_lowpart_p (XEXP (x, 0)))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = SUBREG_REG (XEXP (x, 0));
	  if (GET_MODE (x) != mode)
	    x = gen_lowpart_SUBREG (mode, x);
	}
      else if (GET_CODE (x) == ZERO_EXTEND
	       && SCALAR_INT_MODE_P (mode)
	       && REG_P (XEXP (x, 0))
	       && HARD_REGISTER_P (XEXP (x, 0))
	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
	{
	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
	}
      else
	continue;

      wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
      x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));

      SUBST (**iter, x);
      changed = true;
    }

  if (changed)
    FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
      maybe_swap_commutative_operands (**iter);

  rtx *dst = &SET_DEST (pat);
  if (GET_CODE (*dst) == ZERO_EXTRACT
      && REG_P (XEXP (*dst, 0))
      && CONST_INT_P (XEXP (*dst, 1))
      && CONST_INT_P (XEXP (*dst, 2)))
    {
      rtx reg = XEXP (*dst, 0);
      int width = INTVAL (XEXP (*dst, 1));
      int offset = INTVAL (XEXP (*dst, 2));
      machine_mode mode = GET_MODE (reg);
      int reg_width = GET_MODE_PRECISION (mode);
      if (BITS_BIG_ENDIAN)
	offset = reg_width - width - offset;

      rtx x, y, z, w;
      wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
      wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
      x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
      if (offset)
	y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
      else
	y = SET_SRC (pat);
      z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
      w = gen_rtx_IOR (mode, x, z);
      SUBST (SET_DEST (pat), reg);
      SUBST (SET_SRC (pat), w);

      changed = true;
    }

  return changed;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.  If the
   instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
   to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
   (and undo if that fails).

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   or -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
  if (insn_code_number >= 0 || check_asm_operands (pat))
    return insn_code_number;

  void *marker = get_undo_marker ();
  bool changed = false;

  if (GET_CODE (pat) == SET)
    changed = change_zero_ext (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx set = XVECEXP (pat, 0, i);
	  if (GET_CODE (set) == SET)
	    changed |= change_zero_ext (set);
	}
    }

  if (changed)
    {
      insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);

      if (insn_code_number < 0)
	undo_to_marker (marker);
    }

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */
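/* E.g., asking for a DImode view of (mem:SI addr) takes the memory case
   below with ISIZE < OSIZE, so a paradoxical (subreg:DI (mem:SI addr) 0)
   is returned instead of a memref of a wider mode.  */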
static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
	 further below, we need to adjust the known mode and size of
	 x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
	return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
	 address.  */
      if (MEM_VOLATILE_P (x)
	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
	goto fail;

      /* If we want to refer to something bigger than the original memref,
	 generate a paradoxical subreg instead.  That will force a reload
	 of the original memref X.  */
      if (isize < osize)
	return gen_rtx_SUBREG (omode, x, 0);

      if (WORDS_BIG_ENDIAN)
	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
	 unchanged.  */
      if (BYTES_BIG_ENDIAN)
	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      rtx res;

      if (imode == VOIDmode)
	{
	  imode = int_mode_for_mode (omode);
	  x = gen_lowpart_common (imode, x);
	  if (x == NULL)
	    goto fail;
	}
      res = lowpart_subreg (omode, x, imode);
      if (res)
	return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
/* Try to simplify a comparison between OP0 and a constant OP1,
   where CODE is the comparison code that will be tested, into a
   (CODE OP0 const0_rtx) form.

   The result is a possibly different comparison code to use.
   *POP1 may be updated.  */
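/* E.g., (ltu X (const_int 5)) becomes (leu X (const_int 4)), and when X is
   known to be (and Y (const_int 8)), (eq X (const_int 8)) becomes
   (ne X (const_int 0)).  */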
static enum rtx_code
simplify_compare_const (enum rtx_code code, machine_mode mode,
			rtx op0, rtx *pop1)
{
  unsigned int mode_width = GET_MODE_PRECISION (mode);
  HOST_WIDE_INT const_op = INTVAL (*pop1);

  /* Get the constant we are comparing against and turn off all bits
     not on in our mode.  */
  if (mode != VOIDmode)
    const_op = trunc_int_for_mode (const_op, mode);

  /* If we are comparing against a constant power of two and the value
     being compared can only have that single bit nonzero (e.g., it was
     `and'ed with that bit), we can replace this with a comparison
     with zero.  */
  if (const_op
      && (code == EQ || code == NE || code == GE || code == GEU
	  || code == LT || code == LTU)
      && mode_width - 1 < HOST_BITS_PER_WIDE_INT
      && pow2p_hwi (const_op & GET_MODE_MASK (mode))
      && (nonzero_bits (op0, mode)
	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
    {
      code = (code == EQ || code == GE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
	  || code == GEU || code == LTU)
      && num_sign_bit_copies (op0, mode) == mode_width)
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }

  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LE;
	  /* ... fall through to LE case below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = LT;
	}

      /* If we are doing a <= 0 comparison on a value known to have
	 a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & (HOST_WIDE_INT_1U << (mode_width - 1)))
		  == 0)
	code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = GT;
	  /* ... fall through to GT below.  */
	  gcc_fallthrough ();
	}
      else
	break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
	{
	  const_op += 1;
	  code = GE;
	}

      /* If we are doing a > 0 comparison on a value known to have
	 a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (nonzero_bits (op0, mode)
		   & (HOST_WIDE_INT_1U << (mode_width - 1)))
		  == 0)
	code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
	{
	  const_op -= 1;
	  code = LEU;
	  /* ... fall through ...  */
	}
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == HOST_WIDE_INT_1U << (mode_width - 1))
	{
	  const_op = 0;
	  code = GE;
	  break;
	}
      else
	break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
	code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = GE;
	}
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
	{
	  const_op -= 1;
	  code = GTU;
	  /* ... fall through ...  */
	}
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == HOST_WIDE_INT_1U << (mode_width - 1))
	{
	  const_op = 0;
	  code = LT;
	  break;
	}
      else
	break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
	code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
	       && (unsigned HOST_WIDE_INT) const_op
		  == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
	{
	  const_op = 0;
	  code = LT;
	}
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  machine_mode mode, tmode;

  /* Try a few ways of applying the same transformation to both operands.  */
  while (1)
    {
      /* The test below this one won't handle SIGN_EXTENDs on these machines,
	 so check specially.  */
      if (!WORD_REGISTER_OPERATIONS
	  && code != GTU && code != GEU && code != LTU && code != LEU
	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
	  && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
	      == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
	  && CONST_INT_P (XEXP (op0, 1))
	  && XEXP (op0, 1) == XEXP (op1, 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
	  && (INTVAL (XEXP (op0, 1))
	      == (GET_MODE_PRECISION (GET_MODE (op0))
		  - (GET_MODE_PRECISION
		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
	{
	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
	}
      /* If both operands are the same constant shift, see if we can ignore the
	 shift.  We can if the shift is a rotate or if the bits shifted out of
	 this shift are known to be zero for both inputs and if the type of
	 comparison is compatible with the shift.  */
      if (GET_CODE (op0) == GET_CODE (op1)
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
		  && (code != GT && code != LT && code != GE && code != LE))
	      || (GET_CODE (op0) == ASHIFTRT
		  && (code != GTU && code != LTU
		      && code != GEU && code != LEU)))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) >= 0
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	  && XEXP (op0, 1) == XEXP (op1, 1))
	{
	  machine_mode mode = GET_MODE (op0);
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  int shift_count = INTVAL (XEXP (op0, 1));

	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
	    mask &= (mask >> shift_count) << shift_count;
	  else if (GET_CODE (op0) == ASHIFT)
	    mask = (mask & (mask << shift_count)) >> shift_count;

	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
	  else
	    break;
	}
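      /* E.g., (eq (lshiftrt A (const_int 2)) (lshiftrt B (const_int 2)))
	 becomes (eq A B) when the low two bits of both A and B are known
	 to be zero, since then the shifts discard no information.  */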
      /* If both operands are AND's of a paradoxical SUBREG by constant, the
	 SUBREGs are of the same mode, and, in both cases, the AND would
	 be redundant if the comparison was done in the narrower mode,
	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
	 and the operand's possibly nonzero bits are 0xffffff01; in that case
	 if we only care about QImode, we don't need the AND).  This case
	 occurs if the output mode of an scc insn is not SImode and
	 STORE_FLAG_VALUE == 1 (e.g., the 386).

	 Similarly, check for a case where the AND's are ZERO_EXTEND
	 operations from some narrower mode even though a SUBREG is not
	 present.  */

      else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (op1, 1)))
	{
	  rtx inner_op0 = XEXP (op0, 0);
	  rtx inner_op1 = XEXP (op1, 0);
	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
	  int changed = 0;

	  if (paradoxical_subreg_p (inner_op0)
	      && GET_CODE (inner_op1) == SUBREG
	      && (GET_MODE (SUBREG_REG (inner_op0))
		  == GET_MODE (SUBREG_REG (inner_op1)))
	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
		  <= HOST_BITS_PER_WIDE_INT)
	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
					      GET_MODE (SUBREG_REG (inner_op0)))))
	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
					      GET_MODE (SUBREG_REG (inner_op1))))))
	    {
	      op0 = SUBREG_REG (inner_op0);
	      op1 = SUBREG_REG (inner_op1);

	      /* The resulting comparison is always unsigned since we masked
		 off the original sign bit.  */
	      code = unsigned_condition (code);

	      changed = 1;
	    }

	  else if (c0 == c1)
	    for (tmode = GET_CLASS_NARROWEST_MODE
		 (GET_MODE_CLASS (GET_MODE (op0)));
		 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
		{
		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
		  code = unsigned_condition (code);
		  changed = 1;
		  break;
		}

	  if (!changed)
	    break;
	}

      /* If both operands are NOT, we can strip off the outer operation
	 and adjust the comparison code for swapped operands; similarly for
	 NEG, except that this must be an equality comparison.  */
      else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
		   && (code == EQ || code == NE)))
	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);

      else
	break;
    }

  /* If the first operand is a constant, swap the operands and adjust the
     comparison code appropriately, but don't do this if the second operand
     is already a constant integer.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* We now enter a loop during which we will try to simplify the comparison.
     For the most part, we are only concerned with comparisons with zero,
     but some things may really be comparisons with zero but not start
     out looking that way.  */
  while (CONST_INT_P (op1))
    {
      machine_mode mode = GET_MODE (op0);
      unsigned int mode_width = GET_MODE_PRECISION (mode);
      unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      int equality_comparison_p;
      int sign_bit_comparison_p;
      int unsigned_comparison_p;
      HOST_WIDE_INT const_op;

      /* We only want to handle integral modes.  This catches VOIDmode,
	 CCmode, and the floating-point modes.  An exception is that we
	 can handle VOIDmode if OP0 is a COMPARE or a comparison
	 operation.  */

      if (GET_MODE_CLASS (mode) != MODE_INT
	  && ! (mode == VOIDmode
		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
	break;

      /* Try to simplify the compare to constant, possibly changing the
	 comparison op, and/or changing op1 to zero.  */
      code = simplify_compare_const (code, mode, op0, &op1);
      const_op = INTVAL (op1);

      /* Compute some predicates to simplify code below.  */

      equality_comparison_p = (code == EQ || code == NE);
      sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
      unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
			       || code == GEU);

      /* If this is a sign bit comparison and we can do arithmetic in
	 MODE, say that we will only be needing the sign bit of OP0.  */
      if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
	op0 = force_to_mode (op0, mode,
			     HOST_WIDE_INT_1U
			     << (GET_MODE_PRECISION (mode) - 1),
			     0);
      /* Now try cases based on the opcode of OP0.  If none of the cases
	 does a "continue", we exit this loop immediately after the
	 switch.  */

      switch (GET_CODE (op0))
	{
	case ZERO_EXTRACT:
	  /* If we are extracting a single bit from a variable position in
	     a constant that has only a single bit set and are comparing it
	     with zero, we can convert this into an equality comparison
	     between the position and the location of the single bit.  */
	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
	     have already reduced the shift count modulo the word size.  */
	  if (!SHIFT_COUNT_TRUNCATED
	      && CONST_INT_P (XEXP (op0, 0))
	      && XEXP (op0, 1) == const1_rtx
	      && equality_comparison_p && const_op == 0
	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
	    {
	      if (BITS_BIG_ENDIAN)
		i = BITS_PER_WORD - 1 - i;

	      op0 = XEXP (op0, 2);
	      op1 = GEN_INT (i);
	      const_op = i;

	      /* Result is nonzero iff shift count is equal to I.  */
	      code = reverse_condition (code);
	      continue;
	    }

	  /* Fall through.  */

	case SIGN_EXTRACT:
	  tem = expand_compound_operation (op0);
	  if (tem != op0)
	    {
	      op0 = tem;
	      continue;
	    }
	  break;

	case NOT:
	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);
	      continue;
	    }
	  break;

	case NEG:
	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* The remaining cases only apply to comparisons with zero.  */
	  if (const_op != 0)
	    break;

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */

	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
			 == 0)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	    {
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	      continue;
	    }
	  break;

	case ROTATE:
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* Fall through.  */

	case ABS:
	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SIGN_EXTEND:
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && ! unsigned_comparison_p
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && trunc_int_for_mode (const_op, mode) == const_op
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case SUBREG:
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	     1. The mode extension results in the same bit pattern being added
		on both sides and the comparison is equality or unsigned.  As
		C2 has been truncated to fit in MODE, the pattern can only be
		all 0s or all 1s.

	     2. The mode extension results in the sign bit being copied on
		each side.

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	    {
	      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

	      if ((c1 > 0
		   && (unsigned HOST_WIDE_INT) c1
		      < HOST_WIDE_INT_1U << (mode_width - 1)
		   && (equality_comparison_p || unsigned_comparison_p)
		   /* (A - C1) zero-extends if it is positive and sign-extends
		      if it is negative, C2 both zero- and sign-extends.  */
		   && ((0 == (nonzero_bits (a, inner_mode)
			      & ~GET_MODE_MASK (mode))
			&& const_op >= 0)
		       /* (A - C1) sign-extends if it is positive and 1-extends
			  if it is negative, C2 both sign- and 1-extends.  */
		       || (num_sign_bit_copies (a, inner_mode)
			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					     - mode_width)
			   && const_op < 0)))
		  || ((unsigned HOST_WIDE_INT) c1
		       < HOST_WIDE_INT_1U << (mode_width - 2)
		      /* (A - C1) always sign-extends, like C2.  */
		      && num_sign_bit_copies (a, inner_mode)
			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
					   - (mode_width - 1))))
		{
		  op0 = SUBREG_REG (op0);
		  continue;
		}
	    }

	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (subreg_lowpart_p (op0)
	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
	    ;
	  else if (subreg_lowpart_p (op0)
		   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
		   && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
		   && (code == NE || code == EQ)
		   && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
		       <= HOST_BITS_PER_WIDE_INT)
		   && !paradoxical_subreg_p (op0)
		   && (nonzero_bits (SUBREG_REG (op0),
				     GET_MODE (SUBREG_REG (op0)))
		       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	    {
	      /* Remove outer subregs that don't do anything.  */
	      tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);

	      if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
		{
		  op0 = SUBREG_REG (op0);
		  op1 = tem;
		  continue;
		}
	      break;
	    }
	  else
	    break;

	  /* Fall through.  */

	case ZERO_EXTEND:
	  mode = GET_MODE (XEXP (op0, 0));
	  if (GET_MODE_CLASS (mode) == MODE_INT
	      && (unsigned_comparison_p || equality_comparison_p)
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
	      && const_op >= 0
	      && have_insn_for (COMPARE, mode))
	    {
	      op0 = XEXP (op0, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	    {
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
	      continue;
	    }
	  break;

	case MINUS:
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	    {
	      op0 = XEXP (op0, 1);
	      op1 = tem;
	      continue;
	    }

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);
	      continue;
	    }
	  break;
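	  /* For the sign-bit rule above: in SImode,
	     (lt (minus (ashiftrt X (const_int 31)) X) (const_int 0))
	     becomes (gt X (const_int 0)), since the difference is negative
	     exactly when X is strictly positive.  */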
	case XOR:
	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	    {
	      op0 = XEXP (op0, 0);
	      op1 = tem;
	      continue;
	    }
	  break;

	case EQ:  case NE:
	case UNEQ:  case LTGT:
	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
	case UNORDERED: case ORDERED:
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	  if (const_op != 0
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
	    break;

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	  else
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
		  && (code == LT || code == GE)))
	    {
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
	      else
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
		{
		  code = new_code;
		  op0 = tem;
		  op1 = tem1;
		  continue;
		}
	    }
	  break;

	case IOR:
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	     iff X <= 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	    {
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);
	      continue;
	    }
	  break;

	case AND:
	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	    {
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	      continue;
	    }

	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		{
		  op0 = XEXP (XEXP (op0, 0), 0);
		  continue;
		}
	    }

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == HOST_WIDE_INT_1U << (mode_width - 1)))
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
	      continue;
	    }
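	  /* E.g., (eq (and X (const_int -2147483648)) (const_int 0)) in
	     SImode becomes (ge X (const_int 0)): only the sign bit is
	     tested, so test it directly.  */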
	  /* If this AND operation is really a ZERO_EXTEND from a narrower
	     mode, the constant fits within that mode, and this is either an
	     equality or unsigned comparison, try to do this comparison in
	     the narrower mode.

	     Note that in:

	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
	     -> (ne:DI (reg:SI 4) (const_int 0))

	     unless TRULY_NOOP_TRUNCATION allows it or the register is
	     known to hold a value of the required mode the
	     transformation is invalid.  */
	  if ((equality_comparison_p || unsigned_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
				   & GET_MODE_MASK (mode))
				  + 1)) >= 0
	      && const_op >> i == 0
	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
	    {
	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
	      continue;
	    }

	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
	     fits in both M1 and M2 and the SUBREG is either paradoxical
	     or represents the low part, permute the SUBREG and the AND
	     and try again.  */
	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	      /* Require an integral mode, to avoid creating something like
		 (AND:SF ...).  */
	      if (SCALAR_INT_MODE_P (tmode)
		  /* It is unsafe to commute the AND into the SUBREG if the
		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
		     not defined.  As originally written the upper bits
		     have a defined value due to the AND operation.
		     However, if we commute the AND inside the SUBREG then
		     they no longer have defined values and the meaning of
		     the code has been changed.
		     Also C1 should not change value in the smaller mode,
		     see PR67028 (a positive C1 can become negative in the
		     smaller mode, so that the AND does no longer mask the
		     upper bits).  */
		  && ((WORD_REGISTER_OPERATIONS
		       && mode_width > GET_MODE_PRECISION (tmode)
		       && mode_width <= BITS_PER_WORD
		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
		      || (mode_width <= GET_MODE_PRECISION (tmode)
			  && subreg_lowpart_p (XEXP (op0, 0))))
		  && mode_width <= HOST_BITS_PER_WIDE_INT
		  && HWI_COMPUTABLE_MODE_P (tmode)
		  && (c1 & ~mask) == 0
		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
		  && c1 != mask
		  && c1 != GET_MODE_MASK (tmode))
		{
		  op0 = simplify_gen_binary (AND, tmode,
					     SUBREG_REG (XEXP (op0, 0)),
					     gen_int_mode (c1, tmode));
		  op0 = gen_lowpart (mode, op0);
		  continue;
		}
	    }

	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == NOT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode,
					    XEXP (XEXP (op0, 0), 0), 1);
	      code = (code == NE ? EQ : NE);
	      continue;
	    }

	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
	     (eq (and (lshiftrt X) 1) 0).
	     Also handle the case where (not X) is expressed using xor.  */
	  if (const_op == 0 && equality_comparison_p
	      && XEXP (op0, 1) == const1_rtx
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
	    {
	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
	      rtx shift_count = XEXP (XEXP (op0, 0), 1);

	      if (GET_CODE (shift_op) == NOT
		  || (GET_CODE (shift_op) == XOR
		      && CONST_INT_P (XEXP (shift_op, 1))
		      && CONST_INT_P (shift_count)
		      && HWI_COMPUTABLE_MODE_P (mode)
		      && (UINTVAL (XEXP (shift_op, 1))
			  == HOST_WIDE_INT_1U
			       << INTVAL (shift_count))))
		{
		  op0
		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
		  code = (code == NE ? EQ : NE);
		  continue;
		}
	    }
	  break;
	case ASHIFT:
	  /* If we have (compare (ashift FOO N) (const_int C)) and
	     the high order N bits of FOO (N+1 if an inequality comparison)
	     are known to be zero, we can do this by comparing FOO with C
	     shifted right N bits so long as the low-order N bits of C are
	     zero.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) >= 0
	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
		  < HOST_BITS_PER_WIDE_INT)
	      && (((unsigned HOST_WIDE_INT) const_op
		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
		      - 1)) == 0)
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (XEXP (op0, 0), mode)
		  & ~(mask >> (INTVAL (XEXP (op0, 1))
			       + ! equality_comparison_p))) == 0)
	    {
	      /* We must perform a logical shift, not an arithmetic one,
		 as we want the top N bits of C to be zero.  */
	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);

	      temp >>= INTVAL (XEXP (op0, 1));
	      op1 = gen_int_mode (temp, mode);
	      op0 = XEXP (op0, 0);
	      continue;
	    }

	  /* If we are doing a sign bit comparison, it means we are testing
	     a particular bit.  Convert it to the appropriate AND.  */
	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    (HOST_WIDE_INT_1U
					     << (mode_width - 1
						 - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);
	      continue;
	    }

	  /* If this is an equality comparison with zero and we are shifting
	     the low bit to the sign bit, we can convert this to an AND of the
	     low-order bit.  */
	  if (const_op == 0 && equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
	      continue;
	    }
	  break;
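	  /* For the sign-bit test above: e.g., in SImode,
	     (lt (ashift X (const_int 28)) (const_int 0)) becomes
	     (ne (and X (const_int 8)) (const_int 0)), since the shift moves
	     bit 3 of X into the sign position.  */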
	case ASHIFTRT:
	  /* If this is an equality comparison with zero, we can do this
	     as a logical shift, which might be much simpler.  */
	  if (equality_comparison_p && const_op == 0
	      && CONST_INT_P (XEXP (op0, 1)))
	    {
	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
					  XEXP (op0, 0),
					  INTVAL (XEXP (op0, 1)));
	      continue;
	    }

	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
	     do the comparison in a narrower mode.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
					 MODE_INT, 1)) != BLKmode
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
	      continue;
	    }

	  /* Likewise if OP0 is a PLUS of a sign extension with a
	     constant, which is usually represented with the PLUS
	     between the shifts.  */
	  if (! unsigned_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == PLUS
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
					 MODE_INT, 1)) != BLKmode
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_MODE_MASK (tmode) >> 1) + 1)
		  <= GET_MODE_MASK (tmode)))
	    {
	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
	      rtx add_const = XEXP (XEXP (op0, 0), 1);
	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
						   add_const, XEXP (op0, 1));

	      op0 = simplify_gen_binary (PLUS, tmode,
					 gen_lowpart (tmode, inner),
					 new_const);
	      continue;
	    }

	  /* Fall through.  */
	case LSHIFTRT:
	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
	     the low order N bits of FOO are known to be zero, we can do this
	     by comparing FOO with C shifted left N bits so long as no
	     overflow occurs.  Even if the low order N bits of FOO aren't known
	     to be zero, if the comparison is >= or < we can use the same
	     optimization and for > or <= by setting all the low
	     order N bits in the comparison constant.  */
	  if (CONST_INT_P (XEXP (op0, 1))
	      && INTVAL (XEXP (op0, 1)) > 0
	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && (((unsigned HOST_WIDE_INT) const_op
		   + (GET_CODE (op0) != LSHIFTRT
		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
			 + 1)
		      : 0))
		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
	    {
	      unsigned HOST_WIDE_INT low_bits
		= (nonzero_bits (XEXP (op0, 0), mode)
		   & ((HOST_WIDE_INT_1U
		       << INTVAL (XEXP (op0, 1))) - 1));
	      if (low_bits == 0 || !equality_comparison_p)
		{
		  /* If the shift was logical, then we must make the condition
		     unsigned.  */
		  if (GET_CODE (op0) == LSHIFTRT)
		    code = unsigned_condition (code);

		  const_op <<= INTVAL (XEXP (op0, 1));
		  if (low_bits != 0
		      && (code == GT || code == GTU
			  || code == LE || code == LEU))
		    const_op
		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
		  op1 = GEN_INT (const_op);
		  op0 = XEXP (op0, 0);
		  continue;
		}
	    }

	  /* If we are using this shift to extract just the sign bit, we
	     can replace this with an LT or GE comparison.  */
	  if (const_op == 0
	      && (equality_comparison_p || sign_bit_comparison_p)
	      && CONST_INT_P (XEXP (op0, 1))
	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
	    {
	      op0 = XEXP (op0, 0);
	      code = (code == NE || code == GT ? LT : GE);
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of
	those bits.

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
	In that case we know those bits are zeros or ones.  We must also be
	sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  rtx_code op0_mco_code = SET;
  if (op1 == const0_rtx)
    op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;

  op0 = make_compound_operation (op0, op0_mco_code);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
      && (code == NE || code == EQ))
    {
      if (paradoxical_subreg_p (op0))
	{
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	     implemented.  */
	  if (REG_P (SUBREG_REG (op0)))
	    {
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (GET_MODE (op0), op1);
	    }
	}
      else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
		<= HOST_BITS_PER_WIDE_INT)
	       && (nonzero_bits (SUBREG_REG (op0),
				 GET_MODE (SUBREG_REG (op0)))
		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	{
	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);

	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
	}
    }

  /* We now do the opposite procedure: Some machines don't have compare
     insns in all modes.  If OP0's mode is an integer mode smaller than a
     word and we can't do a compare in that mode, see if there is a larger
     mode for which we can do the compare.  There are a number of cases in
     which we can use the wider mode.  */

  mode = GET_MODE (op0);
  if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    for (tmode = GET_MODE_WIDER_MODE (mode);
	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
	 tmode = GET_MODE_WIDER_MODE (tmode))
      if (have_insn_for (COMPARE, tmode))
	{
	  int zero_extended;

	  /* If this is a test for negative, we can make an explicit
	     test of the sign bit.  Test this first so we can use
	     a paradoxical subreg to extend OP0.  */

	  if (op1 == const0_rtx && (code == LT || code == GE)
	      && HWI_COMPUTABLE_MODE_P (mode))
	    {
	      unsigned HOST_WIDE_INT sign
		= HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
	      op0 = simplify_gen_binary (AND, tmode,
					 gen_lowpart (tmode, op0),
					 gen_int_mode (sign, tmode));
	      code = (code == LT) ? NE : EQ;
	      break;
	    }

	  /* If the only nonzero bits in OP0 and OP1 are those in the
	     narrower mode and this is an equality or unsigned comparison,
	     we can use the wider mode.  Similarly for sign-extended
	     values, in which case it is true for all comparisons.  */
	  zero_extended = ((code == EQ || code == NE
			    || code == GEU || code == GTU
			    || code == LEU || code == LTU)
			   && (nonzero_bits (op0, tmode)
			       & ~GET_MODE_MASK (mode)) == 0
			   && ((CONST_INT_P (op1)
				|| (nonzero_bits (op1, tmode)
				    & ~GET_MODE_MASK (mode)) == 0)));

	  if (zero_extended
	      || ((num_sign_bit_copies (op0, tmode)
		   > (unsigned int) (GET_MODE_PRECISION (tmode)
				     - GET_MODE_PRECISION (mode)))
		  && (num_sign_bit_copies (op1, tmode)
		      > (unsigned int) (GET_MODE_PRECISION (tmode)
					- GET_MODE_PRECISION (mode)))))
	    {
	      /* If OP0 is an AND and we don't have an AND in MODE either,
		 make a new AND in the proper mode.  */
	      if (GET_CODE (op0) == AND
		  && !have_insn_for (AND, mode))
		op0 = simplify_gen_binary (AND, tmode,
					   gen_lowpart (tmode,
							XEXP (op0, 0)),
					   gen_lowpart (tmode,
							XEXP (op0, 1)));

	      if (zero_extended)
		{
		  op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
		  op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
		}
	      else
		{
		  op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
		  op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
		}
	      break;
	    }
	}

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
	return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return 2 + 2 * count_rtxs (x0)
	       + count_rtxs (x == XEXP (x1, 0)
			     ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return 2 + 2 * count_rtxs (x1)
	       + count_rtxs (x == XEXP (x0, 0)
			     ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
	{
	  reg_stat_type *rsp = &reg_stat[r];
	  rsp->last_set_table_tick = label_tick;
	}

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	/* Check for identical subexpressions.  If x contains
	   identical subexpression we only have to traverse one of
	   them.  */
	if (i == 0 && ARITHMETIC_P (x))
	  {
	    /* Note that at this point x1 has already been
	       processed.  */
	    rtx x0 = XEXP (x, 0);
	    rtx x1 = XEXP (x, 1);

	    /* If x0 and x1 are identical then there is no need to
	       process x0.  */
	    if (x0 == x1)
	      break;

	    /* If x0 is identical to a subexpression of x1 then while
	       processing x1, x0 has already been processed.  Thus we
	       are done with x.  */
	    if (ARITHMETIC_P (x1)
		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	      break;

	    /* If x1 is identical to a subexpression of x0 then we
	       still have to process the rest of x0.  */
	    if (ARITHMETIC_P (x0)
		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	      {
		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
		break;
	      }
	  }

	update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	update_table_tick (XVECEXP (x, i, j));
}
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
	 our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
	 it isn't going to be useful and will take a lot of time to process,
	 so just use the CLOBBER.  */

      if (tem)
	{
	  if (ARITHMETIC_P (tem)
	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
	    tem = XEXP (tem, 0);
	  else if (count_occurrences (value, reg, 1) >= 2)
	    {
	      /* If there are two or more occurrences of REG in VALUE,
		 prevent the value from growing too much.  */
	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
	    }

	  value = replace_rtx (copy_rtx (value), reg, tem);
	}
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
	rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
	rsp->last_set_invalid = 1;
      else
	rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
	value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
	= num_sign_bit_copies (value, GET_MODE (reg));
    }
}
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
	record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
	 show that we don't know the value.  We can handle SUBREG in
	 some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
	       && GET_CODE (SET_DEST (setter)) == SUBREG
	       && SUBREG_REG (SET_DEST (setter)) == dest
	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
	       && subreg_lowpart_p (SET_DEST (setter)))
	record_value_for_reg (dest, record_dead_insn,
			      gen_lowpart (GET_MODE (dest),
					   SET_SRC (setter)));
      else
	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */
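/* For example (illustrative): an insn carrying the note
   (expr_list:REG_DEAD (reg:SI 102)) makes last_death for register 102
   point at that insn, which move_deaths can later consult when the
   register's use is merged into another insn.  */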
static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
	  && REG_P (XEXP (link, 0)))
	{
	  unsigned int regno = REGNO (XEXP (link, 0));
	  unsigned int endregno = END_REGNO (XEXP (link, 0));

	  for (i = regno; i < endregno; i++)
	    {
	      reg_stat_type *rsp;

	      rsp = &reg_stat[i];
	      rsp->last_death = insn;
	    }
	}
      else if (REG_NOTE_KIND (link) == REG_INC)
	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
	{
	  reg_stat_type *rsp;

	  rsp = &reg_stat[i];
	  rsp->last_set_invalid = 1;
	  rsp->last_set = insn;
	  rsp->last_set_value = 0;
	  rsp->last_set_mode = VOIDmode;
	  rsp->last_set_nonzero_bits = 0;
	  rsp->last_set_sign_bit_copies = 0;
	  rsp->last_death = 0;
	  rsp->truncated_to_mode = VOIDmode;
	}

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
	 the return value register is set at this LUID.  We could
	 still replace a register with the return value from the
	 wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */
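/* A sketch of the case this handles, assuming a target that promotes
   narrow variables to word-sized registers: in a promoted lowpart subreg
   such as (subreg:SI (reg:DI 100) 0) with SUBREG_PROMOTED_VAR_P set and
   the promotion known to be unsigned, the high bits of register 100 are
   known zero, so its recorded nonzero bits can be masked down to
   GET_MODE_MASK (SImode).  (Register number and byte offset invented.)  */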
static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
	  || REGNO (SET_DEST (set)) != regno
	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
	{
	  links = links->next;
	  continue;
	}

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
	{
	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
	}

      if (REG_P (SET_SRC (set)))
	{
	  regno = REGNO (SET_SRC (set));
	  links = LOG_LINKS (insn);
	}
      else
	break;
    }
}
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */
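/* For example (a sketch for a target where SImode-to-HImode truncation is
   not a no-op): if reg_stat records truncated_to_mode == HImode for a
   pseudo, then a request for HImode, or for any mode at least as wide,
   succeeds, and a plain subreg can stand in for an explicit truncate.  */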
static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
/* If X is a hard reg or a subreg, record the mode that the register is
   accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
   to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */
static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
	return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
	return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || (GET_MODE_SIZE (truncated_mode)
	  < GET_MODE_SIZE (rsp->truncated_to_mode)))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}
/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies about the registers used in it.  */
static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
	switch (format[i])
	  {
	  case 'e':
	    check_promoted_subreg (insn, XEXP (x, i));
	    break;
	  case 'V':
	  case 'E':
	    if (XVEC (x, i) != 0)
	      for (j = 0; j < XVECLEN (x, i); j++)
		check_promoted_subreg (insn, XVECEXP (x, i, j));
	    break;
	  }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */
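/* For instance, if the recorded value was

       (ashiftrt:SI (reg:SI 100) (const_int 31))

   and register 100 has since been set again, replacing the reference with
   (clobber (const_int 0)) still lets callers see that the value is an
   arithmetic right shift by 31, hence either -1 or 0.  (Illustrative
   register number.)  */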
static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
	{
	  reg_stat_type *rsp = &reg_stat[j];
	  if (rsp->last_set_invalid
	      /* If this is a pseudo-register that was only set once and not
		 live at the beginning of the function, it is always valid.  */
	      || (! (regno >= FIRST_PSEUDO_REGISTER
		     && regno < reg_n_sets_max
		     && REG_N_SETS (regno) == 1
		     && (!REGNO_REG_SET_P
			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
			  regno)))
		  && rsp->last_set_label > tick))
	    {
	      if (replace)
		*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
	      return replace;
	    }
	}

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  /* Check for identical subexpressions.  If x contains
	     identical subexpression we only have to traverse one of
	     them.  */
	  if (i == 1 && ARITHMETIC_P (x))
	    {
	      /* Note that at this point x0 has already been checked
		 and found valid.  */
	      rtx x0 = XEXP (x, 0);
	      rtx x1 = XEXP (x, 1);

	      /* If x0 and x1 are identical then x is also valid.  */
	      if (x0 == x1)
		return 1;

	      /* If x1 is identical to a subexpression of x0 then
		 while checking x0, x1 has already been checked.  Thus
		 it is valid and so is x.  */
	      if (ARITHMETIC_P (x0)
		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
		return 1;

	      /* If x0 is identical to a subexpression of x1 then x is
		 valid iff the rest of x1 is valid.  */
	      if (ARITHMETIC_P (x1)
		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
		return
		  get_last_value_validate (&XEXP (x1,
						  x0 == XEXP (x1, 0) ? 1 : 0),
					   insn, tick, replace);
	    }

	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
				       replace) == 0)
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (get_last_value_validate (&XVECEXP (x, i, j),
				       insn, tick, replace) == 0)
	    return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */
static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or it's live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
	  && (regno < FIRST_PSEUDO_REGISTER
	      || regno >= reg_n_sets_max
	      || REG_N_SETS (regno) != 1
	      || REGNO_REG_SET_P
		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (GET_MODE_PRECISION (rsp->last_set_mode)
      < GET_MODE_PRECISION (GET_MODE (x)))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */
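/* For example (with invented LUIDs): if register 100 was last set by the
   insn with luid 12 in this block, then calling this function on
   (plus:SI (reg:SI 100) (const_int 1)) with FROM_LUID = 10 returns
   nonzero, since moving the use back to luid 10 would cross the set at
   luid 12 and observe a stale value.  */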
static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
	 because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
	return 1;
#endif
      for (; regno < endreg; regno++)
	{
	  reg_stat_type *rsp = &reg_stat[regno];
	  if (rsp->last_set
	      && rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) > from_luid)
	    return 1;
	}
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && use_crosses_set_p (XEXP (x, i), from_luid))
	return 1;
    }
  return 0;
}
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */
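/* A sketch of the backward scan (invented insns): walking PREV_INSN from
   INSN, a (clobber (reg:SI 1)) answers "dead", a (set (reg:SI 1) ...)
   answers "live", and reaching the head of the block without seeing
   either defers to the block's live-in set.  */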
static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
		  && regno == HARD_FRAME_POINTER_REGNUM)
	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */
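/* For example (illustrative): if (reg:SI 100) died in I2 and I2's
   computation is being merged into I3, the REG_DEAD note is taken off
   the insn where the register died and a replacement note is queued on
   *PNOTES, to be placed by distribute_notes once the combination is
   accepted.  */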
static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
		  > GET_MODE_SIZE (GET_MODE (x))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
			    < GET_MODE_SIZE (GET_MODE (x)))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && REG_NREGS (x) > 1)
	    {
	      unsigned int ourend = END_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note)
		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
	      else
		offset = 1;

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some
	 piece of every register in the expression is used by
	 this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && (((GET_MODE_SIZE (GET_MODE (dest))
		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */
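/* For example, X = (reg:SI 100) is such a target in

       (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
	    (reg:SI 101))

   since only a bit-field of register 100 is overwritten.  (Illustrative
   register numbers and field position.)  */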
static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target = 0;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */
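/* A representative case (sketch): when I2 and I3 have been combined into
   a new I3, a REG_DEAD note for a register still used by the new I3 stays
   on I3, while a note for a register that no longer appears anywhere
   triggers the backward search in the REG_DEAD case below or is simply
   dropped.  */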
static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      int old_size, args_size = INTVAL (XEXP (note, 0));
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (old_size != args_size
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	case REG_TM:
	case REG_CALL_DECL:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for registers which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }
	  break;

	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;

	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;

	      place2 = 0;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */

	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;
	      tem_insn = i3;
	      /* If the new I2 sets the same register that is marked dead
		 in the note, we do not know where to put the note.
		 Give up.  */
	      if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		break;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
			  && (!HAVE_cc0
			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }

	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if is it totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.] */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
		{
		  unsigned int endregno = END_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs[i][reg_raw_mode[i]])
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;
	}

      if (place2)
	add_shallow_copy_of_reg_note (place2, note);
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */
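/* For example (sketch): if combination changes which insn sets register
   100, the stale link on its old user is re-homed by scanning forward
   from the link's target for the first insn in the block that uses or
   sets register 100; if a use is found first, the link is attached
   there.  */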
static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */
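/* For instance, with EXPR = (plus:SI (reg:SI 100) (reg:SI 101)) and
   EQUIV = (plus:SI (reg:SI 100) (const_int 4)), every register of EQUIV
   is mentioned in EXPR, so the result is false; an EQUIV that used
   (reg:SI 102) instead would yield true.  (Illustrative operands.)  */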
static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
	free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};
class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine
} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}