/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triples of insns A, B and C when
   C has a link back to B and B has a link back to A.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.
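
   As an illustrative sketch (not part of the original comment), suppose
   insn A computes an address and insn B, linked back to A, loads from it:

	A: (set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
	B: (set (reg:SI 101) (mem:SI (reg:SI 100)))

   If register 100 dies in B and is used nowhere else, substituting A's
   value into B gives

	(set (reg:SI 101) (mem:SI (plus:SI (reg:SI 99) (const_int 4))))

   and, when the machine description recognizes that pattern, the pair is
   replaced by this single insn.  The register numbers and modes here are
   made up purely for illustration.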

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "insn-codes.h"
#include "rtlhooks-def.h"
/* Include output.h for dump_file.  */
#include "output.h"
#include "tree-pass.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;

typedef struct reg_stat_struct {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bits copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */
  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;
  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */
  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */
  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
} reg_stat_type;

DEF_VEC_O(reg_stat_type);
DEF_VEC_ALLOC_O(reg_stat_type,heap);

static VEC(reg_stat_type,heap) *reg_stat;
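
/* A purely illustrative example (not part of the original sources): on a
   machine whose byte loads zero extend, a QImode pseudo that is only ever
   loaded from memory ends up with nonzero_bits == 0xff, so a later
   (and:SI (reg) (const_int 255)) is known to be redundant.  Conversely,
   sign_bit_copies records how many high-order bits are known to equal the
   sign bit, e.g. 25 for a 32-bit register holding a sign-extended QImode
   value.  */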

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;

/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as an INSN_LIST rtx.  */

static rtx *uid_log_links;

#define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])
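
/* For example (illustrative only, mirroring how these macros are used later
   in this file): the initial scan in combine_instructions caches a cost with

	INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
					  optimize_this_for_speed_p);

   and try_combine candidates are found by walking

	for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
	  ...  */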

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static enum machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; enum machine_mode m; } old_contents;
  union { rtx *r; int *i; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
						unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx);
static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx try_combine (rtx, rtx, rtx, rtx, int *, rtx);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx, bool);
static rtx subst (rtx, rtx, rtx, int, int);
static rtx combine_simplify_rtx (rtx, enum machine_mode, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static rtx make_compound_operation (rtx, enum rtx_code);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, enum machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, enum machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx, rtx *);
static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx, rtx);
static void check_promoted_subreg (rtx, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx);
static int get_last_value_validate (rtx *, rtx, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx);
static void move_deaths (rtx, rtx, int, rtx, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
static void distribute_links (rtx);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx, rtx);
static int unmentioned_reg_p_1 (rtx *, void *);
static bool unmentioned_reg_p (rtx, rtx);
static int record_truncated_value (rtx *, void *);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);

/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN can not be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx
combine_split_insns (rtx pattern, rtx insn)
{
  rtx ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > VEC_length (reg_stat_type, reg_stat))
    VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
  return ret;
}

492 /* This is used by find_single_use to locate an rtx in LOC that
493 contains exactly one use of DEST, which is typically either a REG
494 or CC0. It returns a pointer to the innermost rtx expression
495 containing DEST. Appearances of DEST that are being used to
496 totally replace it are not counted. */
499 find_single_use_1 (rtx dest
, rtx
*loc
)
502 enum rtx_code code
= GET_CODE (x
);
520 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
521 of a REG that occupies all of the REG, the insn uses DEST if
522 it is mentioned in the destination or the source. Otherwise, we
523 need just check the source. */
524 if (GET_CODE (SET_DEST (x
)) != CC0
525 && GET_CODE (SET_DEST (x
)) != PC
526 && !REG_P (SET_DEST (x
))
527 && ! (GET_CODE (SET_DEST (x
)) == SUBREG
528 && REG_P (SUBREG_REG (SET_DEST (x
)))
529 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x
))))
530 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)
531 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x
)))
532 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
))))
535 return find_single_use_1 (dest
, &SET_SRC (x
));
539 return find_single_use_1 (dest
, &XEXP (x
, 0));
545 /* If it wasn't one of the common cases above, check each expression and
546 vector of this code. Look for a unique usage of DEST. */
548 fmt
= GET_RTX_FORMAT (code
);
549 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
553 if (dest
== XEXP (x
, i
)
554 || (REG_P (dest
) && REG_P (XEXP (x
, i
))
555 && REGNO (dest
) == REGNO (XEXP (x
, i
))))
558 this_result
= find_single_use_1 (dest
, &XEXP (x
, i
));
561 result
= this_result
;
562 else if (this_result
)
563 /* Duplicate usage. */
566 else if (fmt
[i
] == 'E')
570 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
572 if (XVECEXP (x
, i
, j
) == dest
574 && REG_P (XVECEXP (x
, i
, j
))
575 && REGNO (XVECEXP (x
, i
, j
)) == REGNO (dest
)))
578 this_result
= find_single_use_1 (dest
, &XVECEXP (x
, i
, j
));
581 result
= this_result
;
582 else if (this_result
)
592 /* See if DEST, produced in INSN, is used only a single time in the
593 sequel. If so, return a pointer to the innermost rtx expression in which
596 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
598 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
599 care about REG_DEAD notes or LOG_LINKS.
601 Otherwise, we find the single use by finding an insn that has a
602 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
603 only referenced once in that insn, we know that it must be the first
604 and last insn referencing DEST. */
607 find_single_use (rtx dest
, rtx insn
, rtx
*ploc
)
617 next
= NEXT_INSN (insn
);
619 || (!NONJUMP_INSN_P (next
) && !JUMP_P (next
)))
622 result
= find_single_use_1 (dest
, &PATTERN (next
));
632 bb
= BLOCK_FOR_INSN (insn
);
633 for (next
= NEXT_INSN (insn
);
634 next
&& BLOCK_FOR_INSN (next
) == bb
;
635 next
= NEXT_INSN (next
))
636 if (INSN_P (next
) && dead_or_set_p (next
, dest
))
638 for (link
= LOG_LINKS (next
); link
; link
= XEXP (link
, 1))
639 if (XEXP (link
, 0) == insn
)
644 result
= find_single_use_1 (dest
, &PATTERN (next
));

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST(&(INTO), (NEWVAL))
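
/* As a purely illustrative (hypothetical) use of SUBST: when subst rewrites
   an operand of X in place it goes through this macro, e.g.

	SUBST (XEXP (x, 0), new_rtx);

   so that undo_all can later restore the original operand if the combined
   insn turns out not to be recognizable.  */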

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT(&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, enum machine_mode newval)
{
  struct undo *buf;
  enum machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE(&(INTO), (NEWVAL))
766 /* Subroutine of try_combine. Determine whether the combine replacement
767 patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
768 insn_rtx_cost that the original instruction sequence I0, I1, I2, I3 and
769 undobuf.other_insn. Note that I1 and/or NEWI2PAT may be NULL_RTX.
770 NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX. This
771 function returns false, if the costs of all instructions can be
772 estimated, and the replacements are more expensive than the original
776 combine_validate_cost (rtx i0
, rtx i1
, rtx i2
, rtx i3
, rtx newpat
,
777 rtx newi2pat
, rtx newotherpat
)
779 int i0_cost
, i1_cost
, i2_cost
, i3_cost
;
780 int new_i2_cost
, new_i3_cost
;
781 int old_cost
, new_cost
;
783 /* Lookup the original insn_rtx_costs. */
784 i2_cost
= INSN_COST (i2
);
785 i3_cost
= INSN_COST (i3
);
789 i1_cost
= INSN_COST (i1
);
792 i0_cost
= INSN_COST (i0
);
793 old_cost
= (i0_cost
> 0 && i1_cost
> 0 && i2_cost
> 0 && i3_cost
> 0
794 ? i0_cost
+ i1_cost
+ i2_cost
+ i3_cost
: 0);
798 old_cost
= (i1_cost
> 0 && i2_cost
> 0 && i3_cost
> 0
799 ? i1_cost
+ i2_cost
+ i3_cost
: 0);
805 old_cost
= (i2_cost
> 0 && i3_cost
> 0) ? i2_cost
+ i3_cost
: 0;
806 i1_cost
= i0_cost
= 0;
809 /* Calculate the replacement insn_rtx_costs. */
810 new_i3_cost
= insn_rtx_cost (newpat
, optimize_this_for_speed_p
);
813 new_i2_cost
= insn_rtx_cost (newi2pat
, optimize_this_for_speed_p
);
814 new_cost
= (new_i2_cost
> 0 && new_i3_cost
> 0)
815 ? new_i2_cost
+ new_i3_cost
: 0;
819 new_cost
= new_i3_cost
;
823 if (undobuf
.other_insn
)
825 int old_other_cost
, new_other_cost
;
827 old_other_cost
= INSN_COST (undobuf
.other_insn
);
828 new_other_cost
= insn_rtx_cost (newotherpat
, optimize_this_for_speed_p
);
829 if (old_other_cost
> 0 && new_other_cost
> 0)
831 old_cost
+= old_other_cost
;
832 new_cost
+= new_other_cost
;
838 /* Disallow this recombination if both new_cost and old_cost are
839 greater than zero, and new_cost is greater than old cost. */
841 && new_cost
> old_cost
)
848 "rejecting combination of insns %d, %d, %d and %d\n",
849 INSN_UID (i0
), INSN_UID (i1
), INSN_UID (i2
),
851 fprintf (dump_file
, "original costs %d + %d + %d + %d = %d\n",
852 i0_cost
, i1_cost
, i2_cost
, i3_cost
, old_cost
);
857 "rejecting combination of insns %d, %d and %d\n",
858 INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
859 fprintf (dump_file
, "original costs %d + %d + %d = %d\n",
860 i1_cost
, i2_cost
, i3_cost
, old_cost
);
865 "rejecting combination of insns %d and %d\n",
866 INSN_UID (i2
), INSN_UID (i3
));
867 fprintf (dump_file
, "original costs %d + %d = %d\n",
868 i2_cost
, i3_cost
, old_cost
);
873 fprintf (dump_file
, "replacement costs %d + %d = %d\n",
874 new_i2_cost
, new_i3_cost
, new_cost
);
877 fprintf (dump_file
, "replacement cost %d\n", new_cost
);
883 /* Update the uid_insn_cost array with the replacement costs. */
884 INSN_COST (i2
) = new_i2_cost
;
885 INSN_COST (i3
) = new_i3_cost
;

/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx insn, next;
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}

918 /* Fill in log links field for all insns. */
921 create_log_links (void)
925 df_ref
*def_vec
, *use_vec
;
927 next_use
= XCNEWVEC (rtx
, max_reg_num ());
929 /* Pass through each block from the end, recording the uses of each
930 register and establishing log links when def is encountered.
931 Note that we do not clear next_use array in order to save time,
932 so we have to test whether the use is in the same basic block as def.
934 There are a few cases below when we do not consider the definition or
935 usage -- these are taken from original flow.c did. Don't ask me why it is
936 done this way; I don't know and if it works, I don't want to know. */
940 FOR_BB_INSNS_REVERSE (bb
, insn
)
942 if (!NONDEBUG_INSN_P (insn
))
945 /* Log links are created only once. */
946 gcc_assert (!LOG_LINKS (insn
));
948 for (def_vec
= DF_INSN_DEFS (insn
); *def_vec
; def_vec
++)
950 df_ref def
= *def_vec
;
951 int regno
= DF_REF_REGNO (def
);
954 if (!next_use
[regno
])
957 /* Do not consider if it is pre/post modification in MEM. */
958 if (DF_REF_FLAGS (def
) & DF_REF_PRE_POST_MODIFY
)
961 /* Do not make the log link for frame pointer. */
962 if ((regno
== FRAME_POINTER_REGNUM
963 && (! reload_completed
|| frame_pointer_needed
))
964 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
965 || (regno
== HARD_FRAME_POINTER_REGNUM
966 && (! reload_completed
|| frame_pointer_needed
))
968 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
969 || (regno
== ARG_POINTER_REGNUM
&& fixed_regs
[regno
])
974 use_insn
= next_use
[regno
];
975 if (BLOCK_FOR_INSN (use_insn
) == bb
)
979 We don't build a LOG_LINK for hard registers contained
980 in ASM_OPERANDs. If these registers get replaced,
981 we might wind up changing the semantics of the insn,
982 even if reload can make what appear to be valid
983 assignments later. */
984 if (regno
>= FIRST_PSEUDO_REGISTER
985 || asm_noperands (PATTERN (use_insn
)) < 0)
987 /* Don't add duplicate links between instructions. */
989 for (links
= LOG_LINKS (use_insn
); links
;
990 links
= XEXP (links
, 1))
991 if (insn
== XEXP (links
, 0))
995 LOG_LINKS (use_insn
) =
996 alloc_INSN_LIST (insn
, LOG_LINKS (use_insn
));
999 next_use
[regno
] = NULL_RTX
;
1002 for (use_vec
= DF_INSN_USES (insn
); *use_vec
; use_vec
++)
1004 df_ref use
= *use_vec
;
1005 int regno
= DF_REF_REGNO (use
);
1007 /* Do not consider the usage of the stack pointer
1008 by function call. */
1009 if (DF_REF_FLAGS (use
) & DF_REF_CALL_STACK_USAGE
)
1012 next_use
[regno
] = insn
;

/* Clear LOG_LINKS fields of insns.  */

static void
clear_log_links (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      free_INSN_LIST_list (&LOG_LINKS (insn));
}

1032 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1033 true if we found a LOG_LINK that proves that A feeds B. This only works
1034 if there are no instructions between A and B which could have a link
1035 depending on A, since in that case we would not record a link for B.
1036 We also check the implicit dependency created by a cc0 setter/user
1040 insn_a_feeds_b (rtx a
, rtx b
)
1043 for (links
= LOG_LINKS (b
); links
; links
= XEXP (links
, 1))
1044 if (XEXP (links
, 0) == a
)
1053 /* Main entry point for combiner. F is the first insn of the function.
1054 NREGS is the first unused pseudo-reg number.
1056 Return nonzero if the combiner has turned an indirect jump
1057 instruction into a direct jump. */
1059 combine_instructions (rtx f
, unsigned int nregs
)
1065 rtx links
, nextlinks
;
1067 basic_block last_bb
;
1069 int new_direct_jump_p
= 0;
1071 for (first
= f
; first
&& !INSN_P (first
); )
1072 first
= NEXT_INSN (first
);
1076 combine_attempts
= 0;
1079 combine_successes
= 0;
1081 rtl_hooks
= combine_rtl_hooks
;
1083 VEC_safe_grow_cleared (reg_stat_type
, heap
, reg_stat
, nregs
);
1085 init_recog_no_volatile ();
1087 /* Allocate array for insn info. */
1088 max_uid_known
= get_max_uid ();
1089 uid_log_links
= XCNEWVEC (rtx
, max_uid_known
+ 1);
1090 uid_insn_cost
= XCNEWVEC (int, max_uid_known
+ 1);
1092 nonzero_bits_mode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
1094 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1095 problems when, for example, we have j <<= 1 in a loop. */
1097 nonzero_sign_valid
= 0;
1098 label_tick
= label_tick_ebb_start
= 1;
1100 /* Scan all SETs and see if we can deduce anything about what
1101 bits are known to be zero for some registers and how many copies
1102 of the sign bit are known to exist for those registers.
1104 Also set any known values so that we can use it while searching
1105 for what bits are known to be set. */
1107 setup_incoming_promotions (first
);
1108 /* Allow the entry block and the first block to fall into the same EBB.
1109 Conceptually the incoming promotions are assigned to the entry block. */
1110 last_bb
= ENTRY_BLOCK_PTR
;
1112 create_log_links ();
1113 FOR_EACH_BB (this_basic_block
)
1115 optimize_this_for_speed_p
= optimize_bb_for_speed_p (this_basic_block
);
1120 if (!single_pred_p (this_basic_block
)
1121 || single_pred (this_basic_block
) != last_bb
)
1122 label_tick_ebb_start
= label_tick
;
1123 last_bb
= this_basic_block
;
1125 FOR_BB_INSNS (this_basic_block
, insn
)
1126 if (INSN_P (insn
) && BLOCK_FOR_INSN (insn
))
1128 subst_low_luid
= DF_INSN_LUID (insn
);
1131 note_stores (PATTERN (insn
), set_nonzero_bits_and_sign_copies
,
1133 record_dead_and_set_regs (insn
);
1136 for (links
= REG_NOTES (insn
); links
; links
= XEXP (links
, 1))
1137 if (REG_NOTE_KIND (links
) == REG_INC
)
1138 set_nonzero_bits_and_sign_copies (XEXP (links
, 0), NULL_RTX
,
1142 /* Record the current insn_rtx_cost of this instruction. */
1143 if (NONJUMP_INSN_P (insn
))
1144 INSN_COST (insn
) = insn_rtx_cost (PATTERN (insn
),
1145 optimize_this_for_speed_p
);
1147 fprintf(dump_file
, "insn_cost %d: %d\n",
1148 INSN_UID (insn
), INSN_COST (insn
));
1152 nonzero_sign_valid
= 1;
1154 /* Now scan all the insns in forward order. */
1155 label_tick
= label_tick_ebb_start
= 1;
1157 setup_incoming_promotions (first
);
1158 last_bb
= ENTRY_BLOCK_PTR
;
1160 FOR_EACH_BB (this_basic_block
)
1162 rtx last_combined_insn
= NULL_RTX
;
1163 optimize_this_for_speed_p
= optimize_bb_for_speed_p (this_basic_block
);
1168 if (!single_pred_p (this_basic_block
)
1169 || single_pred (this_basic_block
) != last_bb
)
1170 label_tick_ebb_start
= label_tick
;
1171 last_bb
= this_basic_block
;
1173 rtl_profile_for_bb (this_basic_block
);
1174 for (insn
= BB_HEAD (this_basic_block
);
1175 insn
!= NEXT_INSN (BB_END (this_basic_block
));
1176 insn
= next
? next
: NEXT_INSN (insn
))
1179 if (NONDEBUG_INSN_P (insn
))
1181 while (last_combined_insn
1182 && INSN_DELETED_P (last_combined_insn
))
1183 last_combined_insn
= PREV_INSN (last_combined_insn
);
1184 if (last_combined_insn
== NULL_RTX
1185 || BARRIER_P (last_combined_insn
)
1186 || BLOCK_FOR_INSN (last_combined_insn
) != this_basic_block
1187 || DF_INSN_LUID (last_combined_insn
) <= DF_INSN_LUID (insn
))
1188 last_combined_insn
= insn
;
1190 /* See if we know about function return values before this
1191 insn based upon SUBREG flags. */
1192 check_promoted_subreg (insn
, PATTERN (insn
));
1194 /* See if we can find hardregs and subreg of pseudos in
1195 narrower modes. This could help turning TRUNCATEs
1197 note_uses (&PATTERN (insn
), record_truncated_values
, NULL
);
1199 /* Try this insn with each insn it links back to. */
1201 for (links
= LOG_LINKS (insn
); links
; links
= XEXP (links
, 1))
1202 if ((next
= try_combine (insn
, XEXP (links
, 0), NULL_RTX
,
1203 NULL_RTX
, &new_direct_jump_p
,
1204 last_combined_insn
)) != 0)
1207 /* Try each sequence of three linked insns ending with this one. */
1209 for (links
= LOG_LINKS (insn
); links
; links
= XEXP (links
, 1))
1211 rtx link
= XEXP (links
, 0);
1213 /* If the linked insn has been replaced by a note, then there
1214 is no point in pursuing this chain any further. */
1218 for (nextlinks
= LOG_LINKS (link
);
1220 nextlinks
= XEXP (nextlinks
, 1))
1221 if ((next
= try_combine (insn
, link
, XEXP (nextlinks
, 0),
1222 NULL_RTX
, &new_direct_jump_p
,
1223 last_combined_insn
)) != 0)
1228 /* Try to combine a jump insn that uses CC0
1229 with a preceding insn that sets CC0, and maybe with its
1230 logical predecessor as well.
1231 This is how we make decrement-and-branch insns.
1232 We need this special code because data flow connections
1233 via CC0 do not get entered in LOG_LINKS. */
1236 && (prev
= prev_nonnote_insn (insn
)) != 0
1237 && NONJUMP_INSN_P (prev
)
1238 && sets_cc0_p (PATTERN (prev
)))
1240 if ((next
= try_combine (insn
, prev
, NULL_RTX
, NULL_RTX
,
1242 last_combined_insn
)) != 0)
1245 for (nextlinks
= LOG_LINKS (prev
); nextlinks
;
1246 nextlinks
= XEXP (nextlinks
, 1))
1247 if ((next
= try_combine (insn
, prev
, XEXP (nextlinks
, 0),
1248 NULL_RTX
, &new_direct_jump_p
,
1249 last_combined_insn
)) != 0)
1253 /* Do the same for an insn that explicitly references CC0. */
1254 if (NONJUMP_INSN_P (insn
)
1255 && (prev
= prev_nonnote_insn (insn
)) != 0
1256 && NONJUMP_INSN_P (prev
)
1257 && sets_cc0_p (PATTERN (prev
))
1258 && GET_CODE (PATTERN (insn
)) == SET
1259 && reg_mentioned_p (cc0_rtx
, SET_SRC (PATTERN (insn
))))
1261 if ((next
= try_combine (insn
, prev
, NULL_RTX
, NULL_RTX
,
1263 last_combined_insn
)) != 0)
1266 for (nextlinks
= LOG_LINKS (prev
); nextlinks
;
1267 nextlinks
= XEXP (nextlinks
, 1))
1268 if ((next
= try_combine (insn
, prev
, XEXP (nextlinks
, 0),
1269 NULL_RTX
, &new_direct_jump_p
,
1270 last_combined_insn
)) != 0)
1274 /* Finally, see if any of the insns that this insn links to
1275 explicitly references CC0. If so, try this insn, that insn,
1276 and its predecessor if it sets CC0. */
1277 for (links
= LOG_LINKS (insn
); links
; links
= XEXP (links
, 1))
1278 if (NONJUMP_INSN_P (XEXP (links
, 0))
1279 && GET_CODE (PATTERN (XEXP (links
, 0))) == SET
1280 && reg_mentioned_p (cc0_rtx
, SET_SRC (PATTERN (XEXP (links
, 0))))
1281 && (prev
= prev_nonnote_insn (XEXP (links
, 0))) != 0
1282 && NONJUMP_INSN_P (prev
)
1283 && sets_cc0_p (PATTERN (prev
))
1284 && (next
= try_combine (insn
, XEXP (links
, 0),
1285 prev
, NULL_RTX
, &new_direct_jump_p
,
1286 last_combined_insn
)) != 0)
1290 /* Try combining an insn with two different insns whose results it
1292 for (links
= LOG_LINKS (insn
); links
; links
= XEXP (links
, 1))
1293 for (nextlinks
= XEXP (links
, 1); nextlinks
;
1294 nextlinks
= XEXP (nextlinks
, 1))
1295 if ((next
= try_combine (insn
, XEXP (links
, 0),
1296 XEXP (nextlinks
, 0), NULL_RTX
,
1298 last_combined_insn
)) != 0)
1301 /* Try four-instruction combinations. */
1302 for (links
= LOG_LINKS (insn
); links
; links
= XEXP (links
, 1))
1305 rtx link
= XEXP (links
, 0);
1307 /* If the linked insn has been replaced by a note, then there
1308 is no point in pursuing this chain any further. */
1312 for (next1
= LOG_LINKS (link
); next1
; next1
= XEXP (next1
, 1))
1314 rtx link1
= XEXP (next1
, 0);
1317 /* I0 -> I1 -> I2 -> I3. */
1318 for (nextlinks
= LOG_LINKS (link1
); nextlinks
;
1319 nextlinks
= XEXP (nextlinks
, 1))
1320 if ((next
= try_combine (insn
, link
, link1
,
1321 XEXP (nextlinks
, 0),
1323 last_combined_insn
)) != 0)
1325 /* I0, I1 -> I2, I2 -> I3. */
1326 for (nextlinks
= XEXP (next1
, 1); nextlinks
;
1327 nextlinks
= XEXP (nextlinks
, 1))
1328 if ((next
= try_combine (insn
, link
, link1
,
1329 XEXP (nextlinks
, 0),
1331 last_combined_insn
)) != 0)
1335 for (next1
= XEXP (links
, 1); next1
; next1
= XEXP (next1
, 1))
1337 rtx link1
= XEXP (next1
, 0);
1340 /* I0 -> I2; I1, I2 -> I3. */
1341 for (nextlinks
= LOG_LINKS (link
); nextlinks
;
1342 nextlinks
= XEXP (nextlinks
, 1))
1343 if ((next
= try_combine (insn
, link
, link1
,
1344 XEXP (nextlinks
, 0),
1346 last_combined_insn
)) != 0)
1348 /* I0 -> I1; I1, I2 -> I3. */
1349 for (nextlinks
= LOG_LINKS (link1
); nextlinks
;
1350 nextlinks
= XEXP (nextlinks
, 1))
1351 if ((next
= try_combine (insn
, link
, link1
,
1352 XEXP (nextlinks
, 0),
1354 last_combined_insn
)) != 0)
1359 /* Try this insn with each REG_EQUAL note it links back to. */
1360 for (links
= LOG_LINKS (insn
); links
; links
= XEXP (links
, 1))
1363 rtx temp
= XEXP (links
, 0);
1364 if ((set
= single_set (temp
)) != 0
1365 && (note
= find_reg_equal_equiv_note (temp
)) != 0
1366 && (note
= XEXP (note
, 0), GET_CODE (note
)) != EXPR_LIST
1367 /* Avoid using a register that may already been marked
1368 dead by an earlier instruction. */
1369 && ! unmentioned_reg_p (note
, SET_SRC (set
))
1370 && (GET_MODE (note
) == VOIDmode
1371 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set
)))
1372 : GET_MODE (SET_DEST (set
)) == GET_MODE (note
)))
1374 /* Temporarily replace the set's source with the
1375 contents of the REG_EQUAL note. The insn will
1376 be deleted or recognized by try_combine. */
1377 rtx orig
= SET_SRC (set
);
1378 SET_SRC (set
) = note
;
1380 i2mod_old_rhs
= copy_rtx (orig
);
1381 i2mod_new_rhs
= copy_rtx (note
);
1382 next
= try_combine (insn
, i2mod
, NULL_RTX
, NULL_RTX
,
1384 last_combined_insn
);
1388 SET_SRC (set
) = orig
;
1393 record_dead_and_set_regs (insn
);
1401 default_rtl_profile ();
1404 new_direct_jump_p
|= purge_all_dead_edges ();
1405 delete_noop_moves ();
1408 free (uid_log_links
);
1409 free (uid_insn_cost
);
1410 VEC_free (reg_stat_type
, heap
, reg_stat
);
1413 struct undo
*undo
, *next
;
1414 for (undo
= undobuf
.frees
; undo
; undo
= next
)
1422 total_attempts
+= combine_attempts
;
1423 total_merges
+= combine_merges
;
1424 total_extras
+= combine_extras
;
1425 total_successes
+= combine_successes
;
1427 nonzero_sign_valid
= 0;
1428 rtl_hooks
= general_rtl_hooks
;
1430 /* Make recognizer allow volatile MEMs again. */
1433 return new_direct_jump_p
;
1436 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1439 init_reg_last (void)
1444 FOR_EACH_VEC_ELT (reg_stat_type
, reg_stat
, i
, p
)
1445 memset (p
, 0, offsetof (reg_stat_type
, sign_bit_copies
));
1448 /* Set up any promoted values for incoming argument registers. */
1451 setup_incoming_promotions (rtx first
)
1454 bool strictly_local
= false;
1456 for (arg
= DECL_ARGUMENTS (current_function_decl
); arg
;
1457 arg
= DECL_CHAIN (arg
))
1459 rtx x
, reg
= DECL_INCOMING_RTL (arg
);
1461 enum machine_mode mode1
, mode2
, mode3
, mode4
;
1463 /* Only continue if the incoming argument is in a register. */
1467 /* Determine, if possible, whether all call sites of the current
1468 function lie within the current compilation unit. (This does
1469 take into account the exporting of a function via taking its
1470 address, and so forth.) */
1471 strictly_local
= cgraph_local_info (current_function_decl
)->local
;
1473 /* The mode and signedness of the argument before any promotions happen
1474 (equal to the mode of the pseudo holding it at that stage). */
1475 mode1
= TYPE_MODE (TREE_TYPE (arg
));
1476 uns1
= TYPE_UNSIGNED (TREE_TYPE (arg
));
1478 /* The mode and signedness of the argument after any source language and
1479 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1480 mode2
= TYPE_MODE (DECL_ARG_TYPE (arg
));
1481 uns3
= TYPE_UNSIGNED (DECL_ARG_TYPE (arg
));
1483 /* The mode and signedness of the argument as it is actually passed,
1484 after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions. */
1485 mode3
= promote_function_mode (DECL_ARG_TYPE (arg
), mode2
, &uns3
,
1486 TREE_TYPE (cfun
->decl
), 0);
1488 /* The mode of the register in which the argument is being passed. */
1489 mode4
= GET_MODE (reg
);
1491 /* Eliminate sign extensions in the callee when:
1492 (a) A mode promotion has occurred; */
1495 /* (b) The mode of the register is the same as the mode of
1496 the argument as it is passed; */
1499 /* (c) There's no language level extension; */
1502 /* (c.1) All callers are from the current compilation unit. If that's
1503 the case we don't have to rely on an ABI, we only have to know
1504 what we're generating right now, and we know that we will do the
1505 mode1 to mode2 promotion with the given sign. */
1506 else if (!strictly_local
)
1508 /* (c.2) The combination of the two promotions is useful. This is
1509 true when the signs match, or if the first promotion is unsigned.
1510 In the later case, (sign_extend (zero_extend x)) is the same as
1511 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1517 /* Record that the value was promoted from mode1 to mode3,
1518 so that any sign extension at the head of the current
1519 function may be eliminated. */
1520 x
= gen_rtx_CLOBBER (mode1
, const0_rtx
);
1521 x
= gen_rtx_fmt_e ((uns3
? ZERO_EXTEND
: SIGN_EXTEND
), mode3
, x
);
1522 record_value_for_reg (reg
, first
, x
);
1526 /* Called via note_stores. If X is a pseudo that is narrower than
1527 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1529 If we are setting only a portion of X and we can't figure out what
1530 portion, assume all bits will be used since we don't know what will
1533 Similarly, set how many bits of X are known to be copies of the sign bit
1534 at all locations in the function. This is the smallest number implied
1538 set_nonzero_bits_and_sign_copies (rtx x
, const_rtx set
, void *data
)
1540 rtx insn
= (rtx
) data
;
1544 && REGNO (x
) >= FIRST_PSEUDO_REGISTER
1545 /* If this register is undefined at the start of the file, we can't
1546 say what its contents were. */
1547 && ! REGNO_REG_SET_P
1548 (DF_LR_IN (ENTRY_BLOCK_PTR
->next_bb
), REGNO (x
))
1549 && GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
)
1551 reg_stat_type
*rsp
= VEC_index (reg_stat_type
, reg_stat
, REGNO (x
));
1553 if (set
== 0 || GET_CODE (set
) == CLOBBER
)
1555 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1556 rsp
->sign_bit_copies
= 1;
1560 /* If this register is being initialized using itself, and the
1561 register is uninitialized in this basic block, and there are
1562 no LOG_LINKS which set the register, then part of the
1563 register is uninitialized. In that case we can't assume
1564 anything about the number of nonzero bits.
1566 ??? We could do better if we checked this in
1567 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1568 could avoid making assumptions about the insn which initially
1569 sets the register, while still using the information in other
1570 insns. We would have to be careful to check every insn
1571 involved in the combination. */
1574 && reg_referenced_p (x
, PATTERN (insn
))
1575 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn
)),
1580 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
1582 if (dead_or_set_p (XEXP (link
, 0), x
))
1587 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1588 rsp
->sign_bit_copies
= 1;
1593 /* If this is a complex assignment, see if we can convert it into a
1594 simple assignment. */
1595 set
= expand_field_assignment (set
);
1597 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1598 set what we know about X. */
1600 if (SET_DEST (set
) == x
1601 || (GET_CODE (SET_DEST (set
)) == SUBREG
1602 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set
)))
1603 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set
)))))
1604 && SUBREG_REG (SET_DEST (set
)) == x
))
1606 rtx src
= SET_SRC (set
);
1608 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1609 /* If X is narrower than a word and SRC is a non-negative
1610 constant that would appear negative in the mode of X,
1611 sign-extend it for use in reg_stat[].nonzero_bits because some
1612 machines (maybe most) will actually do the sign-extension
1613 and this is the conservative approach.
1615 ??? For 2.5, try to tighten up the MD files in this regard
1616 instead of this kludge. */
1618 if (GET_MODE_BITSIZE (GET_MODE (x
)) < BITS_PER_WORD
1619 && CONST_INT_P (src
)
1621 && 0 != (UINTVAL (src
)
1622 & ((unsigned HOST_WIDE_INT
) 1
1623 << (GET_MODE_BITSIZE (GET_MODE (x
)) - 1))))
1624 src
= GEN_INT (UINTVAL (src
)
1625 | ((unsigned HOST_WIDE_INT
) (-1)
1626 << GET_MODE_BITSIZE (GET_MODE (x
))));
1629 /* Don't call nonzero_bits if it cannot change anything. */
1630 if (rsp
->nonzero_bits
!= ~(unsigned HOST_WIDE_INT
) 0)
1631 rsp
->nonzero_bits
|= nonzero_bits (src
, nonzero_bits_mode
);
1632 num
= num_sign_bit_copies (SET_SRC (set
), GET_MODE (x
));
1633 if (rsp
->sign_bit_copies
== 0
1634 || rsp
->sign_bit_copies
> num
)
1635 rsp
->sign_bit_copies
= num
;
1639 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1640 rsp
->sign_bit_copies
= 1;
1645 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1646 optionally insns that were previously combined into I3 or that will be
1647 combined into the merger of INSN and I3. The order is PRED, PRED2,
1648 INSN, SUCC, SUCC2, I3.
1650 Return 0 if the combination is not allowed for any reason.
1652 If the combination is allowed, *PDEST will be set to the single
1653 destination of INSN and *PSRC to the single source, and this function
1657 can_combine_p (rtx insn
, rtx i3
, rtx pred ATTRIBUTE_UNUSED
,
1658 rtx pred2 ATTRIBUTE_UNUSED
, rtx succ
, rtx succ2
,
1659 rtx
*pdest
, rtx
*psrc
)
1668 bool all_adjacent
= true;
1669 int (*is_volatile_p
) (const_rtx
);
1675 if (next_active_insn (succ2
) != i3
)
1676 all_adjacent
= false;
1677 if (next_active_insn (succ
) != succ2
)
1678 all_adjacent
= false;
1680 else if (next_active_insn (succ
) != i3
)
1681 all_adjacent
= false;
1682 if (next_active_insn (insn
) != succ
)
1683 all_adjacent
= false;
1685 else if (next_active_insn (insn
) != i3
)
1686 all_adjacent
= false;
1688 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1689 or a PARALLEL consisting of such a SET and CLOBBERs.
1691 If INSN has CLOBBER parallel parts, ignore them for our processing.
1692 By definition, these happen during the execution of the insn. When it
1693 is merged with another insn, all bets are off. If they are, in fact,
1694 needed and aren't also supplied in I3, they may be added by
1695 recog_for_combine. Otherwise, it won't match.
1697 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1700 Get the source and destination of INSN. If more than one, can't
1703 if (GET_CODE (PATTERN (insn
)) == SET
)
1704 set
= PATTERN (insn
);
1705 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1706 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1708 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1710 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1712 switch (GET_CODE (elt
))
1714 /* This is important to combine floating point insns
1715 for the SH4 port. */
1717 /* Combining an isolated USE doesn't make sense.
1718 We depend here on combinable_i3pat to reject them. */
1719 /* The code below this loop only verifies that the inputs of
1720 the SET in INSN do not change. We call reg_set_between_p
1721 to verify that the REG in the USE does not change between
1723 If the USE in INSN was for a pseudo register, the matching
1724 insn pattern will likely match any register; combining this
1725 with any other USE would only be safe if we knew that the
1726 used registers have identical values, or if there was
1727 something to tell them apart, e.g. different modes. For
1728 now, we forgo such complicated tests and simply disallow
1729 combining of USES of pseudo registers with any other USE. */
1730 if (REG_P (XEXP (elt
, 0))
1731 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1733 rtx i3pat
= PATTERN (i3
);
1734 int i
= XVECLEN (i3pat
, 0) - 1;
1735 unsigned int regno
= REGNO (XEXP (elt
, 0));
1739 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1741 if (GET_CODE (i3elt
) == USE
1742 && REG_P (XEXP (i3elt
, 0))
1743 && (REGNO (XEXP (i3elt
, 0)) == regno
1744 ? reg_set_between_p (XEXP (elt
, 0),
1745 PREV_INSN (insn
), i3
)
1746 : regno
>= FIRST_PSEUDO_REGISTER
))
1753 /* We can ignore CLOBBERs. */
1758 /* Ignore SETs whose result isn't used but not those that
1759 have side-effects. */
1760 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1761 && insn_nothrow_p (insn
)
1762 && !side_effects_p (elt
))
1765 /* If we have already found a SET, this is a second one and
1766 so we cannot combine with this insn. */
1774 /* Anything else means we can't combine. */
1780 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1781 so don't do anything with it. */
1782 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1791 set
= expand_field_assignment (set
);
1792 src
= SET_SRC (set
), dest
= SET_DEST (set
);
1794 /* Don't eliminate a store in the stack pointer. */
1795 if (dest
== stack_pointer_rtx
1796 /* Don't combine with an insn that sets a register to itself if it has
1797 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1798 || (rtx_equal_p (src
, dest
) && find_reg_note (insn
, REG_EQUAL
, NULL_RTX
))
1799 /* Can't merge an ASM_OPERANDS. */
1800 || GET_CODE (src
) == ASM_OPERANDS
1801 /* Can't merge a function call. */
1802 || GET_CODE (src
) == CALL
1803 /* Don't eliminate a function call argument. */
1805 && (find_reg_fusage (i3
, USE
, dest
)
1807 && REGNO (dest
) < FIRST_PSEUDO_REGISTER
1808 && global_regs
[REGNO (dest
)])))
1809 /* Don't substitute into an incremented register. */
1810 || FIND_REG_INC_NOTE (i3
, dest
)
1811 || (succ
&& FIND_REG_INC_NOTE (succ
, dest
))
1812 || (succ2
&& FIND_REG_INC_NOTE (succ2
, dest
))
1813 /* Don't substitute into a non-local goto, this confuses CFG. */
1814 || (JUMP_P (i3
) && find_reg_note (i3
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
1815 /* Make sure that DEST is not used after SUCC but before I3. */
1818 && (reg_used_between_p (dest
, succ2
, i3
)
1819 || reg_used_between_p (dest
, succ
, succ2
)))
1820 || (!succ2
&& succ
&& reg_used_between_p (dest
, succ
, i3
))))
1821 /* Make sure that the value that is to be substituted for the register
1822 does not use any registers whose values alter in between. However,
1823 If the insns are adjacent, a use can't cross a set even though we
1824 think it might (this can happen for a sequence of insns each setting
1825 the same destination; last_set of that register might point to
1826 a NOTE). If INSN has a REG_EQUIV note, the register is always
1827 equivalent to the memory so the substitution is valid even if there
1828 are intervening stores. Also, don't move a volatile asm or
1829 UNSPEC_VOLATILE across any other insns. */
1832 || ! find_reg_note (insn
, REG_EQUIV
, src
))
1833 && use_crosses_set_p (src
, DF_INSN_LUID (insn
)))
1834 || (GET_CODE (src
) == ASM_OPERANDS
&& MEM_VOLATILE_P (src
))
1835 || GET_CODE (src
) == UNSPEC_VOLATILE
))
1836 /* Don't combine across a CALL_INSN, because that would possibly
1837 change whether the life span of some REGs crosses calls or not,
1838 and it is a pain to update that information.
1839 Exception: if source is a constant, moving it later can't hurt.
1840 Accept that as a special case. */
1841 || (DF_INSN_LUID (insn
) < last_call_luid
&& ! CONSTANT_P (src
)))
1844 /* DEST must either be a REG or CC0. */
1847 /* If register alignment is being enforced for multi-word items in all
1848 cases except for parameters, it is possible to have a register copy
1849 insn referencing a hard register that is not allowed to contain the
1850 mode being copied and which would not be valid as an operand of most
1851 insns. Eliminate this problem by not combining with such an insn.
1853 Also, on some machines we don't want to extend the life of a hard
1857 && ((REGNO (dest
) < FIRST_PSEUDO_REGISTER
1858 && ! HARD_REGNO_MODE_OK (REGNO (dest
), GET_MODE (dest
)))
1859 /* Don't extend the life of a hard register unless it is
1860 user variable (if we have few registers) or it can't
1861 fit into the desired register (meaning something special
1863 Also avoid substituting a return register into I3, because
1864 reload can't handle a conflict with constraints of other
1866 || (REGNO (src
) < FIRST_PSEUDO_REGISTER
1867 && ! HARD_REGNO_MODE_OK (REGNO (src
), GET_MODE (src
)))))
1870 else if (GET_CODE (dest
) != CC0
)
1874 if (GET_CODE (PATTERN (i3
)) == PARALLEL
)
1875 for (i
= XVECLEN (PATTERN (i3
), 0) - 1; i
>= 0; i
--)
1876 if (GET_CODE (XVECEXP (PATTERN (i3
), 0, i
)) == CLOBBER
)
1878 /* Don't substitute for a register intended as a clobberable
1880 rtx reg
= XEXP (XVECEXP (PATTERN (i3
), 0, i
), 0);
1881 if (rtx_equal_p (reg
, dest
))
1884 /* If the clobber represents an earlyclobber operand, we must not
1885 substitute an expression containing the clobbered register.
1886 As we do not analyze the constraint strings here, we have to
1887 make the conservative assumption. However, if the register is
1888 a fixed hard reg, the clobber cannot represent any operand;
1889 we leave it up to the machine description to either accept or
1890 reject use-and-clobber patterns. */
1892 || REGNO (reg
) >= FIRST_PSEUDO_REGISTER
1893 || !fixed_regs
[REGNO (reg
)])
1894 if (reg_overlap_mentioned_p (reg
, src
))
1898 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1899 or not), reject, unless nothing volatile comes between it and I3 */
1901 if (GET_CODE (src
) == ASM_OPERANDS
|| volatile_refs_p (src
))
1903 /* Make sure neither succ nor succ2 contains a volatile reference. */
1904 if (succ2
!= 0 && volatile_refs_p (PATTERN (succ2
)))
1906 if (succ
!= 0 && volatile_refs_p (PATTERN (succ
)))
1908 /* We'll check insns between INSN and I3 below. */
1911 /* If INSN is an asm, and DEST is a hard register, reject, since it has
1912 to be an explicit register variable, and was chosen for a reason. */
1914 if (GET_CODE (src) == ASM_OPERANDS
1915 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1918 /* If INSN contains volatile references (specifically volatile MEMs),
1919 we cannot combine across any other volatile references.
1920 Even if INSN doesn't contain volatile references, any intervening
1921 volatile insn might affect machine state. */
1923 is_volatile_p = volatile_refs_p (PATTERN (insn))
1927 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1928 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
1931 /* If INSN contains an autoincrement or autodecrement, make sure that
1932 register is not used between there and I3, and not already used in
1933 I3 either. Neither must it be used in PRED or SUCC, if they exist.
1934 Also insist that I3 not be a jump; if it were one
1935 and the incremented register were spilled, we would lose. */
1938 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1939 if (REG_NOTE_KIND (link) == REG_INC
1941 || reg_used_between_p (XEXP (link, 0), insn, i3)
1942 || (pred != NULL_RTX
1943 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1944 || (pred2 != NULL_RTX
1945 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1946 || (succ != NULL_RTX
1947 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1948 || (succ2 != NULL_RTX
1949 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1950 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
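/* Illustrative sketch (added commentary, not from the original source): a
   REG_INC note records an embedded side effect such as
     (set (mem:SI (post_inc:SI (reg:SI 3))) (reg:SI 4))
   If reg 3 were also used between INSN and I3, or mentioned in PRED, SUCC
   or I3 itself, moving the computation could observe the register either
   before or after the increment, so the loop above conservatively rejects
   the combination.  The register numbers are invented for the example. */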
1955 /* Don't combine an insn that follows a CC0-setting insn.
1956 An insn that uses CC0 must not be separated from the one that sets it.
1957 We do, however, allow I2 to follow a CC0-setting insn if that insn
1958 is passed as I1; in that case it will be deleted also.
1959 We also allow combining in this case if all the insns are adjacent
1960 because that would leave the two CC0 insns adjacent as well.
1961 It would be more logical to test whether CC0 occurs inside I1 or I2,
1962 but that would be much slower, and this ought to be equivalent. */
1964 p = prev_nonnote_insn (insn);
1965 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
1970 /* If we get here, we have passed all the tests and the combination is
1979 /* LOC is the location within I3 that contains its pattern or the component
1980 of a PARALLEL of the pattern. We validate that it is valid for combining.
1982 One problem is that if I3 modifies its output, as opposed to replacing it
1983 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
1984 doing so would produce an insn that is not equivalent to the original insns.
1988 (set (reg:DI 101) (reg:DI 100))
1989 (set (subreg:SI (reg:DI 101) 0) <foo>)
1991 This is NOT equivalent to:
1993 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
1994 (set (reg:DI 101) (reg:DI 100))])
1996 Not only does this modify 100 (in which case it might still be valid
1997 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
1999 We can also run into a problem if I2 sets a register that I1
2000 uses and I1 gets directly substituted into I3 (not via I2). In that
2001 case, we would be getting the wrong value of I2DEST into I3, so we
2002 must reject the combination. This case occurs when I2 and I1 both
2003 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2004 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2005 of a SET must prevent combination from occurring. The same situation
2006 can occur for I0, in which case I0_NOT_IN_SRC is set.
2008 Before doing the above check, we first try to expand a field assignment
2009 into a set of logical operations.
2011 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2012 we place a register that is both set and used within I3. If more than one
2013 such register is detected, we fail.
2015 Return 1 if the combination is valid, zero otherwise. */
2018 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2019 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2023 if (GET_CODE (x) == SET)
2026 rtx dest = SET_DEST (set);
2027 rtx src = SET_SRC (set);
2028 rtx inner_dest = dest;
2031 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2032 || GET_CODE (inner_dest) == SUBREG
2033 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2034 inner_dest = XEXP (inner_dest, 0);
2036 /* Check for the case where I3 modifies its output, as discussed
2037 above. We don't want to prevent pseudos from being combined
2038 into the address of a MEM, so only prevent the combination if
2039 i1 or i2 set the same MEM. */
2040 if ((inner_dest != dest &&
2041 (!MEM_P (inner_dest)
2042 || rtx_equal_p (i2dest, inner_dest)
2043 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2044 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2045 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2046 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2047 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2049 /* This is the same test done in can_combine_p except we can't test
2050 all_adjacent; we don't have to, since this instruction will stay
2051 in place, thus we are not considering increasing the lifetime of
2054 Also, if this insn sets a function argument, combining it with
2055 something that might need a spill could clobber a previous
2056 function argument; the all_adjacent test in can_combine_p also
2057 checks this; here, we do a more specific test for this case. */
2059 || (REG_P (inner_dest)
2060 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2061 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2062 GET_MODE (inner_dest))))
2063 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2064 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2067 /* If DEST is used in I3, it is being killed in this insn, so
2068 record that for later. We have to consider paradoxical
2069 subregs here, since they kill the whole register, but we
2070 ignore partial subregs, STRICT_LOW_PART, etc.
2071 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2072 STACK_POINTER_REGNUM, since these are always considered to be
2073 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2075 if (GET_CODE (subdest) == SUBREG
2076 && (GET_MODE_SIZE (GET_MODE (subdest))
2077 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2078 subdest = SUBREG_REG (subdest);
2081 && reg_referenced_p (subdest, PATTERN (i3))
2082 && REGNO (subdest) != FRAME_POINTER_REGNUM
2083 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2084 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2086 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2087 && (REGNO (subdest) != ARG_POINTER_REGNUM
2088 || ! fixed_regs[REGNO (subdest)])
2090 && REGNO (subdest) != STACK_POINTER_REGNUM)
2092 if (*pi3dest_killed)
2095 *pi3dest_killed = subdest;
2099 else if (GET_CODE (x) == PARALLEL)
2103 for (i = 0; i < XVECLEN (x, 0); i++)
2104 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2105 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2112 /* Return 1 if X is an arithmetic expression that contains a multiplication
2113 and division. We don't count multiplications by powers of two here. */
2116 contains_muldiv (rtx x)
2118 switch (GET_CODE (x))
2120 case MOD: case DIV: case UMOD: case UDIV:
2124 return ! (CONST_INT_P (XEXP (x, 1))
2125 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2128 return contains_muldiv (XEXP (x, 0))
2129 || contains_muldiv (XEXP (x, 1));
2132 return contains_muldiv (XEXP (x, 0));
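/* Hedged example of what the predicate above accepts and rejects (added
   commentary, not part of the original code):
     (plus (mult (reg:SI 65) (reg:SI 66)) (reg:SI 67))   -> 1 (real multiply)
     (plus (mult (reg:SI 65) (const_int 8)) (reg:SI 67)) -> 0 (power of two,
                                                              really a shift)
   The register numbers are invented for illustration only. */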
2138 /* Determine whether INSN can be used in a combination. Return nonzero if
2139 not. This is used in try_combine to detect early some cases where we
2140 can't perform combinations. */
2143 cant_combine_insn_p (rtx insn)
2148 /* If this isn't really an insn, we can't do anything.
2149 This can occur when flow deletes an insn that it has merged into an
2150 auto-increment address. */
2151 if (! INSN_P (insn))
2154 /* Never combine loads and stores involving hard regs that are likely
2155 to be spilled. The register allocator can usually handle such
2156 reg-reg moves by tying. If we allow the combiner to make
2157 substitutions of likely-spilled regs, reload might die.
2158 As an exception, we allow combinations involving fixed regs; these are
2159 not available to the register allocator so there's no risk involved. */
2161 set = single_set (insn);
2164 src = SET_SRC (set);
2165 dest = SET_DEST (set);
2166 if (GET_CODE (src) == SUBREG)
2167 src = SUBREG_REG (src);
2168 if (GET_CODE (dest) == SUBREG)
2169 dest = SUBREG_REG (dest);
2170 if (REG_P (src) && REG_P (dest)
2171 && ((HARD_REGISTER_P (src)
2172 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2173 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2174 || (HARD_REGISTER_P (dest)
2175 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2176 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2182 struct likely_spilled_retval_info
2184 unsigned regno, nregs;
2188 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2189 hard registers that are known to be written to / clobbered in full. */
2191 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2193 struct likely_spilled_retval_info *const info =
2194 (struct likely_spilled_retval_info *) data;
2195 unsigned regno, nregs;
2198 if (!REG_P (XEXP (set, 0)))
2201 if (regno >= info->regno + info->nregs)
2203 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2204 if (regno + nregs <= info->regno)
2206 new_mask = (2U << (nregs - 1)) - 1;
2207 if (regno < info->regno)
2208 new_mask >>= info->regno - regno;
2210 new_mask <<= regno - info->regno;
2211 info->mask &= ~new_mask;
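/* Worked example of the mask arithmetic above (added commentary; the
   concrete numbers are assumptions for illustration): if the return value
   occupies info->regno = 0 with info->nregs = 4, and X writes two registers
   starting at regno = 2, then new_mask = (2U << 1) - 1 = 0x3, shifted left
   by regno - info->regno = 2 to give 0xC, so bits 2 and 3 are cleared from
   info->mask, leaving only registers 0 and 1 marked as still live. */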
2214 /* Return nonzero iff part of the return value is live during INSN, and
2215 it is likely spilled. This can happen when more than one insn is needed
2216 to copy the return value, e.g. when we consider combining into the
2217 second copy insn for a complex value. */
2220 likely_spilled_retval_p (rtx insn)
2222 rtx use = BB_END (this_basic_block);
2224 unsigned regno, nregs;
2225 /* We assume here that no machine mode needs more than
2226 32 hard registers when the value overlaps with a register
2227 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2229 struct likely_spilled_retval_info info;
2231 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2233 reg = XEXP (PATTERN (use), 0);
2234 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2236 regno = REGNO (reg);
2237 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2240 mask = (2U << (nregs - 1)) - 1;
2242 /* Disregard parts of the return value that are set later. */
2246 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2248 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2251 /* Check if any of the (probably) live return value registers is
2256 if ((mask & 1 << nregs)
2257 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2263 /* Adjust INSN after we made a change to its destination.
2265 Changing the destination can invalidate notes that say something about
2266 the results of the insn and a LOG_LINK pointing to the insn. */
2269 adjust_for_new_dest (rtx insn)
2271 /* For notes, be conservative and simply remove them. */
2272 remove_reg_equal_equiv_notes (insn);
2274 /* The new insn will have a destination that was previously the destination
2275 of an insn just above it. Call distribute_links to make a LOG_LINK from
2276 the next use of that destination. */
2277 distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));
2279 df_insn_rescan (insn);
2282 /* Return TRUE if combine can reuse reg X in mode MODE.
2283 ADDED_SETS is nonzero if the original set is still required. */
2285 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2293 /* Allow hard registers if the new mode is legal, and occupies no more
2294 registers than the old mode. */
2295 if (regno < FIRST_PSEUDO_REGISTER)
2296 return (HARD_REGNO_MODE_OK (regno, mode)
2297 && (hard_regno_nregs[regno][GET_MODE (x)]
2298 >= hard_regno_nregs[regno][mode]));
2300 /* Or a pseudo that is only used once. */
2301 return (REG_N_SETS (regno) == 1 && !added_sets
2302 && !REG_USERVAR_P (x));
2306 /* Check whether X, the destination of a set, refers to part of
2307 the register specified by REG. */
2310 reg_subword_p (rtx x, rtx reg)
2312 /* Check that reg is an integer mode register. */
2313 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2316 if (GET_CODE (x) == STRICT_LOW_PART
2317 || GET_CODE (x) == ZERO_EXTRACT)
2320 return GET_CODE (x) == SUBREG
2321 && SUBREG_REG (x) == reg
2322 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
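/* Hedged illustration (added commentary, not from the original source),
   assuming REG is (reg:DI 100):
     (strict_low_part (subreg:SI (reg:DI 100) 0)) -> true (the wrapper is
                                                     looked through first)
     (subreg:SI (reg:DI 100) 0)                   -> true via the SUBREG test
     (reg:DI 100)                                 -> false (the whole register)
   The register number and modes are invented for the example. */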
2326 /* Replace auto-increment addressing modes with explicit operations to access
2327 the same addresses without modifying the corresponding registers. */
2330 cleanup_auto_inc_dec (rtx src, enum machine_mode mem_mode)
2333 const RTX_CODE code = GET_CODE (x);
2349 /* SCRATCH must be shared because they represent distinct values. */
2352 if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER)
2357 if (shared_const_p (x))
2362 mem_mode = GET_MODE (x);
2367 gcc_assert (mem_mode != VOIDmode && mem_mode != BLKmode);
2368 return gen_rtx_PLUS (GET_MODE (x),
2369 cleanup_auto_inc_dec (XEXP (x, 0), mem_mode),
2370 GEN_INT (code == PRE_INC
2371 ? GET_MODE_SIZE (mem_mode)
2372 : -GET_MODE_SIZE (mem_mode)));
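/* Hedged example of the rewrite above (added commentary): inside an SImode
   memory access, an address such as
     (pre_inc:SI (reg:SI 10))
   is rewritten to the equivalent explicit form
     (plus:SI (reg:SI 10) (const_int 4))
   (and PRE_DEC to a negative offset), so the debug expression no longer
   carries the register-modifying side effect.  The register number is
   assumed; the offset is GET_MODE_SIZE of the enclosing MEM's mode. */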
2378 return cleanup_auto_inc_dec (code == PRE_MODIFY
2379 ? XEXP (x, 1) : XEXP (x, 0),
2386 /* Copy the various flags, fields, and other information. We assume
2387 that all fields need copying, and then clear the fields that should
2388 not be copied. That is the sensible default behavior, and forces
2389 us to explicitly document why we are *not* copying a flag. */
2390 x = shallow_copy_rtx (x);
2392 /* We do not copy the USED flag, which is used as a mark bit during
2393 walks over the RTL. */
2394 RTX_FLAG (x, used) = 0;
2396 /* We do not copy FRAME_RELATED for INSNs. */
2398 RTX_FLAG (x, frame_related) = 0;
2400 fmt = GET_RTX_FORMAT (code);
2401 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2403 XEXP (x, i) = cleanup_auto_inc_dec (XEXP (x, i), mem_mode);
2404 else if (fmt[i] == 'E' || fmt[i] == 'V')
2407 XVEC (x, i) = rtvec_alloc (XVECLEN (x, i));
2408 for (j = 0; j < XVECLEN (x, i); j++)
2410 = cleanup_auto_inc_dec (XVECEXP (src, i, j), mem_mode);
2417 /* Auxiliary data structure for propagate_for_debug_stmt. */
2419 struct rtx_subst_pair
2425 /* DATA points to an rtx_subst_pair. Return the value that should be
2429 propagate_for_debug_subst (rtx from, const_rtx old_rtx, void *data)
2431 struct rtx_subst_pair *pair = (struct rtx_subst_pair *)data;
2433 if (!rtx_equal_p (from, old_rtx))
2435 if (!pair->adjusted)
2437 pair->adjusted = true;
2439 pair->to = cleanup_auto_inc_dec (pair->to, VOIDmode);
2441 pair->to = copy_rtx (pair->to);
2443 pair->to = make_compound_operation (pair->to, SET);
2446 return copy_rtx (pair->to);
2449 /* Replace all the occurrences of DEST with SRC in DEBUG_INSNs between INSN
2450 and LAST, not including INSN, but including LAST. Also stop at the end
2451 of THIS_BASIC_BLOCK. */
2454 propagate_for_debug (rtx insn, rtx last, rtx dest, rtx src)
2456 rtx next, loc, end = NEXT_INSN (BB_END (this_basic_block));
2458 struct rtx_subst_pair p;
2462 next = NEXT_INSN (insn);
2463 last = NEXT_INSN (last);
2464 while (next != last && next != end)
2467 next = NEXT_INSN (insn);
2468 if (DEBUG_INSN_P (insn))
2470 loc = simplify_replace_fn_rtx (INSN_VAR_LOCATION_LOC (insn),
2471 dest, propagate_for_debug_subst, &p);
2472 if (loc == INSN_VAR_LOCATION_LOC (insn))
2474 INSN_VAR_LOCATION_LOC (insn) = loc;
2475 df_insn_rescan (insn);
2480 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2481 Note that the INSN should be deleted *after* removing dead edges, so
2482 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2483 but not for a (set (pc) (label_ref FOO)). */
2486 update_cfg_for_uncondjump (rtx insn)
2488 basic_block bb = BLOCK_FOR_INSN (insn);
2489 bool at_end = (BB_END (bb) == insn);
2492 purge_dead_edges (bb);
2495 if (at_end && EDGE_COUNT (bb->succs) == 1)
2499 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2501 /* Remove barriers from the footer if there are any. */
2502 for (insn = bb->il.rtl->footer; insn; insn = NEXT_INSN (insn))
2503 if (BARRIER_P (insn))
2505 if (PREV_INSN (insn))
2506 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2508 bb->il.rtl->footer = NEXT_INSN (insn);
2509 if (NEXT_INSN (insn))
2510 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2512 else if (LABEL_P (insn))
2517 /* Try to combine the insns I0, I1 and I2 into I3.
2518 Here I0, I1 and I2 appear earlier than I3.
2519 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2522 If we are combining more than two insns and the resulting insn is not
2523 recognized, try splitting it into two insns. If that happens, I2 and I3
2524 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2525 Otherwise, I0, I1 and I2 are pseudo-deleted.
2527 Return 0 if the combination does not work. Then nothing is changed.
2528 If we did the combination, return the insn at which combine should
2531 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2532 new direct jump instruction.
2534 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2535 been I3 passed to an earlier try_combine within the same basic
2539 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
2540 rtx last_combined_insn)
2542 /* New patterns for I3 and I2, respectively. */
2543 rtx newpat
, newi2pat
= 0;
2544 rtvec newpat_vec_with_clobbers
= 0;
2545 int substed_i2
= 0, substed_i1
= 0, substed_i0
= 0;
2546 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2548 int added_sets_0
, added_sets_1
, added_sets_2
;
2549 /* Total number of SETs to put into I3. */
2551 /* Nonzero if I2's or I1's body now appears in I3. */
2552 int i2_is_used
= 0, i1_is_used
= 0;
2553 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2554 int insn_code_number
, i2_code_number
= 0, other_code_number
= 0;
2555 /* Contains I3 if the destination of I3 is used in its source, which means
2556 that the old life of I3 is being killed. If that usage is placed into
2557 I2 and not in I3, a REG_DEAD note must be made. */
2558 rtx i3dest_killed
= 0;
2559 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2560 rtx i2dest
= 0, i2src
= 0, i1dest
= 0, i1src
= 0, i0dest
= 0, i0src
= 0;
2561 /* Copy of SET_SRC of I1 and I0, if needed. */
2562 rtx i1src_copy
= 0, i0src_copy
= 0, i0src_copy2
= 0;
2563 /* Set if I2DEST was reused as a scratch register. */
2564 bool i2scratch
= false;
2565 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2566 rtx i0pat
= 0, i1pat
= 0, i2pat
= 0;
2567 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2568 int i2dest_in_i2src
= 0, i1dest_in_i1src
= 0, i2dest_in_i1src
= 0;
2569 int i0dest_in_i0src
= 0, i1dest_in_i0src
= 0, i2dest_in_i0src
= 0;
2570 int i2dest_killed
= 0, i1dest_killed
= 0, i0dest_killed
= 0;
2571 int i1_feeds_i2_n
= 0, i0_feeds_i2_n
= 0, i0_feeds_i1_n
= 0;
2572 /* Notes that must be added to REG_NOTES in I3 and I2. */
2573 rtx new_i3_notes
, new_i2_notes
;
2574 /* Notes that we substituted I3 into I2 instead of the normal case. */
2575 int i3_subst_into_i2
= 0;
2576 /* Notes that I1, I2 or I3 is a MULT operation. */
2579 int changed_i3_dest
= 0;
2585 rtx new_other_notes
;
2588 /* Only try four-insn combinations when there's high likelihood of
2589 success. Look for simple insns, such as loads of constants or
2590 binary operations involving a constant. */
2597 if (!flag_expensive_optimizations)
2600 for (i = 0; i < 4; i++)
2602 rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2603 rtx set = single_set (insn);
2607 src = SET_SRC (set);
2608 if (CONSTANT_P (src))
2613 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2615 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2616 || GET_CODE (src) == LSHIFTRT)
2619 if (ngood < 2 && nshift < 2)
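/* Hedged illustration of the screening loop above (added commentary): a
   four-insn combination is only pursued when at least two of the insns are
   "simple" in this sense -- e.g. (set (reg) (const_int 42)) or
   (set (reg) (plus (reg) (const_int 4))) each count towards ngood, while a
   plain shift such as (set (reg) (ashift (reg) (const_int 3))) counts only
   towards nshift -- or when at least two of them are shifts. */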
2623 /* Exit early if one of the insns involved can't be used for
2625 if (cant_combine_insn_p (i3
)
2626 || cant_combine_insn_p (i2
)
2627 || (i1
&& cant_combine_insn_p (i1
))
2628 || (i0
&& cant_combine_insn_p (i0
))
2629 || likely_spilled_retval_p (i3
))
2633 undobuf
.other_insn
= 0;
2635 /* Reset the hard register usage information. */
2636 CLEAR_HARD_REG_SET (newpat_used_regs
);
2638 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2641 fprintf (dump_file
, "\nTrying %d, %d, %d -> %d:\n",
2642 INSN_UID (i0
), INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
2644 fprintf (dump_file
, "\nTrying %d, %d -> %d:\n",
2645 INSN_UID (i1
), INSN_UID (i2
), INSN_UID (i3
));
2647 fprintf (dump_file
, "\nTrying %d -> %d:\n",
2648 INSN_UID (i2
), INSN_UID (i3
));
2651 /* If multiple insns feed into one of I2 or I3, they can be in any
2652 order. To simplify the code below, reorder them in sequence. */
2653 if (i0
&& DF_INSN_LUID (i0
) > DF_INSN_LUID (i2
))
2654 temp
= i2
, i2
= i0
, i0
= temp
;
2655 if (i0
&& DF_INSN_LUID (i0
) > DF_INSN_LUID (i1
))
2656 temp
= i1
, i1
= i0
, i0
= temp
;
2657 if (i1
&& DF_INSN_LUID (i1
) > DF_INSN_LUID (i2
))
2658 temp
= i1
, i1
= i2
, i2
= temp
;
2660 added_links_insn
= 0;
2662 /* First check for one important special case that the code below will
2663 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2664 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2665 we may be able to replace that destination with the destination of I3.
2666 This occurs in the common code where we compute both a quotient and
2667 remainder into a structure, in which case we want to do the computation
2668 directly into the structure to avoid register-register copies.
2670 Note that this case handles both multiple sets in I2 and also cases
2671 where I2 has a number of CLOBBERs inside the PARALLEL.
2673 We make very conservative checks below and only try to handle the
2674 most common cases of this. For example, we only handle the case
2675 where I2 and I3 are adjacent to avoid making difficult register
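/* Hedged sketch of the situation described above (added commentary; the
   register numbers are invented): I2 might be
     (parallel [(set (reg:SI 90) (div:SI (reg:SI 80) (reg:SI 81)))
                (set (reg:SI 91) (mod:SI (reg:SI 80) (reg:SI 81)))])
   and I3 simply
     (set (mem:SI (reg:SI 70)) (reg:SI 90))
   Substituting I3's destination for (reg:SI 90) inside the PARALLEL lets
   the quotient be computed straight into the structure slot, avoiding the
   register-register copy. */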
2678 if (i1
== 0 && NONJUMP_INSN_P (i3
) && GET_CODE (PATTERN (i3
)) == SET
2679 && REG_P (SET_SRC (PATTERN (i3
)))
2680 && REGNO (SET_SRC (PATTERN (i3
))) >= FIRST_PSEUDO_REGISTER
2681 && find_reg_note (i3
, REG_DEAD
, SET_SRC (PATTERN (i3
)))
2682 && GET_CODE (PATTERN (i2
)) == PARALLEL
2683 && ! side_effects_p (SET_DEST (PATTERN (i3
)))
2684 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2685 below would need to check what is inside (and reg_overlap_mentioned_p
2686 doesn't support those codes anyway). Don't allow those destinations;
2687 the resulting insn isn't likely to be recognized anyway. */
2688 && GET_CODE (SET_DEST (PATTERN (i3
))) != ZERO_EXTRACT
2689 && GET_CODE (SET_DEST (PATTERN (i3
))) != STRICT_LOW_PART
2690 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3
)),
2691 SET_DEST (PATTERN (i3
)))
2692 && next_active_insn (i2
) == i3
)
2694 rtx p2
= PATTERN (i2
);
2696 /* Make sure that the destination of I3,
2697 which we are going to substitute into one output of I2,
2698 is not used within another output of I2. We must avoid making this:
2699 (parallel [(set (mem (reg 69)) ...)
2700 (set (reg 69) ...)])
2701 which is not well-defined as to order of actions.
2702 (Besides, reload can't handle output reloads for this.)
2704 The problem can also happen if the dest of I3 is a memory ref,
2705 if another dest in I2 is an indirect memory ref. */
2706 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2707 if ((GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2708 || GET_CODE (XVECEXP (p2
, 0, i
)) == CLOBBER
)
2709 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3
)),
2710 SET_DEST (XVECEXP (p2
, 0, i
))))
2713 if (i
== XVECLEN (p2
, 0))
2714 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2715 if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2716 && SET_DEST (XVECEXP (p2
, 0, i
)) == SET_SRC (PATTERN (i3
)))
2721 subst_low_luid
= DF_INSN_LUID (i2
);
2723 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2724 i2src
= SET_SRC (XVECEXP (p2
, 0, i
));
2725 i2dest
= SET_DEST (XVECEXP (p2
, 0, i
));
2726 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2728 /* Replace the dest in I2 with our dest and make the resulting
2729 insn the new pattern for I3. Then skip to where we validate
2730 the pattern. Everything was set up above. */
2731 SUBST (SET_DEST (XVECEXP (p2
, 0, i
)), SET_DEST (PATTERN (i3
)));
2733 i3_subst_into_i2
= 1;
2734 goto validate_replacement
;
2738 /* If I2 is setting a pseudo to a constant and I3 is setting some
2739 sub-part of it to another constant, merge them by making a new
2742 && (temp
= single_set (i2
)) != 0
2743 && (CONST_INT_P (SET_SRC (temp
))
2744 || GET_CODE (SET_SRC (temp
)) == CONST_DOUBLE
)
2745 && GET_CODE (PATTERN (i3
)) == SET
2746 && (CONST_INT_P (SET_SRC (PATTERN (i3
)))
2747 || GET_CODE (SET_SRC (PATTERN (i3
))) == CONST_DOUBLE
)
2748 && reg_subword_p (SET_DEST (PATTERN (i3
)), SET_DEST (temp
)))
2750 rtx dest
= SET_DEST (PATTERN (i3
));
2754 if (GET_CODE (dest
) == ZERO_EXTRACT
)
2756 if (CONST_INT_P (XEXP (dest
, 1))
2757 && CONST_INT_P (XEXP (dest
, 2)))
2759 width
= INTVAL (XEXP (dest
, 1));
2760 offset
= INTVAL (XEXP (dest
, 2));
2761 dest
= XEXP (dest
, 0);
2762 if (BITS_BIG_ENDIAN
)
2763 offset
= GET_MODE_BITSIZE (GET_MODE (dest
)) - width
- offset
;
2768 if (GET_CODE (dest
) == STRICT_LOW_PART
)
2769 dest
= XEXP (dest
, 0);
2770 width
= GET_MODE_BITSIZE (GET_MODE (dest
));
2776 /* If this is the low part, we're done. */
2777 if (subreg_lowpart_p (dest
))
2779 /* Handle the case where inner is twice the size of outer. */
2780 else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp
)))
2781 == 2 * GET_MODE_BITSIZE (GET_MODE (dest
)))
2782 offset
+= GET_MODE_BITSIZE (GET_MODE (dest
));
2783 /* Otherwise give up for now. */
2789 && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp
)))
2790 <= HOST_BITS_PER_DOUBLE_INT
))
2793 rtx inner = SET_SRC (PATTERN (i3));
2794 rtx outer = SET_SRC (temp);
2796 o = rtx_to_double_int (outer);
2797 i = rtx_to_double_int (inner);
2799 m = double_int_mask (width);
2800 i = double_int_and (i, m);
2801 m = double_int_lshift (m, offset, HOST_BITS_PER_DOUBLE_INT, false);
2802 i = double_int_lshift (i, offset, HOST_BITS_PER_DOUBLE_INT, false);
2803 o = double_int_ior (double_int_and_not (o, m), i);
2807 subst_low_luid
= DF_INSN_LUID (i2
);
2808 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2809 i2dest
= SET_DEST (temp
);
2810 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2812 /* Replace the source in I2 with the new constant and make the
2813 resulting insn the new pattern for I3. Then skip to where we
2814 validate the pattern. Everything was set up above. */
2815 SUBST (SET_SRC (temp
),
2816 immed_double_int_const (o
, GET_MODE (SET_DEST (temp
))));
2818 newpat
= PATTERN (i2
);
2820 /* The dest of I3 has been replaced with the dest of I2. */
2821 changed_i3_dest
= 1;
2822 goto validate_replacement;
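/* Hedged numeric illustration of the constant merge above (added
   commentary; the particular values are made up): if I2 sets a pseudo to
   0x12345678 and I3 stores 0xBEEF into its low 16 bits (offset 0, width
   16), the mask M is 0xFFFF, the inner value is masked and shifted into
   place, and the combined constant becomes
   (0x12345678 & ~0xFFFF) | 0xBEEF = 0x1234BEEF, which then replaces the
   SET_SRC of the original I2 pattern. */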
2827 /* If we have no I1 and I2 looks like:
2828 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2830 make up a dummy I1 that is
2833 (set (reg:CC X) (compare:CC Y (const_int 0)))
2835 (We can ignore any trailing CLOBBERs.)
2837 This undoes a previous combination and allows us to match a branch-and-
2840 if (i1
== 0 && GET_CODE (PATTERN (i2
)) == PARALLEL
2841 && XVECLEN (PATTERN (i2
), 0) >= 2
2842 && GET_CODE (XVECEXP (PATTERN (i2
), 0, 0)) == SET
2843 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0))))
2845 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0))) == COMPARE
2846 && XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 1) == const0_rtx
2847 && GET_CODE (XVECEXP (PATTERN (i2
), 0, 1)) == SET
2848 && REG_P (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)))
2849 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 0),
2850 SET_SRC (XVECEXP (PATTERN (i2
), 0, 1))))
2852 for (i
= XVECLEN (PATTERN (i2
), 0) - 1; i
>= 2; i
--)
2853 if (GET_CODE (XVECEXP (PATTERN (i2
), 0, i
)) != CLOBBER
)
2858 /* We make I1 with the same INSN_UID as I2. This gives it
2859 the same DF_INSN_LUID for value tracking. Our fake I1 will
2860 never appear in the insn stream so giving it the same INSN_UID
2861 as I2 will not cause a problem. */
2863 i1
= gen_rtx_INSN (VOIDmode
, INSN_UID (i2
), NULL_RTX
, i2
,
2864 BLOCK_FOR_INSN (i2
), XVECEXP (PATTERN (i2
), 0, 1),
2865 INSN_LOCATOR (i2
), -1, NULL_RTX
);
2867 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 0));
2868 SUBST (XEXP (SET_SRC (PATTERN (i2
)), 0),
2869 SET_DEST (PATTERN (i1
)));
2874 /* Verify that I2 and I1 are valid for combining. */
2875 if (! can_combine_p (i2
, i3
, i0
, i1
, NULL_RTX
, NULL_RTX
, &i2dest
, &i2src
)
2876 || (i1
&& ! can_combine_p (i1
, i3
, i0
, NULL_RTX
, i2
, NULL_RTX
,
2878 || (i0
&& ! can_combine_p (i0
, i3
, NULL_RTX
, NULL_RTX
, i1
, i2
,
2885 /* Record whether I2DEST is used in I2SRC and similarly for the other
2886 cases. Knowing this will help in register status updating below. */
2887 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
2888 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
2889 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
2890 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
2891 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
2892 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
2893 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2894 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
2895 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
2897 /* For the earlier insns, determine which of the subsequent ones they
2899 i1_feeds_i2_n
= i1
&& insn_a_feeds_b (i1
, i2
);
2900 i0_feeds_i1_n
= i0
&& insn_a_feeds_b (i0
, i1
);
2901 i0_feeds_i2_n
= (i0
&& (!i0_feeds_i1_n
? insn_a_feeds_b (i0
, i2
)
2902 : (!reg_overlap_mentioned_p (i1dest
, i0dest
)
2903 && reg_overlap_mentioned_p (i0dest
, i2src
))));
2905 /* Ensure that I3's pattern can be the destination of combines. */
2906 if (! combinable_i3pat (i3
, &PATTERN (i3
), i2dest
, i1dest
, i0dest
,
2907 i1
&& i2dest_in_i1src
&& !i1_feeds_i2_n
,
2908 i0
&& ((i2dest_in_i0src
&& !i0_feeds_i2_n
)
2909 || (i1dest_in_i0src
&& !i0_feeds_i1_n
)),
2916 /* See if any of the insns is a MULT operation. Unless one is, we will
2917 reject a combination that is, since it must be slower. Be conservative
2919 if (GET_CODE (i2src
) == MULT
2920 || (i1
!= 0 && GET_CODE (i1src
) == MULT
)
2921 || (i0
!= 0 && GET_CODE (i0src
) == MULT
)
2922 || (GET_CODE (PATTERN (i3
)) == SET
2923 && GET_CODE (SET_SRC (PATTERN (i3
))) == MULT
))
2926 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2927 We used to do this EXCEPT in one case: I3 has a post-inc in an
2928 output operand. However, that exception can give rise to insns like
2930 which is a famous insn on the PDP-11 where the value of r3 used as the
2931 source was model-dependent. Avoid this sort of thing. */
2934 if (!(GET_CODE (PATTERN (i3
)) == SET
2935 && REG_P (SET_SRC (PATTERN (i3
)))
2936 && MEM_P (SET_DEST (PATTERN (i3
)))
2937 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_INC
2938 || GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_DEC
)))
2939 /* It's not the exception. */
2942 for (link
= REG_NOTES (i3
); link
; link
= XEXP (link
, 1))
2943 if (REG_NOTE_KIND (link
) == REG_INC
2944 && (reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i2
))
2946 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i1
)))))
2953 /* See if the SETs in I1 or I2 need to be kept around in the merged
2954 instruction: whenever the value set there is still needed past I3.
2955 For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2957 For the SET in I1, we have two cases: If I1 and I2 independently
2958 feed into I3, the set in I1 needs to be kept around if I1DEST dies
2959 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2960 in I1 needs to be kept around unless I1DEST dies or is set in either
2961 I2 or I3. The same consideration applies to I0. */
2963 added_sets_2
= !dead_or_set_p (i3
, i2dest
);
2966 added_sets_1
= !(dead_or_set_p (i3
, i1dest
)
2967 || (i1_feeds_i2_n
&& dead_or_set_p (i2
, i1dest
)));
2972 added_sets_0
= !(dead_or_set_p (i3
, i0dest
)
2973 || (i0_feeds_i2_n
&& dead_or_set_p (i2
, i0dest
))
2974 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
)));
2978 /* We are about to copy insns for the case where they need to be kept
2979 around. Check that they can be copied in the merged instruction. */
2981 if (targetm
.cannot_copy_insn_p
2982 && ((added_sets_2
&& targetm
.cannot_copy_insn_p (i2
))
2983 || (i1
&& added_sets_1
&& targetm
.cannot_copy_insn_p (i1
))
2984 || (i0
&& added_sets_0
&& targetm
.cannot_copy_insn_p (i0
))))
2990 /* If the set in I2 needs to be kept around, we must make a copy of
2991 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
2992 PATTERN (I2), we are only substituting for the original I1DEST, not into
2993 an already-substituted copy. This also prevents making self-referential
2994 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
2999 if (GET_CODE (PATTERN (i2
)) == PARALLEL
)
3000 i2pat
= gen_rtx_SET (VOIDmode
, i2dest
, copy_rtx (i2src
));
3002 i2pat
= copy_rtx (PATTERN (i2
));
3007 if (GET_CODE (PATTERN (i1
)) == PARALLEL
)
3008 i1pat
= gen_rtx_SET (VOIDmode
, i1dest
, copy_rtx (i1src
));
3010 i1pat
= copy_rtx (PATTERN (i1
));
3015 if (GET_CODE (PATTERN (i0
)) == PARALLEL
)
3016 i0pat
= gen_rtx_SET (VOIDmode
, i0dest
, copy_rtx (i0src
));
3018 i0pat
= copy_rtx (PATTERN (i0
));
3023 /* Substitute in the latest insn for the regs set by the earlier ones. */
3025 maxreg
= max_reg_num ();
3030 /* Many machines that don't use CC0 have insns that can both perform an
3031 arithmetic operation and set the condition code. These operations will
3032 be represented as a PARALLEL with the first element of the vector
3033 being a COMPARE of an arithmetic operation with the constant zero.
3034 The second element of the vector will set some pseudo to the result
3035 of the same arithmetic operation. If we simplify the COMPARE, we won't
3036 match such a pattern and so will generate an extra insn. Here we test
3037 for this case, where both the comparison and the operation result are
3038 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3039 I2SRC. Later we will make the PARALLEL that contains I2. */
3041 if (i1
== 0 && added_sets_2
&& GET_CODE (PATTERN (i3
)) == SET
3042 && GET_CODE (SET_SRC (PATTERN (i3
))) == COMPARE
3043 && XEXP (SET_SRC (PATTERN (i3
)), 1) == const0_rtx
3044 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3
)), 0), i2dest
))
3046 #ifdef SELECT_CC_MODE
3048 enum machine_mode compare_mode
;
3051 newpat
= PATTERN (i3
);
3052 SUBST (XEXP (SET_SRC (newpat
), 0), i2src
);
3056 #ifdef SELECT_CC_MODE
3057 /* See if a COMPARE with the operand we substituted in should be done
3058 with the mode that is currently being used. If not, do the same
3059 processing we do in `subst' for a SET; namely, if the destination
3060 is used only once, try to replace it with a register of the proper
3061 mode and also replace the COMPARE. */
3062 if (undobuf
.other_insn
== 0
3063 && (cc_use
= find_single_use (SET_DEST (newpat
), i3
,
3064 &undobuf
.other_insn
))
3065 && ((compare_mode
= SELECT_CC_MODE (GET_CODE (*cc_use
),
3067 != GET_MODE (SET_DEST (newpat
))))
3069 if (can_change_dest_mode (SET_DEST (newpat
), added_sets_2
,
3072 unsigned int regno
= REGNO (SET_DEST (newpat
));
3075 if (regno
< FIRST_PSEUDO_REGISTER
)
3076 new_dest
= gen_rtx_REG (compare_mode
, regno
);
3079 SUBST_MODE (regno_reg_rtx
[regno
], compare_mode
);
3080 new_dest
= regno_reg_rtx
[regno
];
3083 SUBST (SET_DEST (newpat
), new_dest
);
3084 SUBST (XEXP (*cc_use
, 0), new_dest
);
3085 SUBST (SET_SRC (newpat
),
3086 gen_rtx_COMPARE (compare_mode
, i2src
, const0_rtx
));
3089 undobuf
.other_insn
= 0;
3096 /* It is possible that the source of I2 or I1 may be performing
3097 an unneeded operation, such as a ZERO_EXTEND of something
3098 that is known to have the high part zero. Handle that case
3099 by letting subst look at the innermost one of them.
3101 Another way to do this would be to have a function that tries
3102 to simplify a single insn instead of merging two or more
3103 insns. We don't do this because of the potential of infinite
3104 loops and because of the potential extra memory required.
3105 However, doing it the way we are is a bit of a kludge and
3106 doesn't catch all cases.
3108 But only do this if -fexpensive-optimizations since it slows
3109 things down and doesn't usually win.
3111 This is not done in the COMPARE case above because the
3112 unmodified I2PAT is used in the PARALLEL and so a pattern
3113 with a modified I2SRC would not match. */
3115 if (flag_expensive_optimizations
)
3117 /* Pass pc_rtx so no substitutions are done, just
3121 subst_low_luid
= DF_INSN_LUID (i1
);
3122 i1src
= subst (i1src
, pc_rtx
, pc_rtx
, 0, 0);
3126 subst_low_luid
= DF_INSN_LUID (i2
);
3127 i2src
= subst (i2src
, pc_rtx
, pc_rtx
, 0, 0);
3131 n_occurrences
= 0; /* `subst' counts here */
3132 subst_low_luid
= DF_INSN_LUID (i2
);
3134 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3135 copy of I2SRC each time we substitute it, in order to avoid creating
3136 self-referential RTL when we will be substituting I1SRC for I1DEST
3137 later. Likewise if I0 feeds into I2, either directly or indirectly
3138 through I1, and I0DEST is in I0SRC. */
3139 newpat
= subst (PATTERN (i3
), i2dest
, i2src
, 0,
3140 (i1_feeds_i2_n
&& i1dest_in_i1src
)
3141 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3142 && i0dest_in_i0src
));
3145 /* Record whether I2's body now appears within I3's body. */
3146 i2_is_used
= n_occurrences
;
3149 /* If we already got a failure, don't try to do more. Otherwise, try to
3150 substitute I1 if we have it. */
3152 if (i1
&& GET_CODE (newpat
) != CLOBBER
)
3154 /* Check that an autoincrement side-effect on I1 has not been lost.
3155 This happens if I1DEST is mentioned in I2 and dies there, and
3156 has disappeared from the new pattern. */
3157 if ((FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3159 && dead_or_set_p (i2
, i1dest
)
3160 && !reg_overlap_mentioned_p (i1dest
, newpat
))
3161 /* Before we can do this substitution, we must redo the test done
3162 above (see detailed comments there) that ensures I1DEST isn't
3163 mentioned in any SETs in NEWPAT that are field assignments. */
3164 || !combinable_i3pat (NULL_RTX
, &newpat
, i1dest
, NULL_RTX
, NULL_RTX
,
3172 subst_low_luid
= DF_INSN_LUID (i1
);
3174 /* If the following substitution will modify I1SRC, make a copy of it
3175 for the case where it is substituted for I1DEST in I2PAT later. */
3176 if (added_sets_2
&& i1_feeds_i2_n
)
3177 i1src_copy
= copy_rtx (i1src
);
3179 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3180 copy of I1SRC each time we substitute it, in order to avoid creating
3181 self-referential RTL when we will be substituting I0SRC for I0DEST
3183 newpat
= subst (newpat
, i1dest
, i1src
, 0,
3184 i0_feeds_i1_n
&& i0dest_in_i0src
);
3187 /* Record whether I1's body now appears within I3's body. */
3188 i1_is_used
= n_occurrences
;
3191 /* Likewise for I0 if we have it. */
3193 if (i0
&& GET_CODE (newpat
) != CLOBBER
)
3195 if ((FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3196 && ((i0_feeds_i2_n
&& dead_or_set_p (i2
, i0dest
))
3197 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
)))
3198 && !reg_overlap_mentioned_p (i0dest
, newpat
))
3199 || !combinable_i3pat (NULL_RTX
, &newpat
, i0dest
, NULL_RTX
, NULL_RTX
,
3206 /* If the following substitution will modify I0SRC, make a copy of it
3207 for the case where it is substituted for I0DEST in I1PAT later. */
3208 if (added_sets_1
&& i0_feeds_i1_n
)
3209 i0src_copy
= copy_rtx (i0src
);
3210 /* And a copy for I0DEST in I2PAT substitution. */
3211 if (added_sets_2
&& ((i0_feeds_i1_n
&& i1_feeds_i2_n
)
3212 || (i0_feeds_i2_n
)))
3213 i0src_copy2
= copy_rtx (i0src
);
3216 subst_low_luid
= DF_INSN_LUID (i0
);
3217 newpat
= subst (newpat
, i0dest
, i0src
, 0, 0);
3221 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3222 to count all the ways that I2SRC and I1SRC can be used. */
3223 if ((FIND_REG_INC_NOTE (i2
, NULL_RTX
) != 0
3224 && i2_is_used
+ added_sets_2
> 1)
3225 || (i1
!= 0 && FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3226 && (i1_is_used
+ added_sets_1
+ (added_sets_2
&& i1_feeds_i2_n
)
3228 || (i0
!= 0 && FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3229 && (n_occurrences
+ added_sets_0
3230 + (added_sets_1
&& i0_feeds_i1_n
)
3231 + (added_sets_2
&& i0_feeds_i2_n
)
3233 /* Fail if we tried to make a new register. */
3234 || max_reg_num () != maxreg
3235 /* Fail if we couldn't do something and have a CLOBBER. */
3236 || GET_CODE (newpat
) == CLOBBER
3237 /* Fail if this new pattern is a MULT and we didn't have one before
3238 at the outer level. */
3239 || (GET_CODE (newpat
) == SET
&& GET_CODE (SET_SRC (newpat
)) == MULT
3246 /* If the actions of the earlier insns must be kept
3247 in addition to substituting them into the latest one,
3248 we must make a new PARALLEL for the latest insn
3249 to hold the additional SETs.
3251 if (added_sets_0
|| added_sets_1
|| added_sets_2
)
3253 int extra_sets
= added_sets_0
+ added_sets_1
+ added_sets_2
;
3256 if (GET_CODE (newpat
) == PARALLEL
)
3258 rtvec old
= XVEC (newpat
, 0);
3259 total_sets
= XVECLEN (newpat
, 0) + extra_sets
;
3260 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3261 memcpy (XVEC (newpat
, 0)->elem
, &old
->elem
[0],
3262 sizeof (old
->elem
[0]) * old
->num_elem
);
3267 total_sets
= 1 + extra_sets
;
3268 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3269 XVECEXP (newpat
, 0, 0) = old
;
3273 XVECEXP (newpat
, 0, --total_sets
) = i0pat
;
3279 t
= subst (t
, i0dest
, i0src_copy
? i0src_copy
: i0src
, 0, 0);
3281 XVECEXP (newpat
, 0, --total_sets
) = t
;
3287 t
= subst (t
, i1dest
, i1src_copy
? i1src_copy
: i1src
, 0,
3288 i0_feeds_i1_n
&& i0dest_in_i0src
);
3289 if ((i0_feeds_i1_n
&& i1_feeds_i2_n
) || i0_feeds_i2_n
)
3290 t
= subst (t
, i0dest
, i0src_copy2
? i0src_copy2
: i0src
, 0, 0);
3292 XVECEXP (newpat
, 0, --total_sets
) = t
;
3296 validate_replacement
:
3298 /* Note which hard regs this insn has as inputs. */
3299 mark_used_regs_combine (newpat
);
3301 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3302 consider splitting this pattern, we might need these clobbers. */
3303 if (i1
&& GET_CODE (newpat
) == PARALLEL
3304 && GET_CODE (XVECEXP (newpat
, 0, XVECLEN (newpat
, 0) - 1)) == CLOBBER
)
3306 int len
= XVECLEN (newpat
, 0);
3308 newpat_vec_with_clobbers
= rtvec_alloc (len
);
3309 for (i
= 0; i
< len
; i
++)
3310 RTVEC_ELT (newpat_vec_with_clobbers
, i
) = XVECEXP (newpat
, 0, i
);
3313 /* Is the result of combination a valid instruction? */
3314 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3316 /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3317 the second SET's destination is a register that is unused and isn't
3318 marked as an instruction that might trap in an EH region. In that case,
3319 we just need the first SET. This can occur when simplifying a divmod
3320 insn. We *must* test for this case here because the code below that
3321 splits two independent SETs doesn't handle this case correctly when it
3322 updates the register status.
3324 It's pointless doing this if we originally had two sets, one from
3325 i3, and one from i2. Combining then splitting the parallel results
3326 in the original i2 again plus an invalid insn (which we delete).
3327 The net effect is only to move instructions around, which makes
3328 debug info less accurate.
3330 Also check the case where the first SET's destination is unused.
3331 That would not cause incorrect code, but does cause an unneeded
3334 if (insn_code_number
< 0
3335 && !(added_sets_2
&& i1
== 0)
3336 && GET_CODE (newpat
) == PARALLEL
3337 && XVECLEN (newpat
, 0) == 2
3338 && GET_CODE (XVECEXP (newpat
, 0, 0)) == SET
3339 && GET_CODE (XVECEXP (newpat
, 0, 1)) == SET
3340 && asm_noperands (newpat
) < 0)
3342 rtx set0
= XVECEXP (newpat
, 0, 0);
3343 rtx set1
= XVECEXP (newpat
, 0, 1);
3345 if (((REG_P (SET_DEST (set1
))
3346 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set1
)))
3347 || (GET_CODE (SET_DEST (set1
)) == SUBREG
3348 && find_reg_note (i3
, REG_UNUSED
, SUBREG_REG (SET_DEST (set1
)))))
3349 && insn_nothrow_p (i3
)
3350 && !side_effects_p (SET_SRC (set1
)))
3353 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3356 else if (((REG_P (SET_DEST (set0
))
3357 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set0
)))
3358 || (GET_CODE (SET_DEST (set0
)) == SUBREG
3359 && find_reg_note (i3
, REG_UNUSED
,
3360 SUBREG_REG (SET_DEST (set0
)))))
3361 && insn_nothrow_p (i3
)
3362 && !side_effects_p (SET_SRC (set0
)))
3365 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3367 if (insn_code_number
>= 0)
3368 changed_i3_dest
= 1;
3372 /* If we were combining three insns and the result is a simple SET
3373 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3374 insns. There are two ways to do this. It can be split using a
3375 machine-specific method (like when you have an addition of a large
3376 constant) or by combine in the function find_split_point. */
3378 if (i1
&& insn_code_number
< 0 && GET_CODE (newpat
) == SET
3379 && asm_noperands (newpat
) < 0)
3381 rtx parallel
, m_split
, *split
;
3383 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3384 use I2DEST as a scratch register will help. In the latter case,
3385 convert I2DEST to the mode of the source of NEWPAT if we can. */
3387 m_split
= combine_split_insns (newpat
, i3
);
3389 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3390 inputs of NEWPAT. */
3392 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3393 possible to try that as a scratch reg. This would require adding
3394 more code to make it work though. */
3396 if (m_split
== 0 && ! reg_overlap_mentioned_p (i2dest
, newpat
))
3398 enum machine_mode new_mode
= GET_MODE (SET_DEST (newpat
));
3400 /* First try to split using the original register as a
3401 scratch register. */
3402 parallel
= gen_rtx_PARALLEL (VOIDmode
,
3403 gen_rtvec (2, newpat
,
3404 gen_rtx_CLOBBER (VOIDmode
,
3406 m_split
= combine_split_insns (parallel
, i3
);
3408 /* If that didn't work, try changing the mode of I2DEST if
3411 && new_mode
!= GET_MODE (i2dest
)
3412 && new_mode
!= VOIDmode
3413 && can_change_dest_mode (i2dest
, added_sets_2
, new_mode
))
3415 enum machine_mode old_mode
= GET_MODE (i2dest
);
3418 if (REGNO (i2dest
) < FIRST_PSEUDO_REGISTER
)
3419 ni2dest
= gen_rtx_REG (new_mode
, REGNO (i2dest
));
3422 SUBST_MODE (regno_reg_rtx
[REGNO (i2dest
)], new_mode
);
3423 ni2dest
= regno_reg_rtx
[REGNO (i2dest
)];
3426 parallel
= (gen_rtx_PARALLEL
3428 gen_rtvec (2, newpat
,
3429 gen_rtx_CLOBBER (VOIDmode
,
3431 m_split
= combine_split_insns (parallel
, i3
);
3434 && REGNO (i2dest
) >= FIRST_PSEUDO_REGISTER
)
3438 adjust_reg_mode (regno_reg_rtx
[REGNO (i2dest
)], old_mode
);
3439 buf
= undobuf
.undos
;
3440 undobuf
.undos
= buf
->next
;
3441 buf
->next
= undobuf
.frees
;
3442 undobuf
.frees
= buf
;
3446 i2scratch
= m_split
!= 0;
3449 /* If recog_for_combine has discarded clobbers, try to use them
3450 again for the split. */
3451 if (m_split
== 0 && newpat_vec_with_clobbers
)
3453 parallel
= gen_rtx_PARALLEL (VOIDmode
, newpat_vec_with_clobbers
);
3454 m_split
= combine_split_insns (parallel
, i3
);
3457 if (m_split
&& NEXT_INSN (m_split
) == NULL_RTX
)
3459 m_split
= PATTERN (m_split
);
3460 insn_code_number
= recog_for_combine (&m_split
, i3
, &new_i3_notes
);
3461 if (insn_code_number
>= 0)
3464 else if (m_split
&& NEXT_INSN (NEXT_INSN (m_split
)) == NULL_RTX
3465 && (next_real_insn (i2
) == i3
3466 || ! use_crosses_set_p (PATTERN (m_split
), DF_INSN_LUID (i2
))))
3469 rtx newi3pat
= PATTERN (NEXT_INSN (m_split
));
3470 newi2pat
= PATTERN (m_split
);
3472 i3set
= single_set (NEXT_INSN (m_split
));
3473 i2set
= single_set (m_split
);
3475 i2_code_number
= recog_for_combine (&newi2pat
, i2
, &new_i2_notes
);
3477 /* If I2 or I3 has multiple SETs, we won't know how to track
3478 register status, so don't use these insns. If I2's destination
3479 is used between I2 and I3, we also can't use these insns. */
3481 if (i2_code_number
>= 0 && i2set
&& i3set
3482 && (next_real_insn (i2
) == i3
3483 || ! reg_used_between_p (SET_DEST (i2set
), i2
, i3
)))
3484 insn_code_number
= recog_for_combine (&newi3pat
, i3
,
3486 if (insn_code_number
>= 0)
3489 /* It is possible that both insns now set the destination of I3.
3490 If so, we must show an extra use of it. */
3492 if (insn_code_number
>= 0)
3494 rtx new_i3_dest
= SET_DEST (i3set
);
3495 rtx new_i2_dest
= SET_DEST (i2set
);
3497 while (GET_CODE (new_i3_dest
) == ZERO_EXTRACT
3498 || GET_CODE (new_i3_dest
) == STRICT_LOW_PART
3499 || GET_CODE (new_i3_dest
) == SUBREG
)
3500 new_i3_dest
= XEXP (new_i3_dest
, 0);
3502 while (GET_CODE (new_i2_dest
) == ZERO_EXTRACT
3503 || GET_CODE (new_i2_dest
) == STRICT_LOW_PART
3504 || GET_CODE (new_i2_dest
) == SUBREG
)
3505 new_i2_dest
= XEXP (new_i2_dest
, 0);
3507 if (REG_P (new_i3_dest
)
3508 && REG_P (new_i2_dest
)
3509 && REGNO (new_i3_dest
) == REGNO (new_i2_dest
))
3510 INC_REG_N_SETS (REGNO (new_i2_dest
), 1);
3514 /* If we can split it and use I2DEST, go ahead and see if that
3515 helps things be recognized. Verify that none of the registers
3516 are set between I2 and I3. */
3517 if (insn_code_number
< 0
3518 && (split
= find_split_point (&newpat
, i3
, false)) != 0
3522 /* We need I2DEST in the proper mode. If it is a hard register
3523 or the only use of a pseudo, we can change its mode.
3524 Make sure we don't change a hard register to have a mode that
3525 isn't valid for it, or change the number of registers. */
3526 && (GET_MODE (*split
) == GET_MODE (i2dest
)
3527 || GET_MODE (*split
) == VOIDmode
3528 || can_change_dest_mode (i2dest
, added_sets_2
,
3530 && (next_real_insn (i2
) == i3
3531 || ! use_crosses_set_p (*split
, DF_INSN_LUID (i2
)))
3532 /* We can't overwrite I2DEST if its value is still used by
3534 && ! reg_referenced_p (i2dest
, newpat
))
3536 rtx newdest
= i2dest
;
3537 enum rtx_code split_code
= GET_CODE (*split
);
3538 enum machine_mode split_mode
= GET_MODE (*split
);
3539 bool subst_done
= false;
3540 newi2pat
= NULL_RTX
;
3544 /* *SPLIT may be part of I2SRC, so make sure we have the
3545 original expression around for later debug processing.
3546 We should not need I2SRC any more in other cases. */
3547 if (MAY_HAVE_DEBUG_INSNS
)
3548 i2src
= copy_rtx (i2src
);
3552 /* Get NEWDEST as a register in the proper mode. We have already
3553 validated that we can do this. */
3554 if (GET_MODE (i2dest
) != split_mode
&& split_mode
!= VOIDmode
)
3556 if (REGNO (i2dest
) < FIRST_PSEUDO_REGISTER
)
3557 newdest
= gen_rtx_REG (split_mode
, REGNO (i2dest
));
3560 SUBST_MODE (regno_reg_rtx
[REGNO (i2dest
)], split_mode
);
3561 newdest
= regno_reg_rtx
[REGNO (i2dest
)];
3565 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3566 an ASHIFT. This can occur if it was inside a PLUS and hence
3567 appeared to be a memory address. This is a kludge. */
3568 if (split_code == MULT
3569 && CONST_INT_P (XEXP (*split, 1))
3570 && INTVAL (XEXP (*split, 1)) > 0
3571 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3573 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3574 XEXP (*split, 0), GEN_INT (i)));
3575 /* Update split_code because we may not have a multiply
3577 split_code = GET_CODE (*split);
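/* Hedged example of the kludge above (added commentary): a split point such
   as (mult:SI (reg:SI 60) (const_int 8)), which typically arose from an
   address calculation, is rewritten as (ashift:SI (reg:SI 60) (const_int 3))
   since exact_log2 (8) == 3; split_code is then refreshed because the
   expression is no longer a MULT.  The register number is invented. */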
3580 #ifdef INSN_SCHEDULING
3581 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3582 be written as a ZERO_EXTEND. */
3583 if (split_code
== SUBREG
&& MEM_P (SUBREG_REG (*split
)))
3585 #ifdef LOAD_EXTEND_OP
3586 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3587 what it really is. */
3588 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split
)))
3590 SUBST (*split
, gen_rtx_SIGN_EXTEND (split_mode
,
3591 SUBREG_REG (*split
)));
3594 SUBST (*split
, gen_rtx_ZERO_EXTEND (split_mode
,
3595 SUBREG_REG (*split
)));
3599 /* Attempt to split binary operators using arithmetic identities. */
3600 if (BINARY_P (SET_SRC (newpat
))
3601 && split_mode
== GET_MODE (SET_SRC (newpat
))
3602 && ! side_effects_p (SET_SRC (newpat
)))
3604 rtx setsrc
= SET_SRC (newpat
);
3605 enum machine_mode mode
= GET_MODE (setsrc
);
3606 enum rtx_code code
= GET_CODE (setsrc
);
3607 rtx src_op0
= XEXP (setsrc
, 0);
3608 rtx src_op1
= XEXP (setsrc
, 1);
3610 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3611 if (rtx_equal_p (src_op0, src_op1))
3613 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3614 SUBST (XEXP (setsrc, 0), newdest);
3615 SUBST (XEXP (setsrc, 1), newdest);
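/* Hedged illustration of the identity split above (added commentary):
     (set (reg:SI 70) (plus:SI (reg:SI 71) (reg:SI 71)))
   becomes the two-insn sequence
     newi2pat:  (set (reg:SI 72) (reg:SI 71))
     newpat:    (set (reg:SI 70) (plus:SI (reg:SI 72) (reg:SI 72)))
   where reg 72 stands for NEWDEST; the register numbers are invented. */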
3618 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3619 else if ((code
== PLUS
|| code
== MULT
)
3620 && GET_CODE (src_op0
) == code
3621 && GET_CODE (XEXP (src_op0
, 0)) == code
3622 && (INTEGRAL_MODE_P (mode
)
3623 || (FLOAT_MODE_P (mode
)
3624 && flag_unsafe_math_optimizations
)))
3626 rtx p
= XEXP (XEXP (src_op0
, 0), 0);
3627 rtx q
= XEXP (XEXP (src_op0
, 0), 1);
3628 rtx r
= XEXP (src_op0
, 1);
3631 /* Split both "((X op Y) op X) op Y" and
3632 "((X op Y) op Y) op X" as "T op T" where T is
3634 if ((rtx_equal_p (p
,r
) && rtx_equal_p (q
,s
))
3635 || (rtx_equal_p (p
,s
) && rtx_equal_p (q
,r
)))
3637 newi2pat
= gen_rtx_SET (VOIDmode
, newdest
,
3639 SUBST (XEXP (setsrc
, 0), newdest
);
3640 SUBST (XEXP (setsrc
, 1), newdest
);
3643 /* Split "((X op X) op Y) op Y)" as "T op T" where
3645 else if (rtx_equal_p (p
,q
) && rtx_equal_p (r
,s
))
3647 rtx tmp
= simplify_gen_binary (code
, mode
, p
, r
);
3648 newi2pat
= gen_rtx_SET (VOIDmode
, newdest
, tmp
);
3649 SUBST (XEXP (setsrc
, 0), newdest
);
3650 SUBST (XEXP (setsrc
, 1), newdest
);
3658 newi2pat
= gen_rtx_SET (VOIDmode
, newdest
, *split
);
3659 SUBST (*split
, newdest
);
3662 i2_code_number
= recog_for_combine (&newi2pat
, i2
, &new_i2_notes
);
3664 /* recog_for_combine might have added CLOBBERs to newi2pat.
3665 Make sure NEWPAT does not depend on the clobbered regs. */
3666 if (GET_CODE (newi2pat
) == PARALLEL
)
3667 for (i
= XVECLEN (newi2pat
, 0) - 1; i
>= 0; i
--)
3668 if (GET_CODE (XVECEXP (newi2pat
, 0, i
)) == CLOBBER
)
3670 rtx reg
= XEXP (XVECEXP (newi2pat
, 0, i
), 0);
3671 if (reg_overlap_mentioned_p (reg
, newpat
))
3678 /* If the split point was a MULT and we didn't have one before,
3679 don't use one now. */
3680 if (i2_code_number
>= 0 && ! (split_code
== MULT
&& ! have_mult
))
3681 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3685 /* Check for a case where we loaded from memory in a narrow mode and
3686 then sign extended it, but we need both registers. In that case,
3687 we have a PARALLEL with both loads from the same memory location.
3688 We can split this into a load from memory followed by a register-register
3689 copy. This saves at least one insn, more if register allocation can
3692 We cannot do this if the destination of the first assignment is a
3693 condition code register or cc0. We eliminate this case by making sure
3694 the SET_DEST and SET_SRC have the same mode.
3696 We cannot do this if the destination of the second assignment is
3697 a register that we have already assumed is zero-extended. Similarly
3698 for a SUBREG of such a register. */
  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
		 && VEC_index (reg_stat_type, reg_stat,
			       REGNO (temp))->nonzero_bits != 0
		 && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
		 && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
		 && (VEC_index (reg_stat_type, reg_stat,
				REGNO (temp))->nonzero_bits
		     != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     && VEC_index (reg_stat_type, reg_stat,
				   REGNO (temp))->nonzero_bits != 0
		     && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
		     && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
		     && (VEC_index (reg_stat_type, reg_stat,
				    REGNO (temp))->nonzero_bits
			 != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.  */
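  /* For example (a hypothetical pattern, register numbers made up):
       (parallel [(set (reg:SI 104) (plus:SI (reg:SI 105) (reg:SI 106)))
		  (set (reg:SI 107) (mult:SI (reg:SI 108) (reg:SI 109)))])
     where neither SET reads the other's destination can simply be issued
     as two separate insns, one becoming NEWI2PAT and the other NEWPAT.  */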
  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  */
      if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
	  && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))

	  newi2pat = XVECEXP (newpat, 0, 1);
	  newpat = XVECEXP (newpat, 0, 0);

      else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
	       && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))

	  newi2pat = XVECEXP (newpat, 0, 0);
	  newpat = XVECEXP (newpat, 0, 1);
3805 i2_code_number
= recog_for_combine (&newi2pat
, i2
, &new_i2_notes
);
3807 if (i2_code_number
>= 0)
3809 /* recog_for_combine might have added CLOBBERs to newi2pat.
3810 Make sure NEWPAT does not depend on the clobbered regs. */
3811 if (GET_CODE (newi2pat
) == PARALLEL
)
3813 for (i
= XVECLEN (newi2pat
, 0) - 1; i
>= 0; i
--)
3814 if (GET_CODE (XVECEXP (newi2pat
, 0, i
)) == CLOBBER
)
3816 rtx reg
= XEXP (XVECEXP (newi2pat
, 0, i
), 0);
3817 if (reg_overlap_mentioned_p (reg
, newpat
))
3825 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3839 /* If we had to change another insn, make sure it is valid also. */
3840 if (undobuf
.other_insn
)
3842 CLEAR_HARD_REG_SET (newpat_used_regs
);
3844 other_pat
= PATTERN (undobuf
.other_insn
);
3845 other_code_number
= recog_for_combine (&other_pat
, undobuf
.other_insn
,
3848 if (other_code_number
< 0 && ! check_asm_operands (other_pat
))
  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
      rtx p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))

  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3877 if (MAY_HAVE_DEBUG_INSNS
)
3881 for (undo
= undobuf
.undos
; undo
; undo
= undo
->next
)
3882 if (undo
->kind
== UNDO_MODE
)
3884 rtx reg
= *undo
->where
.r
;
3885 enum machine_mode new_mode
= GET_MODE (reg
);
3886 enum machine_mode old_mode
= undo
->old_contents
.m
;
3888 /* Temporarily revert mode back. */
3889 adjust_reg_mode (reg
, old_mode
);
3891 if (reg
== i2dest
&& i2scratch
)
3893 /* If we used i2dest as a scratch register with a
3894 different mode, substitute it for the original
3895 i2src while its original mode is temporarily
3896 restored, and then clear i2scratch so that we don't
3897 do it again later. */
3898 propagate_for_debug (i2
, last_combined_insn
, reg
, i2src
);
3900 /* Put back the new mode. */
3901 adjust_reg_mode (reg
, new_mode
);
3905 rtx tempreg
= gen_raw_REG (old_mode
, REGNO (reg
));
3911 last
= last_combined_insn
;
3916 last
= undobuf
.other_insn
;
3918 if (DF_INSN_LUID (last
)
3919 < DF_INSN_LUID (last_combined_insn
))
3920 last
= last_combined_insn
;
3923 /* We're dealing with a reg that changed mode but not
3924 meaning, so we want to turn it into a subreg for
3925 the new mode. However, because of REG sharing and
3926 because its mode had already changed, we have to do
3927 it in two steps. First, replace any debug uses of
3928 reg, with its original mode temporarily restored,
3929 with this copy we have created; then, replace the
3930 copy with the SUBREG of the original shared reg,
3931 once again changed to the new mode. */
3932 propagate_for_debug (first
, last
, reg
, tempreg
);
3933 adjust_reg_mode (reg
, new_mode
);
3934 propagate_for_debug (first
, last
, tempreg
,
3935 lowpart_subreg (old_mode
, reg
, new_mode
));
3940 /* If we will be able to accept this, we have made a
3941 change to the destination of I3. This requires us to
3942 do a few adjustments. */
3944 if (changed_i3_dest
)
3946 PATTERN (i3
) = newpat
;
3947 adjust_for_new_dest (i3
);
3950 /* We now know that we can do this combination. Merge the insns and
3951 update the status of registers and LOG_LINKS. */
3953 if (undobuf
.other_insn
)
3957 PATTERN (undobuf
.other_insn
) = other_pat
;
3959 /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
3960 are still valid. Then add any non-duplicate notes added by
3961 recog_for_combine. */
3962 for (note
= REG_NOTES (undobuf
.other_insn
); note
; note
= next
)
3964 next
= XEXP (note
, 1);
3966 if (REG_NOTE_KIND (note
) == REG_UNUSED
3967 && ! reg_set_p (XEXP (note
, 0), PATTERN (undobuf
.other_insn
)))
3968 remove_note (undobuf
.other_insn
, note
);
3971 distribute_notes (new_other_notes
, undobuf
.other_insn
,
3972 undobuf
.other_insn
, NULL_RTX
, NULL_RTX
, NULL_RTX
,
3982 /* I3 now uses what used to be its destination and which is now
3983 I2's destination. This requires us to do a few adjustments. */
3984 PATTERN (i3
) = newpat
;
3985 adjust_for_new_dest (i3
);
3987 /* We need a LOG_LINK from I3 to I2. But we used to have one,
3990 However, some later insn might be using I2's dest and have
3991 a LOG_LINK pointing at I3. We must remove this link.
3992 The simplest way to remove the link is to point it at I1,
3993 which we know will be a NOTE. */
3995 /* newi2pat is usually a SET here; however, recog_for_combine might
3996 have added some clobbers. */
3997 if (GET_CODE (newi2pat
) == PARALLEL
)
3998 ni2dest
= SET_DEST (XVECEXP (newi2pat
, 0, 0));
4000 ni2dest
= SET_DEST (newi2pat
);
4002 for (insn
= NEXT_INSN (i3
);
4003 insn
&& (this_basic_block
->next_bb
== EXIT_BLOCK_PTR
4004 || insn
!= BB_HEAD (this_basic_block
->next_bb
));
4005 insn
= NEXT_INSN (insn
))
4007 if (INSN_P (insn
) && reg_referenced_p (ni2dest
, PATTERN (insn
)))
4009 for (link
= LOG_LINKS (insn
); link
;
4010 link
= XEXP (link
, 1))
4011 if (XEXP (link
, 0) == i3
)
4012 XEXP (link
, 0) = i1
;
4020 rtx i3notes
, i2notes
, i1notes
= 0, i0notes
= 0;
4021 rtx i3links
, i2links
, i1links
= 0, i0links
= 0;
4025 /* Compute which registers we expect to eliminate. newi2pat may be setting
4026 either i3dest or i2dest, so we must check it. Also, i1dest may be the
4027 same as i3dest, in which case newi2pat may be setting i1dest. */
4028 rtx elim_i2
= ((newi2pat
&& reg_set_p (i2dest
, newi2pat
))
4029 || i2dest_in_i2src
|| i2dest_in_i1src
|| i2dest_in_i0src
4032 rtx elim_i1
= (i1
== 0 || i1dest_in_i1src
|| i1dest_in_i0src
4033 || (newi2pat
&& reg_set_p (i1dest
, newi2pat
))
4036 rtx elim_i0
= (i0
== 0 || i0dest_in_i0src
4037 || (newi2pat
&& reg_set_p (i0dest
, newi2pat
))
4041 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4043 i3notes
= REG_NOTES (i3
), i3links
= LOG_LINKS (i3
);
4044 i2notes
= REG_NOTES (i2
), i2links
= LOG_LINKS (i2
);
4046 i1notes
= REG_NOTES (i1
), i1links
= LOG_LINKS (i1
);
4048 i0notes
= REG_NOTES (i0
), i0links
= LOG_LINKS (i0
);
  /* Ensure that we do not have something that should not be shared but
     occurs multiple times in the new insns.  Check this by first
     resetting all the `used' flags and then copying anything that is shared.  */
4054 reset_used_flags (i3notes
);
4055 reset_used_flags (i2notes
);
4056 reset_used_flags (i1notes
);
4057 reset_used_flags (i0notes
);
4058 reset_used_flags (newpat
);
4059 reset_used_flags (newi2pat
);
4060 if (undobuf
.other_insn
)
4061 reset_used_flags (PATTERN (undobuf
.other_insn
));
4063 i3notes
= copy_rtx_if_shared (i3notes
);
4064 i2notes
= copy_rtx_if_shared (i2notes
);
4065 i1notes
= copy_rtx_if_shared (i1notes
);
4066 i0notes
= copy_rtx_if_shared (i0notes
);
4067 newpat
= copy_rtx_if_shared (newpat
);
4068 newi2pat
= copy_rtx_if_shared (newi2pat
);
4069 if (undobuf
.other_insn
)
4070 reset_used_flags (PATTERN (undobuf
.other_insn
));
4072 INSN_CODE (i3
) = insn_code_number
;
4073 PATTERN (i3
) = newpat
;
4075 if (CALL_P (i3
) && CALL_INSN_FUNCTION_USAGE (i3
))
4077 rtx call_usage
= CALL_INSN_FUNCTION_USAGE (i3
);
4079 reset_used_flags (call_usage
);
4080 call_usage
= copy_rtx (call_usage
);
4084 /* I2SRC must still be meaningful at this point. Some splitting
4085 operations can invalidate I2SRC, but those operations do not
4088 replace_rtx (call_usage
, i2dest
, i2src
);
4092 replace_rtx (call_usage
, i1dest
, i1src
);
4094 replace_rtx (call_usage
, i0dest
, i0src
);
4096 CALL_INSN_FUNCTION_USAGE (i3
) = call_usage
;
4099 if (undobuf
.other_insn
)
4100 INSN_CODE (undobuf
.other_insn
) = other_code_number
;
4102 /* We had one special case above where I2 had more than one set and
4103 we replaced a destination of one of those sets with the destination
4104 of I3. In that case, we have to update LOG_LINKS of insns later
4105 in this basic block. Note that this (expensive) case is rare.
4107 Also, in this case, we must pretend that all REG_NOTEs for I2
4108 actually came from I3, so that REG_UNUSED notes from I2 will be
4109 properly handled. */
4111 if (i3_subst_into_i2
)
4113 for (i
= 0; i
< XVECLEN (PATTERN (i2
), 0); i
++)
4114 if ((GET_CODE (XVECEXP (PATTERN (i2
), 0, i
)) == SET
4115 || GET_CODE (XVECEXP (PATTERN (i2
), 0, i
)) == CLOBBER
)
4116 && REG_P (SET_DEST (XVECEXP (PATTERN (i2
), 0, i
)))
4117 && SET_DEST (XVECEXP (PATTERN (i2
), 0, i
)) != i2dest
4118 && ! find_reg_note (i2
, REG_UNUSED
,
4119 SET_DEST (XVECEXP (PATTERN (i2
), 0, i
))))
4120 for (temp
= NEXT_INSN (i2
);
4121 temp
&& (this_basic_block
->next_bb
== EXIT_BLOCK_PTR
4122 || BB_HEAD (this_basic_block
) != temp
);
4123 temp
= NEXT_INSN (temp
))
4124 if (temp
!= i3
&& INSN_P (temp
))
4125 for (link
= LOG_LINKS (temp
); link
; link
= XEXP (link
, 1))
4126 if (XEXP (link
, 0) == i2
)
4127 XEXP (link
, 0) = i3
;
4132 while (XEXP (link
, 1))
4133 link
= XEXP (link
, 1);
4134 XEXP (link
, 1) = i2notes
;
4148 if (MAY_HAVE_DEBUG_INSNS
&& i2scratch
)
4149 propagate_for_debug (i2
, last_combined_insn
, i2dest
, i2src
);
4150 INSN_CODE (i2
) = i2_code_number
;
4151 PATTERN (i2
) = newi2pat
;
4155 if (MAY_HAVE_DEBUG_INSNS
&& i2src
)
4156 propagate_for_debug (i2
, last_combined_insn
, i2dest
, i2src
);
4157 SET_INSN_DELETED (i2
);
4164 if (MAY_HAVE_DEBUG_INSNS
)
4165 propagate_for_debug (i1
, last_combined_insn
, i1dest
, i1src
);
4166 SET_INSN_DELETED (i1
);
4173 if (MAY_HAVE_DEBUG_INSNS
)
4174 propagate_for_debug (i0
, last_combined_insn
, i0dest
, i0src
);
4175 SET_INSN_DELETED (i0
);
4178 /* Get death notes for everything that is now used in either I3 or
4179 I2 and used to die in a previous insn. If we built two new
4180 patterns, move from I1 to I2 then I2 to I3 so that we get the
4181 proper movement on registers that I2 modifies. */
4184 from_luid
= DF_INSN_LUID (i0
);
4186 from_luid
= DF_INSN_LUID (i1
);
4188 from_luid
= DF_INSN_LUID (i2
);
4190 move_deaths (newi2pat
, NULL_RTX
, from_luid
, i2
, &midnotes
);
4191 move_deaths (newpat
, newi2pat
, from_luid
, i3
, &midnotes
);
4193 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4195 distribute_notes (i3notes
, i3
, i3
, newi2pat
? i2
: NULL_RTX
,
4196 elim_i2
, elim_i1
, elim_i0
);
4198 distribute_notes (i2notes
, i2
, i3
, newi2pat
? i2
: NULL_RTX
,
4199 elim_i2
, elim_i1
, elim_i0
);
4201 distribute_notes (i1notes
, i1
, i3
, newi2pat
? i2
: NULL_RTX
,
4202 elim_i2
, elim_i1
, elim_i0
);
4204 distribute_notes (i0notes
, i0
, i3
, newi2pat
? i2
: NULL_RTX
,
4205 elim_i2
, elim_i1
, elim_i0
);
4207 distribute_notes (midnotes
, NULL_RTX
, i3
, newi2pat
? i2
: NULL_RTX
,
4208 elim_i2
, elim_i1
, elim_i0
);
4210 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4211 know these are REG_UNUSED and want them to go to the desired insn,
4212 so we always pass it as i3. */
4214 if (newi2pat
&& new_i2_notes
)
4215 distribute_notes (new_i2_notes
, i2
, i2
, NULL_RTX
, NULL_RTX
, NULL_RTX
,
4219 distribute_notes (new_i3_notes
, i3
, i3
, NULL_RTX
, NULL_RTX
, NULL_RTX
,
4222 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4223 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4224 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4225 in that case, it might delete I2. Similarly for I2 and I1.
4226 Show an additional death due to the REG_DEAD note we make here. If
4227 we discard it in distribute_notes, we will decrement it again. */
4231 if (newi2pat
&& reg_set_p (i3dest_killed
, newi2pat
))
4232 distribute_notes (alloc_reg_note (REG_DEAD
, i3dest_killed
,
4234 NULL_RTX
, i2
, NULL_RTX
, elim_i2
, elim_i1
, elim_i0
);
4236 distribute_notes (alloc_reg_note (REG_DEAD
, i3dest_killed
,
4238 NULL_RTX
, i3
, newi2pat
? i2
: NULL_RTX
,
4239 elim_i2
, elim_i1
, elim_i0
);
4242 if (i2dest_in_i2src
)
4244 rtx new_note
= alloc_reg_note (REG_DEAD
, i2dest
, NULL_RTX
);
4245 if (newi2pat
&& reg_set_p (i2dest
, newi2pat
))
4246 distribute_notes (new_note
, NULL_RTX
, i2
, NULL_RTX
, NULL_RTX
,
4247 NULL_RTX
, NULL_RTX
);
4249 distribute_notes (new_note
, NULL_RTX
, i3
, newi2pat
? i2
: NULL_RTX
,
4250 NULL_RTX
, NULL_RTX
, NULL_RTX
);
4253 if (i1dest_in_i1src
)
4255 rtx new_note
= alloc_reg_note (REG_DEAD
, i1dest
, NULL_RTX
);
4256 if (newi2pat
&& reg_set_p (i1dest
, newi2pat
))
4257 distribute_notes (new_note
, NULL_RTX
, i2
, NULL_RTX
, NULL_RTX
,
4258 NULL_RTX
, NULL_RTX
);
4260 distribute_notes (new_note
, NULL_RTX
, i3
, newi2pat
? i2
: NULL_RTX
,
4261 NULL_RTX
, NULL_RTX
, NULL_RTX
);
4264 if (i0dest_in_i0src
)
4266 rtx new_note
= alloc_reg_note (REG_DEAD
, i0dest
, NULL_RTX
);
4267 if (newi2pat
&& reg_set_p (i0dest
, newi2pat
))
4268 distribute_notes (new_note
, NULL_RTX
, i2
, NULL_RTX
, NULL_RTX
,
4269 NULL_RTX
, NULL_RTX
);
4271 distribute_notes (new_note
, NULL_RTX
, i3
, newi2pat
? i2
: NULL_RTX
,
4272 NULL_RTX
, NULL_RTX
, NULL_RTX
);
4275 distribute_links (i3links
);
4276 distribute_links (i2links
);
4277 distribute_links (i1links
);
4278 distribute_links (i0links
);
4283 rtx i2_insn
= 0, i2_val
= 0, set
;
      /* The insn that used to set this register doesn't exist, and
	 this life of the register may not exist either.  See if one of
	 I3's links points to an insn that sets I2DEST.  If it does,
	 that is now the last known value for I2DEST.  If we don't update
	 this and I2 set the register to a value that depended on its old
	 contents, we will get confused.  If this insn is used, things
	 will be set correctly in combine_instructions.  */
4293 for (link
= LOG_LINKS (i3
); link
; link
= XEXP (link
, 1))
4294 if ((set
= single_set (XEXP (link
, 0))) != 0
4295 && rtx_equal_p (i2dest
, SET_DEST (set
)))
4296 i2_insn
= XEXP (link
, 0), i2_val
= SET_SRC (set
);
4298 record_value_for_reg (i2dest
, i2_insn
, i2_val
);
4300 /* If the reg formerly set in I2 died only once and that was in I3,
4301 zero its use count so it won't make `reload' do any work. */
4303 && (newi2pat
== 0 || ! reg_mentioned_p (i2dest
, newi2pat
))
4304 && ! i2dest_in_i2src
)
4306 regno
= REGNO (i2dest
);
4307 INC_REG_N_SETS (regno
, -1);
4311 if (i1
&& REG_P (i1dest
))
4314 rtx i1_insn
= 0, i1_val
= 0, set
;
4316 for (link
= LOG_LINKS (i3
); link
; link
= XEXP (link
, 1))
4317 if ((set
= single_set (XEXP (link
, 0))) != 0
4318 && rtx_equal_p (i1dest
, SET_DEST (set
)))
4319 i1_insn
= XEXP (link
, 0), i1_val
= SET_SRC (set
);
4321 record_value_for_reg (i1dest
, i1_insn
, i1_val
);
4323 regno
= REGNO (i1dest
);
4324 if (! added_sets_1
&& ! i1dest_in_i1src
)
4325 INC_REG_N_SETS (regno
, -1);
4328 if (i0
&& REG_P (i0dest
))
4331 rtx i0_insn
= 0, i0_val
= 0, set
;
4333 for (link
= LOG_LINKS (i3
); link
; link
= XEXP (link
, 1))
4334 if ((set
= single_set (XEXP (link
, 0))) != 0
4335 && rtx_equal_p (i0dest
, SET_DEST (set
)))
4336 i0_insn
= XEXP (link
, 0), i0_val
= SET_SRC (set
);
4338 record_value_for_reg (i0dest
, i0_insn
, i0_val
);
4340 regno
= REGNO (i0dest
);
4341 if (! added_sets_0
&& ! i0dest_in_i0src
)
4342 INC_REG_N_SETS (regno
, -1);
  /* Update reg_stat[].nonzero_bits et al for any changes that may have
     been made to this insn.  The order of
     set_nonzero_bits_and_sign_copies() is important, because newi2pat
     can affect nonzero_bits of newpat.  */
4350 note_stores (newi2pat
, set_nonzero_bits_and_sign_copies
, NULL
);
4351 note_stores (newpat
, set_nonzero_bits_and_sign_copies
, NULL
);
  if (undobuf.other_insn != NULL_RTX)
    {
      fprintf (dump_file, "modifying other_insn ");
      dump_insn_slim (dump_file, undobuf.other_insn);
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      fprintf (dump_file, "modifying insn i0 ");
      dump_insn_slim (dump_file, i0);
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      fprintf (dump_file, "modifying insn i1 ");
      dump_insn_slim (dump_file, i1);
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      fprintf (dump_file, "modifying insn i2 ");
      dump_insn_slim (dump_file, i2);
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      fprintf (dump_file, "modifying insn i3 ");
      dump_insn_slim (dump_file, i3);
      df_insn_rescan (i3);
    }
4404 /* Set new_direct_jump_p if a new return or simple jump instruction
4405 has been created. Adjust the CFG accordingly. */
4407 if (returnjump_p (i3
) || any_uncondjump_p (i3
))
4409 *new_direct_jump_p
= 1;
4410 mark_jump_label (PATTERN (i3
), i3
, 0);
4411 update_cfg_for_uncondjump (i3
);
4414 if (undobuf
.other_insn
!= NULL_RTX
4415 && (returnjump_p (undobuf
.other_insn
)
4416 || any_uncondjump_p (undobuf
.other_insn
)))
4418 *new_direct_jump_p
= 1;
4419 update_cfg_for_uncondjump (undobuf
.other_insn
);
4422 /* A noop might also need cleaning up of CFG, if it comes from the
4423 simplification of a jump. */
4424 if (GET_CODE (newpat
) == SET
4425 && SET_SRC (newpat
) == pc_rtx
4426 && SET_DEST (newpat
) == pc_rtx
)
4428 *new_direct_jump_p
= 1;
4429 update_cfg_for_uncondjump (i3
);
4432 if (undobuf
.other_insn
!= NULL_RTX
4433 && GET_CODE (PATTERN (undobuf
.other_insn
)) == SET
4434 && SET_SRC (PATTERN (undobuf
.other_insn
)) == pc_rtx
4435 && SET_DEST (PATTERN (undobuf
.other_insn
)) == pc_rtx
)
4437 *new_direct_jump_p
= 1;
4438 update_cfg_for_uncondjump (undobuf
.other_insn
);
4441 combine_successes
++;
4444 if (added_links_insn
4445 && (newi2pat
== 0 || DF_INSN_LUID (added_links_insn
) < DF_INSN_LUID (i2
))
4446 && DF_INSN_LUID (added_links_insn
) < DF_INSN_LUID (i3
))
4447 return added_links_insn
;
4449 return newi2pat
? i2
: i3
;
4452 /* Undo all the modifications recorded in undobuf. */
4457 struct undo
*undo
, *next
;
4459 for (undo
= undobuf
.undos
; undo
; undo
= next
)
4465 *undo
->where
.r
= undo
->old_contents
.r
;
4468 *undo
->where
.i
= undo
->old_contents
.i
;
4471 adjust_reg_mode (*undo
->where
.r
, undo
->old_contents
.m
);
4477 undo
->next
= undobuf
.frees
;
4478 undobuf
.frees
= undo
;
4484 /* We've committed to accepting the changes we made. Move all
4485 of the undos to the free list. */
4490 struct undo
*undo
, *next
;
4492 for (undo
= undobuf
.undos
; undo
; undo
= next
)
4495 undo
->next
= undobuf
.frees
;
4496 undobuf
.frees
= undo
;
4501 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4502 where we have an arithmetic expression and return that point. LOC will
4505 try_combine will call this function to see if an insn can be split into
4509 find_split_point (rtx
*loc
, rtx insn
, bool set_src
)
4512 enum rtx_code code
= GET_CODE (x
);
4514 unsigned HOST_WIDE_INT len
= 0;
4515 HOST_WIDE_INT pos
= 0;
4517 rtx inner
= NULL_RTX
;
  /* First special-case some codes.  */
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return find_split_point (&SUBREG_REG (x), insn, false);

      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
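      /* For example (illustrative only): for a split of
	   (mem (symbol_ref "sym"))
	 the code below rewrites the address as
	   (lo_sum (high (symbol_ref "sym")) (symbol_ref "sym"))
	 and returns the (high ...) half as the split point, so the high
	 part can be computed by a separate insn on machines that need
	 that.  */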
      if (GET_CODE (XEXP (x, 0)) == CONST
	  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  enum machine_mode address_mode
	    = targetm.addr_space.address_mode (MEM_ADDR_SPACE (x));

		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),

	  return &XEXP (XEXP (x, 0), 0);
	}
      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
4570 && NEXT_INSN (seq
) != NULL_RTX
4571 && NEXT_INSN (NEXT_INSN (seq
)) == NULL_RTX
4572 && NONJUMP_INSN_P (seq
)
4573 && GET_CODE (PATTERN (seq
)) == SET
4574 && SET_DEST (PATTERN (seq
)) == reg
4575 && ! reg_mentioned_p (reg
,
4576 SET_SRC (PATTERN (seq
)))
4577 && NONJUMP_INSN_P (NEXT_INSN (seq
))
4578 && GET_CODE (PATTERN (NEXT_INSN (seq
))) == SET
4579 && SET_DEST (PATTERN (NEXT_INSN (seq
))) == reg
4580 && memory_address_addr_space_p
4581 (GET_MODE (x
), SET_SRC (PATTERN (NEXT_INSN (seq
))),
4582 MEM_ADDR_SPACE (x
)))
4584 rtx src1
= SET_SRC (PATTERN (seq
));
4585 rtx src2
= SET_SRC (PATTERN (NEXT_INSN (seq
)));
4587 /* Replace the placeholder in SRC2 with SRC1. If we can
4588 find where in SRC2 it was placed, that can become our
4589 split point and we can replace this address with SRC2.
4590 Just try two obvious places. */
4592 src2
= replace_rtx (src2
, reg
, src1
);
4594 if (XEXP (src2
, 0) == src1
)
4595 split
= &XEXP (src2
, 0);
4596 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2
, 0)))[0] == 'e'
4597 && XEXP (XEXP (src2
, 0), 0) == src1
)
4598 split
= &XEXP (XEXP (src2
, 0), 0);
4602 SUBST (XEXP (x
, 0), src2
);
4607 /* If that didn't work, perhaps the first operand is complex and
4608 needs to be computed separately, so make a split point there.
4609 This will occur on machines that just support REG + CONST
4610 and have a constant moved through some previous computation. */
4612 else if (!OBJECT_P (XEXP (XEXP (x
, 0), 0))
4613 && ! (GET_CODE (XEXP (XEXP (x
, 0), 0)) == SUBREG
4614 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x
, 0), 0)))))
4615 return &XEXP (XEXP (x
, 0), 0);
4618 /* If we have a PLUS whose first operand is complex, try computing it
4619 separately by making a split there. */
4620 if (GET_CODE (XEXP (x
, 0)) == PLUS
4621 && ! memory_address_addr_space_p (GET_MODE (x
), XEXP (x
, 0),
4623 && ! OBJECT_P (XEXP (XEXP (x
, 0), 0))
4624 && ! (GET_CODE (XEXP (XEXP (x
, 0), 0)) == SUBREG
4625 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x
, 0), 0)))))
4626 return &XEXP (XEXP (x
, 0), 0);
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;
4655 /* See if this is a bitfield assignment with everything constant. If
4656 so, this is an IOR of an AND, so split it into that. */
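      /* For example (illustrative, with made-up operands and assuming
	 little-endian bit numbering): storing the constant 3 into a
	 4-bit field at bit 8 of (reg:SI 110),
	   (set (zero_extract:SI (reg:SI 110) (const_int 4) (const_int 8))
		(const_int 3))
	 can be rewritten as
	   (set (reg:SI 110)
		(ior:SI (and:SI (reg:SI 110) (const_int -3841))
			(const_int 768)))
	 i.e. mask out the field and OR in the shifted value, which is
	 what the code below builds before searching for a split point.  */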
4657 if (GET_CODE (SET_DEST (x
)) == ZERO_EXTRACT
4658 && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x
), 0)))
4659 <= HOST_BITS_PER_WIDE_INT
)
4660 && CONST_INT_P (XEXP (SET_DEST (x
), 1))
4661 && CONST_INT_P (XEXP (SET_DEST (x
), 2))
4662 && CONST_INT_P (SET_SRC (x
))
4663 && ((INTVAL (XEXP (SET_DEST (x
), 1))
4664 + INTVAL (XEXP (SET_DEST (x
), 2)))
4665 <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x
), 0))))
4666 && ! side_effects_p (XEXP (SET_DEST (x
), 0)))
4668 HOST_WIDE_INT pos
= INTVAL (XEXP (SET_DEST (x
), 2));
4669 unsigned HOST_WIDE_INT len
= INTVAL (XEXP (SET_DEST (x
), 1));
4670 unsigned HOST_WIDE_INT src
= INTVAL (SET_SRC (x
));
4671 rtx dest
= XEXP (SET_DEST (x
), 0);
4672 enum machine_mode mode
= GET_MODE (dest
);
4673 unsigned HOST_WIDE_INT mask
4674 = ((unsigned HOST_WIDE_INT
) 1 << len
) - 1;
4677 if (BITS_BIG_ENDIAN
)
4678 pos
= GET_MODE_BITSIZE (mode
) - len
- pos
;
4680 or_mask
= gen_int_mode (src
<< pos
, mode
);
4683 simplify_gen_binary (IOR
, mode
, dest
, or_mask
));
4686 rtx negmask
= gen_int_mode (~(mask
<< pos
), mode
);
4688 simplify_gen_binary (IOR
, mode
,
4689 simplify_gen_binary (AND
, mode
,
4694 SUBST (SET_DEST (x
), dest
);
4696 split
= find_split_point (&SET_SRC (x
), insn
, true);
4697 if (split
&& split
!= &SET_SRC (x
))
4701 /* Otherwise, see if this is an operation that we can split into two.
4702 If so, try to split that. */
4703 code
= GET_CODE (SET_SRC (x
));
4708 /* If we are AND'ing with a large constant that is only a single
4709 bit and the result is only being used in a context where we
4710 need to know if it is zero or nonzero, replace it with a bit
4711 extraction. This will avoid the large constant, which might
4712 have taken more than one insn to make. If the constant were
4713 not a valid argument to the AND but took only one insn to make,
4714 this is no worse, but if it took more than one insn, it will
4717 if (CONST_INT_P (XEXP (SET_SRC (x
), 1))
4718 && REG_P (XEXP (SET_SRC (x
), 0))
4719 && (pos
= exact_log2 (UINTVAL (XEXP (SET_SRC (x
), 1)))) >= 7
4720 && REG_P (SET_DEST (x
))
4721 && (split
= find_single_use (SET_DEST (x
), insn
, (rtx
*) 0)) != 0
4722 && (GET_CODE (*split
) == EQ
|| GET_CODE (*split
) == NE
)
4723 && XEXP (*split
, 0) == SET_DEST (x
)
4724 && XEXP (*split
, 1) == const0_rtx
)
4726 rtx extraction
= make_extraction (GET_MODE (SET_DEST (x
)),
4727 XEXP (SET_SRC (x
), 0),
4728 pos
, NULL_RTX
, 1, 1, 0, 0);
4729 if (extraction
!= 0)
4731 SUBST (SET_SRC (x
), extraction
);
4732 return find_split_point (loc
, insn
, false);
4738 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4739 is known to be on, this can be converted into a NEG of a shift. */
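      /* For example (illustrative only): on a target with
	 STORE_FLAG_VALUE == -1, if only bit 3 of X can be nonzero, then
	   (ne X (const_int 0))
	 is equivalent to
	   (neg (lshiftrt X (const_int 3)))
	 since the shift leaves just 0 or 1 and the negation produces
	 0 or -1, matching the store-flag convention.  */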
4740 if (STORE_FLAG_VALUE
== -1 && XEXP (SET_SRC (x
), 1) == const0_rtx
4741 && GET_MODE (SET_SRC (x
)) == GET_MODE (XEXP (SET_SRC (x
), 0))
4742 && 1 <= (pos
= exact_log2
4743 (nonzero_bits (XEXP (SET_SRC (x
), 0),
4744 GET_MODE (XEXP (SET_SRC (x
), 0))))))
4746 enum machine_mode mode
= GET_MODE (XEXP (SET_SRC (x
), 0));
4750 gen_rtx_LSHIFTRT (mode
,
4751 XEXP (SET_SRC (x
), 0),
4754 split
= find_split_point (&SET_SRC (x
), insn
, true);
4755 if (split
&& split
!= &SET_SRC (x
))
4761 inner
= XEXP (SET_SRC (x
), 0);
4763 /* We can't optimize if either mode is a partial integer
4764 mode as we don't know how many bits are significant
4766 if (GET_MODE_CLASS (GET_MODE (inner
)) == MODE_PARTIAL_INT
4767 || GET_MODE_CLASS (GET_MODE (SET_SRC (x
))) == MODE_PARTIAL_INT
)
4771 len
= GET_MODE_BITSIZE (GET_MODE (inner
));
4777 if (CONST_INT_P (XEXP (SET_SRC (x
), 1))
4778 && CONST_INT_P (XEXP (SET_SRC (x
), 2)))
4780 inner
= XEXP (SET_SRC (x
), 0);
4781 len
= INTVAL (XEXP (SET_SRC (x
), 1));
4782 pos
= INTVAL (XEXP (SET_SRC (x
), 2));
4784 if (BITS_BIG_ENDIAN
)
4785 pos
= GET_MODE_BITSIZE (GET_MODE (inner
)) - len
- pos
;
4786 unsignedp
= (code
== ZERO_EXTRACT
);
4794 if (len
&& pos
>= 0 && pos
+ len
<= GET_MODE_BITSIZE (GET_MODE (inner
)))
4796 enum machine_mode mode
= GET_MODE (SET_SRC (x
));
4798 /* For unsigned, we have a choice of a shift followed by an
4799 AND or two shifts. Use two shifts for field sizes where the
4800 constant might be too large. We assume here that we can
4801 always at least get 8-bit constants in an AND insn, which is
4802 true for every current RISC. */
4804 if (unsignedp
&& len
<= 8)
4809 (mode
, gen_lowpart (mode
, inner
),
4811 GEN_INT (((unsigned HOST_WIDE_INT
) 1 << len
)
4814 split
= find_split_point (&SET_SRC (x
), insn
, true);
4815 if (split
&& split
!= &SET_SRC (x
))
4822 (unsignedp
? LSHIFTRT
: ASHIFTRT
, mode
,
4823 gen_rtx_ASHIFT (mode
,
4824 gen_lowpart (mode
, inner
),
4825 GEN_INT (GET_MODE_BITSIZE (mode
)
4827 GEN_INT (GET_MODE_BITSIZE (mode
) - len
)));
4829 split
= find_split_point (&SET_SRC (x
), insn
, true);
4830 if (split
&& split
!= &SET_SRC (x
))
4835 /* See if this is a simple operation with a constant as the second
4836 operand. It might be that this constant is out of range and hence
4837 could be used as a split point. */
4838 if (BINARY_P (SET_SRC (x
))
4839 && CONSTANT_P (XEXP (SET_SRC (x
), 1))
4840 && (OBJECT_P (XEXP (SET_SRC (x
), 0))
4841 || (GET_CODE (XEXP (SET_SRC (x
), 0)) == SUBREG
4842 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x
), 0))))))
4843 return &XEXP (SET_SRC (x
), 1);
4845 /* Finally, see if this is a simple operation with its first operand
4846 not in a register. The operation might require this operand in a
4847 register, so return it as a split point. We can always do this
4848 because if the first operand were another operation, we would have
4849 already found it as a split point. */
4850 if ((BINARY_P (SET_SRC (x
)) || UNARY_P (SET_SRC (x
)))
4851 && ! register_operand (XEXP (SET_SRC (x
), 0), VOIDmode
))
4852 return &XEXP (SET_SRC (x
), 0);
4858 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4859 it is better to write this as (not (ior A B)) so we can split it.
4860 Similarly for IOR. */
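	  /* For example (illustrative only): with code == AND,
	       (and (not (reg 111)) (not (reg 112)))
	     is rewritten by the SUBST below as
	       (not (ior (reg 111) (reg 112)))
	     and find_split_point is then retried on the simpler form;
	     the IOR case is handled symmetrically, producing
	     (not (and A B)).  */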
4861 if (GET_CODE (XEXP (x
, 0)) == NOT
&& GET_CODE (XEXP (x
, 1)) == NOT
)
4864 gen_rtx_NOT (GET_MODE (x
),
4865 gen_rtx_fmt_ee (code
== IOR
? AND
: IOR
,
4867 XEXP (XEXP (x
, 0), 0),
4868 XEXP (XEXP (x
, 1), 0))));
4869 return find_split_point (loc
, insn
, set_src
);
4872 /* Many RISC machines have a large set of logical insns. If the
4873 second operand is a NOT, put it first so we will try to split the
4874 other operand first. */
4875 if (GET_CODE (XEXP (x
, 1)) == NOT
)
4877 rtx tem
= XEXP (x
, 0);
4878 SUBST (XEXP (x
, 0), XEXP (x
, 1));
4879 SUBST (XEXP (x
, 1), tem
);
4885 /* Canonicalization can produce (minus A (mult B C)), where C is a
4886 constant. It may be better to try splitting (plus (mult B -C) A)
4887 instead if this isn't a multiply by a power of two. */
4888 if (set_src
&& code
== MINUS
&& GET_CODE (XEXP (x
, 1)) == MULT
4889 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
4890 && exact_log2 (INTVAL (XEXP (XEXP (x
, 1), 1))) < 0)
4892 enum machine_mode mode
= GET_MODE (x
);
4893 unsigned HOST_WIDE_INT this_int
= INTVAL (XEXP (XEXP (x
, 1), 1));
4894 HOST_WIDE_INT other_int
= trunc_int_for_mode (-this_int
, mode
);
4895 SUBST (*loc
, gen_rtx_PLUS (mode
, gen_rtx_MULT (mode
,
4896 XEXP (XEXP (x
, 1), 0),
4897 GEN_INT (other_int
)),
4899 return find_split_point (loc
, insn
, set_src
);
4902 /* Split at a multiply-accumulate instruction. However if this is
4903 the SET_SRC, we likely do not have such an instruction and it's
4904 worthless to try this split. */
4905 if (!set_src
&& GET_CODE (XEXP (x
, 0)) == MULT
)
4912 /* Otherwise, select our actions depending on our rtx class. */
4913 switch (GET_RTX_CLASS (code
))
4915 case RTX_BITFIELD_OPS
: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
4917 split
= find_split_point (&XEXP (x
, 2), insn
, false);
4920 /* ... fall through ... */
4922 case RTX_COMM_ARITH
:
4924 case RTX_COMM_COMPARE
:
4925 split
= find_split_point (&XEXP (x
, 1), insn
, false);
4928 /* ... fall through ... */
4930 /* Some machines have (and (shift ...) ...) insns. If X is not
4931 an AND, but XEXP (X, 0) is, use it as our split point. */
4932 if (GET_CODE (x
) != AND
&& GET_CODE (XEXP (x
, 0)) == AND
)
4933 return &XEXP (x
, 0);
4935 split
= find_split_point (&XEXP (x
, 0), insn
, false);
4941 /* Otherwise, we don't have a split point. */
4946 /* Throughout X, replace FROM with TO, and return the result.
4947 The result is TO if X is FROM;
4948 otherwise the result is X, but its contents may have been modified.
4949 If they were modified, a record was made in undobuf so that
4950 undo_all will (among other things) return X to its original state.
4952 If the number of changes necessary is too much to record to undo,
4953 the excess changes are not made, so the result is invalid.
4954 The changes already made can still be undone.
4955 undobuf.num_undo is incremented for such changes, so by testing that
4956 the caller can tell whether the result is valid.
4958 `n_occurrences' is incremented each time FROM is replaced.
4960 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
4962 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
4963 by copying if `n_occurrences' is nonzero. */
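/* For example (illustrative only): substituting FROM = (reg 120) with
   TO = (plus (reg 121) (const_int 4)) throughout
   X = (set (reg 122) (mult (reg 120) (const_int 2)))
   yields
   (set (reg 122) (mult (plus (reg 121) (const_int 4)) (const_int 2))),
   with each change recorded through SUBST so that undo_all can revert
   it if the combination is abandoned.  */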
4966 subst (rtx x
, rtx from
, rtx to
, int in_dest
, int unique_copy
)
4968 enum rtx_code code
= GET_CODE (x
);
4969 enum machine_mode op0_mode
= VOIDmode
;
4974 /* Two expressions are equal if they are identical copies of a shared
4975 RTX or if they are both registers with the same register number
4978 #define COMBINE_RTX_EQUAL_P(X,Y) \
4980 || (REG_P (X) && REG_P (Y) \
4981 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
4983 if (! in_dest
&& COMBINE_RTX_EQUAL_P (x
, from
))
4986 return (unique_copy
&& n_occurrences
> 1 ? copy_rtx (to
) : to
);
4989 /* If X and FROM are the same register but different modes, they
4990 will not have been seen as equal above. However, the log links code
4991 will make a LOG_LINKS entry for that case. If we do nothing, we
4992 will try to rerecognize our original insn and, when it succeeds,
4993 we will delete the feeding insn, which is incorrect.
4995 So force this insn not to match in this (rare) case. */
4996 if (! in_dest
&& code
== REG
&& REG_P (from
)
4997 && reg_overlap_mentioned_p (x
, from
))
4998 return gen_rtx_CLOBBER (GET_MODE (x
), const0_rtx
);
5000 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5001 of which may contain things that can be combined. */
5002 if (code
!= MEM
&& code
!= LO_SUM
&& OBJECT_P (x
))
5005 /* It is possible to have a subexpression appear twice in the insn.
5006 Suppose that FROM is a register that appears within TO.
5007 Then, after that subexpression has been scanned once by `subst',
5008 the second time it is scanned, TO may be found. If we were
5009 to scan TO here, we would find FROM within it and create a
5010 self-referent rtl structure which is completely wrong. */
5011 if (COMBINE_RTX_EQUAL_P (x
, to
))
5014 /* Parallel asm_operands need special attention because all of the
5015 inputs are shared across the arms. Furthermore, unsharing the
5016 rtl results in recognition failures. Failure to handle this case
5017 specially can result in circular rtl.
5019 Solve this by doing a normal pass across the first entry of the
5020 parallel, and only processing the SET_DESTs of the subsequent
5023 if (code
== PARALLEL
5024 && GET_CODE (XVECEXP (x
, 0, 0)) == SET
5025 && GET_CODE (SET_SRC (XVECEXP (x
, 0, 0))) == ASM_OPERANDS
)
5027 new_rtx
= subst (XVECEXP (x
, 0, 0), from
, to
, 0, unique_copy
);
5029 /* If this substitution failed, this whole thing fails. */
5030 if (GET_CODE (new_rtx
) == CLOBBER
5031 && XEXP (new_rtx
, 0) == const0_rtx
)
5034 SUBST (XVECEXP (x
, 0, 0), new_rtx
);
5036 for (i
= XVECLEN (x
, 0) - 1; i
>= 1; i
--)
5038 rtx dest
= SET_DEST (XVECEXP (x
, 0, i
));
5041 && GET_CODE (dest
) != CC0
5042 && GET_CODE (dest
) != PC
)
5044 new_rtx
= subst (dest
, from
, to
, 0, unique_copy
);
5046 /* If this substitution failed, this whole thing fails. */
5047 if (GET_CODE (new_rtx
) == CLOBBER
5048 && XEXP (new_rtx
, 0) == const0_rtx
)
5051 SUBST (SET_DEST (XVECEXP (x
, 0, i
)), new_rtx
);
5057 len
= GET_RTX_LENGTH (code
);
5058 fmt
= GET_RTX_FORMAT (code
);
5060 /* We don't need to process a SET_DEST that is a register, CC0,
5061 or PC, so set up to skip this common case. All other cases
5062 where we want to suppress replacing something inside a
5063 SET_SRC are handled via the IN_DEST operand. */
5065 && (REG_P (SET_DEST (x
))
5066 || GET_CODE (SET_DEST (x
)) == CC0
5067 || GET_CODE (SET_DEST (x
)) == PC
))
5070 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5073 op0_mode
= GET_MODE (XEXP (x
, 0));
5075 for (i
= 0; i
< len
; i
++)
5080 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
5082 if (COMBINE_RTX_EQUAL_P (XVECEXP (x
, i
, j
), from
))
5084 new_rtx
= (unique_copy
&& n_occurrences
5085 ? copy_rtx (to
) : to
);
5090 new_rtx
= subst (XVECEXP (x
, i
, j
), from
, to
, 0,
5093 /* If this substitution failed, this whole thing
5095 if (GET_CODE (new_rtx
) == CLOBBER
5096 && XEXP (new_rtx
, 0) == const0_rtx
)
5100 SUBST (XVECEXP (x
, i
, j
), new_rtx
);
5103 else if (fmt
[i
] == 'e')
5105 /* If this is a register being set, ignore it. */
5106 new_rtx
= XEXP (x
, i
);
5109 && (((code
== SUBREG
|| code
== ZERO_EXTRACT
)
5111 || code
== STRICT_LOW_PART
))
5114 else if (COMBINE_RTX_EQUAL_P (XEXP (x
, i
), from
))
5116 /* In general, don't install a subreg involving two
5117 modes not tieable. It can worsen register
5118 allocation, and can even make invalid reload
5119 insns, since the reg inside may need to be copied
5120 from in the outside mode, and that may be invalid
5121 if it is an fp reg copied in integer mode.
5123 We allow two exceptions to this: It is valid if
5124 it is inside another SUBREG and the mode of that
5125 SUBREG and the mode of the inside of TO is
5126 tieable and it is valid if X is a SET that copies
5129 if (GET_CODE (to
) == SUBREG
5130 && ! MODES_TIEABLE_P (GET_MODE (to
),
5131 GET_MODE (SUBREG_REG (to
)))
5132 && ! (code
== SUBREG
5133 && MODES_TIEABLE_P (GET_MODE (x
),
5134 GET_MODE (SUBREG_REG (to
))))
5136 && ! (code
== SET
&& i
== 1 && XEXP (x
, 0) == cc0_rtx
)
5139 return gen_rtx_CLOBBER (VOIDmode
, const0_rtx
);
5141 #ifdef CANNOT_CHANGE_MODE_CLASS
5144 && REGNO (to
) < FIRST_PSEUDO_REGISTER
5145 && REG_CANNOT_CHANGE_MODE_P (REGNO (to
),
5148 return gen_rtx_CLOBBER (VOIDmode
, const0_rtx
);
5151 new_rtx
= (unique_copy
&& n_occurrences
? copy_rtx (to
) : to
);
5155 /* If we are in a SET_DEST, suppress most cases unless we
5156 have gone inside a MEM, in which case we want to
5157 simplify the address. We assume here that things that
5158 are actually part of the destination have their inner
5159 parts in the first expression. This is true for SUBREG,
5160 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5161 things aside from REG and MEM that should appear in a
5163 new_rtx
= subst (XEXP (x
, i
), from
, to
,
5165 && (code
== SUBREG
|| code
== STRICT_LOW_PART
5166 || code
== ZERO_EXTRACT
))
5168 && i
== 0), unique_copy
);
5170 /* If we found that we will have to reject this combination,
5171 indicate that by returning the CLOBBER ourselves, rather than
5172 an expression containing it. This will speed things up as
5173 well as prevent accidents where two CLOBBERs are considered
5174 to be equal, thus producing an incorrect simplification. */
5176 if (GET_CODE (new_rtx
) == CLOBBER
&& XEXP (new_rtx
, 0) == const0_rtx
)
5179 if (GET_CODE (x
) == SUBREG
5180 && (CONST_INT_P (new_rtx
)
5181 || GET_CODE (new_rtx
) == CONST_DOUBLE
))
5183 enum machine_mode mode
= GET_MODE (x
);
5185 x
= simplify_subreg (GET_MODE (x
), new_rtx
,
5186 GET_MODE (SUBREG_REG (x
)),
5189 x
= gen_rtx_CLOBBER (mode
, const0_rtx
);
5191 else if (CONST_INT_P (new_rtx
)
5192 && GET_CODE (x
) == ZERO_EXTEND
)
5194 x
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
5195 new_rtx
, GET_MODE (XEXP (x
, 0)));
5199 SUBST (XEXP (x
, i
), new_rtx
);
5204 /* Check if we are loading something from the constant pool via float
5205 extension; in this case we would undo compress_float_constant
5206 optimization and degenerate constant load to an immediate value. */
5207 if (GET_CODE (x
) == FLOAT_EXTEND
5208 && MEM_P (XEXP (x
, 0))
5209 && MEM_READONLY_P (XEXP (x
, 0)))
5211 rtx tmp
= avoid_constant_pool_reference (x
);
5216 /* Try to simplify X. If the simplification changed the code, it is likely
5217 that further simplification will help, so loop, but limit the number
5218 of repetitions that will be performed. */
5220 for (i
= 0; i
< 4; i
++)
5222 /* If X is sufficiently simple, don't bother trying to do anything
5224 if (code
!= CONST_INT
&& code
!= REG
&& code
!= CLOBBER
)
5225 x
= combine_simplify_rtx (x
, op0_mode
, in_dest
);
5227 if (GET_CODE (x
) == code
)
5230 code
= GET_CODE (x
);
      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
5234 op0_mode
= VOIDmode
;
5240 /* Simplify X, a piece of RTL. We just operate on the expression at the
5241 outer level; call `subst' to simplify recursively. Return the new
5244 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5245 if we are inside a SET_DEST. */
5248 combine_simplify_rtx (rtx x
, enum machine_mode op0_mode
, int in_dest
)
5250 enum rtx_code code
= GET_CODE (x
);
5251 enum machine_mode mode
= GET_MODE (x
);
5255 /* If this is a commutative operation, put a constant last and a complex
5256 expression first. We don't need to do this for comparisons here. */
5257 if (COMMUTATIVE_ARITH_P (x
)
5258 && swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
5261 SUBST (XEXP (x
, 0), XEXP (x
, 1));
5262 SUBST (XEXP (x
, 1), temp
);
5265 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5266 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5267 things. Check for cases where both arms are testing the same
5270 Don't do anything if all operands are very simple. */
5273 && ((!OBJECT_P (XEXP (x
, 0))
5274 && ! (GET_CODE (XEXP (x
, 0)) == SUBREG
5275 && OBJECT_P (SUBREG_REG (XEXP (x
, 0)))))
5276 || (!OBJECT_P (XEXP (x
, 1))
5277 && ! (GET_CODE (XEXP (x
, 1)) == SUBREG
5278 && OBJECT_P (SUBREG_REG (XEXP (x
, 1)))))))
5280 && (!OBJECT_P (XEXP (x
, 0))
5281 && ! (GET_CODE (XEXP (x
, 0)) == SUBREG
5282 && OBJECT_P (SUBREG_REG (XEXP (x
, 0)))))))
5284 rtx cond
, true_rtx
, false_rtx
;
5286 cond
= if_then_else_cond (x
, &true_rtx
, &false_rtx
);
5288 /* If everything is a comparison, what we have is highly unlikely
5289 to be simpler, so don't use it. */
5290 && ! (COMPARISON_P (x
)
5291 && (COMPARISON_P (true_rtx
) || COMPARISON_P (false_rtx
))))
5293 rtx cop1
= const0_rtx
;
5294 enum rtx_code cond_code
= simplify_comparison (NE
, &cond
, &cop1
);
5296 if (cond_code
== NE
&& COMPARISON_P (cond
))
5299 /* Simplify the alternative arms; this may collapse the true and
5300 false arms to store-flag values. Be careful to use copy_rtx
5301 here since true_rtx or false_rtx might share RTL with x as a
5302 result of the if_then_else_cond call above. */
5303 true_rtx
= subst (copy_rtx (true_rtx
), pc_rtx
, pc_rtx
, 0, 0);
5304 false_rtx
= subst (copy_rtx (false_rtx
), pc_rtx
, pc_rtx
, 0, 0);
5306 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5307 is unlikely to be simpler. */
5308 if (general_operand (true_rtx
, VOIDmode
)
5309 && general_operand (false_rtx
, VOIDmode
))
5311 enum rtx_code reversed
;
5313 /* Restarting if we generate a store-flag expression will cause
5314 us to loop. Just drop through in this case. */
5316 /* If the result values are STORE_FLAG_VALUE and zero, we can
5317 just make the comparison operation. */
5318 if (true_rtx
== const_true_rtx
&& false_rtx
== const0_rtx
)
5319 x
= simplify_gen_relational (cond_code
, mode
, VOIDmode
,
5321 else if (true_rtx
== const0_rtx
&& false_rtx
== const_true_rtx
5322 && ((reversed
= reversed_comparison_code_parts
5323 (cond_code
, cond
, cop1
, NULL
))
5325 x
= simplify_gen_relational (reversed
, mode
, VOIDmode
,
5328 /* Likewise, we can make the negate of a comparison operation
5329 if the result values are - STORE_FLAG_VALUE and zero. */
5330 else if (CONST_INT_P (true_rtx
)
5331 && INTVAL (true_rtx
) == - STORE_FLAG_VALUE
5332 && false_rtx
== const0_rtx
)
5333 x
= simplify_gen_unary (NEG
, mode
,
5334 simplify_gen_relational (cond_code
,
5338 else if (CONST_INT_P (false_rtx
)
5339 && INTVAL (false_rtx
) == - STORE_FLAG_VALUE
5340 && true_rtx
== const0_rtx
5341 && ((reversed
= reversed_comparison_code_parts
5342 (cond_code
, cond
, cop1
, NULL
))
5344 x
= simplify_gen_unary (NEG
, mode
,
5345 simplify_gen_relational (reversed
,
5350 return gen_rtx_IF_THEN_ELSE (mode
,
5351 simplify_gen_relational (cond_code
,
5356 true_rtx
, false_rtx
);
5358 code
= GET_CODE (x
);
5359 op0_mode
= VOIDmode
;
5364 /* Try to fold this expression in case we have constants that weren't
5367 switch (GET_RTX_CLASS (code
))
5370 if (op0_mode
== VOIDmode
)
5371 op0_mode
= GET_MODE (XEXP (x
, 0));
5372 temp
= simplify_unary_operation (code
, mode
, XEXP (x
, 0), op0_mode
);
5375 case RTX_COMM_COMPARE
:
5377 enum machine_mode cmp_mode
= GET_MODE (XEXP (x
, 0));
5378 if (cmp_mode
== VOIDmode
)
5380 cmp_mode
= GET_MODE (XEXP (x
, 1));
5381 if (cmp_mode
== VOIDmode
)
5382 cmp_mode
= op0_mode
;
5384 temp
= simplify_relational_operation (code
, mode
, cmp_mode
,
5385 XEXP (x
, 0), XEXP (x
, 1));
5388 case RTX_COMM_ARITH
:
5390 temp
= simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5392 case RTX_BITFIELD_OPS
:
5394 temp
= simplify_ternary_operation (code
, mode
, op0_mode
, XEXP (x
, 0),
5395 XEXP (x
, 1), XEXP (x
, 2));
5404 code
= GET_CODE (temp
);
5405 op0_mode
= VOIDmode
;
5406 mode
= GET_MODE (temp
);
5409 /* First see if we can apply the inverse distributive law. */
5410 if (code
== PLUS
|| code
== MINUS
5411 || code
== AND
|| code
== IOR
|| code
== XOR
)
5413 x
= apply_distributive_law (x
);
5414 code
= GET_CODE (x
);
5415 op0_mode
= VOIDmode
;
5418 /* If CODE is an associative operation not otherwise handled, see if we
5419 can associate some operands. This can win if they are constants or
5420 if they are logically related (i.e. (a & b) & a). */
5421 if ((code
== PLUS
|| code
== MINUS
|| code
== MULT
|| code
== DIV
5422 || code
== AND
|| code
== IOR
|| code
== XOR
5423 || code
== SMAX
|| code
== SMIN
|| code
== UMAX
|| code
== UMIN
)
5424 && ((INTEGRAL_MODE_P (mode
) && code
!= DIV
)
5425 || (flag_associative_math
&& FLOAT_MODE_P (mode
))))
5427 if (GET_CODE (XEXP (x
, 0)) == code
)
5429 rtx other
= XEXP (XEXP (x
, 0), 0);
5430 rtx inner_op0
= XEXP (XEXP (x
, 0), 1);
5431 rtx inner_op1
= XEXP (x
, 1);
5434 /* Make sure we pass the constant operand if any as the second
5435 one if this is a commutative operation. */
5436 if (CONSTANT_P (inner_op0
) && COMMUTATIVE_ARITH_P (x
))
5438 rtx tem
= inner_op0
;
5439 inner_op0
= inner_op1
;
5442 inner
= simplify_binary_operation (code
== MINUS
? PLUS
5443 : code
== DIV
? MULT
5445 mode
, inner_op0
, inner_op1
);
5447 /* For commutative operations, try the other pair if that one
5449 if (inner
== 0 && COMMUTATIVE_ARITH_P (x
))
5451 other
= XEXP (XEXP (x
, 0), 1);
5452 inner
= simplify_binary_operation (code
, mode
,
5453 XEXP (XEXP (x
, 0), 0),
5458 return simplify_gen_binary (code
, mode
, other
, inner
);
5462 /* A little bit of algebraic simplification here. */
5466 /* Ensure that our address has any ASHIFTs converted to MULT in case
5467 address-recognizing predicates are called later. */
5468 temp
= make_compound_operation (XEXP (x
, 0), MEM
);
5469 SUBST (XEXP (x
, 0), temp
);
5473 if (op0_mode
== VOIDmode
)
5474 op0_mode
= GET_MODE (SUBREG_REG (x
));
5476 /* See if this can be moved to simplify_subreg. */
5477 if (CONSTANT_P (SUBREG_REG (x
))
5478 && subreg_lowpart_offset (mode
, op0_mode
) == SUBREG_BYTE (x
)
5479 /* Don't call gen_lowpart if the inner mode
5480 is VOIDmode and we cannot simplify it, as SUBREG without
5481 inner mode is invalid. */
5482 && (GET_MODE (SUBREG_REG (x
)) != VOIDmode
5483 || gen_lowpart_common (mode
, SUBREG_REG (x
))))
5484 return gen_lowpart (mode
, SUBREG_REG (x
));
5486 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x
))) == MODE_CC
)
5490 temp
= simplify_subreg (mode
, SUBREG_REG (x
), op0_mode
,
5496 /* Don't change the mode of the MEM if that would change the meaning
5498 if (MEM_P (SUBREG_REG (x
))
5499 && (MEM_VOLATILE_P (SUBREG_REG (x
))
5500 || mode_dependent_address_p (XEXP (SUBREG_REG (x
), 0))))
5501 return gen_rtx_CLOBBER (mode
, const0_rtx
);
5503 /* Note that we cannot do any narrowing for non-constants since
5504 we might have been counting on using the fact that some bits were
5505 zero. We now do this in the SET. */
5510 temp
= expand_compound_operation (XEXP (x
, 0));
5512 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5513 replaced by (lshiftrt X C). This will convert
5514 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5516 if (GET_CODE (temp
) == ASHIFTRT
5517 && CONST_INT_P (XEXP (temp
, 1))
5518 && INTVAL (XEXP (temp
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
5519 return simplify_shift_const (NULL_RTX
, LSHIFTRT
, mode
, XEXP (temp
, 0),
5520 INTVAL (XEXP (temp
, 1)));
5522 /* If X has only a single bit that might be nonzero, say, bit I, convert
5523 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5524 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5525 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5526 or a SUBREG of one since we'd be making the expression more
5527 complex if it was just a register. */
5530 && ! (GET_CODE (temp
) == SUBREG
5531 && REG_P (SUBREG_REG (temp
)))
5532 && (i
= exact_log2 (nonzero_bits (temp
, mode
))) >= 0)
5534 rtx temp1
= simplify_shift_const
5535 (NULL_RTX
, ASHIFTRT
, mode
,
5536 simplify_shift_const (NULL_RTX
, ASHIFT
, mode
, temp
,
5537 GET_MODE_BITSIZE (mode
) - 1 - i
),
5538 GET_MODE_BITSIZE (mode
) - 1 - i
);
5540 /* If all we did was surround TEMP with the two shifts, we
5541 haven't improved anything, so don't use it. Otherwise,
5542 we are better off with TEMP1. */
5543 if (GET_CODE (temp1
) != ASHIFTRT
5544 || GET_CODE (XEXP (temp1
, 0)) != ASHIFT
5545 || XEXP (XEXP (temp1
, 0), 0) != temp
)
5551 /* We can't handle truncation to a partial integer mode here
5552 because we don't know the real bitsize of the partial
5554 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
5557 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
5559 force_to_mode (XEXP (x
, 0), GET_MODE (XEXP (x
, 0)),
5560 GET_MODE_MASK (mode
), 0));
5562 /* We can truncate a constant value and return it. */
5563 if (CONST_INT_P (XEXP (x
, 0)))
5564 return gen_int_mode (INTVAL (XEXP (x
, 0)), mode
);
5566 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5567 whose value is a comparison can be replaced with a subreg if
5568 STORE_FLAG_VALUE permits. */
5569 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
5570 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0
5571 && (temp
= get_last_value (XEXP (x
, 0)))
5572 && COMPARISON_P (temp
))
5573 return gen_lowpart (mode
, XEXP (x
, 0));
5577 /* (const (const X)) can become (const X). Do it this way rather than
5578 returning the inner CONST since CONST can be shared with a
5580 if (GET_CODE (XEXP (x
, 0)) == CONST
)
5581 SUBST (XEXP (x
, 0), XEXP (XEXP (x
, 0), 0));
5586 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5587 can add in an offset. find_split_point will split this address up
5588 again if it doesn't match. */
5589 if (GET_CODE (XEXP (x
, 0)) == HIGH
5590 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
5596 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5597 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5598 bit-field and can be replaced by either a sign_extend or a
5599 sign_extract. The `and' may be a zero_extend and the two
5600 <c>, -<c> constants may be reversed. */
5601 if (GET_CODE (XEXP (x
, 0)) == XOR
5602 && CONST_INT_P (XEXP (x
, 1))
5603 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
5604 && INTVAL (XEXP (x
, 1)) == -INTVAL (XEXP (XEXP (x
, 0), 1))
5605 && ((i
= exact_log2 (UINTVAL (XEXP (XEXP (x
, 0), 1)))) >= 0
5606 || (i
= exact_log2 (UINTVAL (XEXP (x
, 1)))) >= 0)
5607 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
5608 && ((GET_CODE (XEXP (XEXP (x
, 0), 0)) == AND
5609 && CONST_INT_P (XEXP (XEXP (XEXP (x
, 0), 0), 1))
5610 && (UINTVAL (XEXP (XEXP (XEXP (x
, 0), 0), 1))
5611 == ((unsigned HOST_WIDE_INT
) 1 << (i
+ 1)) - 1))
5612 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) == ZERO_EXTEND
5613 && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)))
5614 == (unsigned int) i
+ 1))))
5615 return simplify_shift_const
5616 (NULL_RTX
, ASHIFTRT
, mode
,
5617 simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
5618 XEXP (XEXP (XEXP (x
, 0), 0), 0),
5619 GET_MODE_BITSIZE (mode
) - (i
+ 1)),
5620 GET_MODE_BITSIZE (mode
) - (i
+ 1));
5622 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5623 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5624 the bitsize of the mode - 1. This allows simplification of
5625 "a = (b & 8) == 0;" */
5626 if (XEXP (x
, 1) == constm1_rtx
5627 && !REG_P (XEXP (x
, 0))
5628 && ! (GET_CODE (XEXP (x
, 0)) == SUBREG
5629 && REG_P (SUBREG_REG (XEXP (x
, 0))))
5630 && nonzero_bits (XEXP (x
, 0), mode
) == 1)
5631 return simplify_shift_const (NULL_RTX
, ASHIFTRT
, mode
,
5632 simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
5633 gen_rtx_XOR (mode
, XEXP (x
, 0), const1_rtx
),
5634 GET_MODE_BITSIZE (mode
) - 1),
5635 GET_MODE_BITSIZE (mode
) - 1);
      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER && temp != tor)
	    return temp;
	}
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
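      /* Illustrative example (added comment, not from the original sources):
	 with pow2 == 8, (minus FOO (and FOO (const_int -8))) keeps only the
	 low three bits of FOO, i.e. it equals (and FOO (const_int 7)).  */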
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}

      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
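      /* Illustrative example (added comment, not from the original sources):
	 (udiv (lshiftrt X (const_int 2)) (const_int 8)) is rewritten as
	 (lshiftrt (lshiftrt X (const_int 2)) (const_int 3)), which
	 simplify_shift_const then folds into a single shift by 5.  */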
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.  */

	  if (STORE_FLAG_VALUE == 1
	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && op1 == const0_rtx
	      && mode == GET_MODE (op0)
	      && nonzero_bits (op0, mode) == 1)
	    return gen_lowpart (mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, mode)
		       == GET_MODE_BITSIZE (mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, mode,
					 gen_lowpart (mode, op0),
					 mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && nonzero_bits (op0, mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, mode,
					  gen_lowpart (mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, mode)
		       == GET_MODE_BITSIZE (mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (gen_lowpart (mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  if (STORE_FLAG_VALUE == -1
	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && op1 == const0_rtx
	      && (num_sign_bit_copies (op0, mode)
		  == GET_MODE_BITSIZE (mode)))
	    return gen_lowpart (mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && nonzero_bits (op0, mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, mode,
					 gen_lowpart (mode, op0),
					 mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, mode)
		       == GET_MODE_BITSIZE (mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, mode,
					 gen_lowpart (mode, op0),
					 mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && nonzero_bits (op0, mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (gen_lowpart (mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		  == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	      && op1 == const0_rtx
	      && mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
					expand_compound_operation (op0),
					GET_MODE_BITSIZE (mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.  */
	  if (new_code != code)
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
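      /* Illustrative example (added comment, not from the original sources):
	 when STORE_FLAG_VALUE is 1 and X is known to be 0 or 1,
	 (ne X (const_int 0)) is replaced by X itself and
	 (eq X (const_int 0)) by (xor X (const_int 1)), so the comparison
	 disappears entirely.  */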
      return simplify_if_then_else (x);

      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

      return simplify_set (x);

      return simplify_logical (x);

      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      ((unsigned HOST_WIDE_INT) 1
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
			      - 1,
			      0));
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
	}

      /* If we are comparing against zero and the expression being tested has
	 only a single bit that might be nonzero, that is its value when it is
	 not equal to zero.  Similarly if it is known to be -1 or 0.  */

      if (true_code == EQ && true_val == const0_rtx
	  && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
	false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
      else if (true_code == EQ && true_val == const0_rtx
	       && (num_sign_bit_copies (from, GET_MODE (from))
		   == GET_MODE_BITSIZE (GET_MODE (from))))
	false_val = constm1_rtx;

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }
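  /* Illustrative example (added comment, not from the original sources):
     for (if_then_else (eq R (const_int 0)) (plus R (const_int 4)) R),
     known_cond lets the true arm be simplified under the assumption R == 0,
     giving (if_then_else (eq R (const_int 0)) (const_int 4) R).  */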
  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }
  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }
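  /* Illustrative example (added comment, not from the original sources):
     (if_then_else (ge A B) A B) becomes (smax A B) here, and
     (if_then_else (ltu A B) A B) becomes (umin A B).  */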
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && GET_MODE_CLASS (mode) == MODE_INT
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      enum machine_mode m = mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_BITSIZE (mode)
		      - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_BITSIZE (mode)
		      - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}

      if (z)
	{
	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0);
	  temp = simplify_gen_binary (MULT, m, temp,
				      simplify_gen_binary (MULT, m, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, mode, temp, m);

	  return temp;
	}
    }
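  /* Illustrative example (added comment, not from the original sources):
     with STORE_FLAG_VALUE == 1,
     (if_then_else COND (plus Z (const_int 4)) Z) becomes
     (plus Z (mult COND (const_int 4))), since the multiply is 4 when COND
     holds and 0 otherwise.  */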
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && ((1 == nonzero_bits (XEXP (cond, 0), mode)
	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), mode)
	       == GET_MODE_BITSIZE (mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, mode,
			    gen_lowpart (mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG for nonzero_bits (REG) == 8.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
      && GET_MODE (XEXP (cond, 0)) == mode
      && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
	 == nonzero_bits (XEXP (cond, 0), mode)
      && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
    return XEXP (cond, 0);
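  /* Illustrative example (added comment, not from the original sources):
     if A is known to be 0 or 1, (if_then_else (ne A (const_int 0))
     (const_int 4) (const_int 0)) is just (ashift A (const_int 2)); the
     exact_log2 call above computes that shift count.  */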
/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  enum machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx other_insn;
  rtx *cc_use;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      enum machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	op0 = XEXP (src, 0), op1 = XEXP (src, 1);
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
	      if (new_rtx != NULL_RTX)
		SUBST (SET_SRC (pat), new_rtx);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}
      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	    }
	}
#endif  /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;
	  rtx old_cc_use = *cc_use;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
	      && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  *cc_use = old_cc_use;
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0),
					     op0, GEN_INT (mask));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

      /* Otherwise, if we didn't previously have a COMPARE in the
	 correct mode, we need one.  */
      if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
      else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, update the COMPARE if needed.  */
      else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }
  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
#ifndef WORD_REGISTER_OPERATIONS
      && (GET_MODE_SIZE (GET_MODE (src))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
					 GET_MODE (SUBREG_REG (src)),
					 GET_MODE (src)))
#endif
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
			  dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_BITSIZE (GET_MODE (src))
	  < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      enum machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (inner, inner_mode)
	      < ((unsigned HOST_WIDE_INT) 1
		 << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }

#ifdef LOAD_EXTEND_OP
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
      && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
      && SUBREG_BYTE (src) == 0
      && (GET_MODE_SIZE (GET_MODE (src))
	  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
      && MEM_P (SUBREG_REG (src)))
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
			    GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
#endif
  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead.  Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
#ifdef HAVE_conditional_move
      && ! can_conditionally_move_p (GET_MODE (src))
#endif
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
			       GET_MODE (XEXP (XEXP (src, 0), 0)))
	  == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		       ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, GET_MODE (src),
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, GET_MODE (src),
				   simplify_gen_unary (NOT, GET_MODE (src),
						       XEXP (XEXP (src, 0), 0),
						       GET_MODE (src)),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, GET_MODE (src),
				  simplify_gen_binary (IOR, GET_MODE (src),
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (CONST_INT_P (op1)
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
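      /* Illustrative note (added comment, not from the original sources):
	 distribute_and_simplify_rtx rewrites e.g. (and (ior A B) C) as
	 (ior (and A C) (and B C)) and hands back a result only when the
	 redistributed form turns out to be an improvement, so masks such as
	 (and (ior X (const_int 4)) (const_int 12)) can fold further.  */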
6660 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6661 operations" because they can be replaced with two more basic operations.
6662 ZERO_EXTEND is also considered "compound" because it can be replaced with
6663 an AND operation, which is simpler, though only one operation.
6665 The function expand_compound_operation is called with an rtx expression
6666 and will convert it to the appropriate shifts and AND operations,
6667 simplifying at each stage.
6669 The function make_compound_operation is called to convert an expression
6670 consisting of shifts and ANDs into the equivalent compound expression.
6671 It is the inverse of this function, loosely speaking. */
6674 expand_compound_operation (rtx x
)
6676 unsigned HOST_WIDE_INT pos
= 0, len
;
6678 unsigned int modewidth
;
6681 switch (GET_CODE (x
))
6686 /* We can't necessarily use a const_int for a multiword mode;
6687 it depends on implicitly extending the value.
6688 Since we don't know the right way to extend it,
6689 we can't tell whether the implicit way is right.
6691 Even for a mode that is no wider than a const_int,
6692 we can't win, because we need to sign extend one of its bits through
6693 the rest of it, and we don't know which bit. */
6694 if (CONST_INT_P (XEXP (x
, 0)))
6697 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6698 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6699 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6700 reloaded. If not for that, MEM's would very rarely be safe.
6702 Reject MODEs bigger than a word, because we might not be able
6703 to reference a two-register group starting with an arbitrary register
6704 (and currently gen_lowpart might crash for a SUBREG). */
6706 if (GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))) > UNITS_PER_WORD
)
6709 /* Reject MODEs that aren't scalar integers because turning vector
6710 or complex modes into shifts causes problems. */
6712 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x
, 0))))
6715 len
= GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0)));
6716 /* If the inner object has VOIDmode (the only way this can happen
6717 is if it is an ASM_OPERANDS), we can't do anything since we don't
6718 know how much masking to do. */
6727 /* ... fall through ... */
6730 /* If the operand is a CLOBBER, just return it. */
6731 if (GET_CODE (XEXP (x
, 0)) == CLOBBER
)
6734 if (!CONST_INT_P (XEXP (x
, 1))
6735 || !CONST_INT_P (XEXP (x
, 2))
6736 || GET_MODE (XEXP (x
, 0)) == VOIDmode
)
6739 /* Reject MODEs that aren't scalar integers because turning vector
6740 or complex modes into shifts causes problems. */
6742 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x
, 0))))
6745 len
= INTVAL (XEXP (x
, 1));
6746 pos
= INTVAL (XEXP (x
, 2));
6748 /* This should stay within the object being extracted, fail otherwise. */
6749 if (len
+ pos
> GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0))))
6752 if (BITS_BIG_ENDIAN
)
6753 pos
= GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0))) - len
- pos
;
6760 /* Convert sign extension to zero extension, if we know that the high
6761 bit is not set, as this is easier to optimize. It will be converted
6762 back to cheaper alternative in make_extraction. */
6763 if (GET_CODE (x
) == SIGN_EXTEND
6764 && (GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
6765 && ((nonzero_bits (XEXP (x
, 0), GET_MODE (XEXP (x
, 0)))
6766 & ~(((unsigned HOST_WIDE_INT
)
6767 GET_MODE_MASK (GET_MODE (XEXP (x
, 0))))
6771 rtx temp
= gen_rtx_ZERO_EXTEND (GET_MODE (x
), XEXP (x
, 0));
6772 rtx temp2
= expand_compound_operation (temp
);
6774 /* Make sure this is a profitable operation. */
6775 if (rtx_cost (x
, SET
, optimize_this_for_speed_p
)
6776 > rtx_cost (temp2
, SET
, optimize_this_for_speed_p
))
6778 else if (rtx_cost (x
, SET
, optimize_this_for_speed_p
)
6779 > rtx_cost (temp
, SET
, optimize_this_for_speed_p
))
6785 /* We can optimize some special cases of ZERO_EXTEND. */
6786 if (GET_CODE (x
) == ZERO_EXTEND
)
6788 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6789 know that the last value didn't have any inappropriate bits
6791 if (GET_CODE (XEXP (x
, 0)) == TRUNCATE
6792 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == GET_MODE (x
)
6793 && GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
6794 && (nonzero_bits (XEXP (XEXP (x
, 0), 0), GET_MODE (x
))
6795 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
6796 return XEXP (XEXP (x
, 0), 0);
6798 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6799 if (GET_CODE (XEXP (x
, 0)) == SUBREG
6800 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == GET_MODE (x
)
6801 && subreg_lowpart_p (XEXP (x
, 0))
6802 && GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
6803 && (nonzero_bits (SUBREG_REG (XEXP (x
, 0)), GET_MODE (x
))
6804 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
6805 return SUBREG_REG (XEXP (x
, 0));
6807 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6808 is a comparison and STORE_FLAG_VALUE permits. This is like
6809 the first case, but it works even when GET_MODE (x) is larger
6810 than HOST_WIDE_INT. */
6811 if (GET_CODE (XEXP (x
, 0)) == TRUNCATE
6812 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == GET_MODE (x
)
6813 && COMPARISON_P (XEXP (XEXP (x
, 0), 0))
6814 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0)))
6815 <= HOST_BITS_PER_WIDE_INT
)
6816 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
6817 return XEXP (XEXP (x
, 0), 0);
6819 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6820 if (GET_CODE (XEXP (x
, 0)) == SUBREG
6821 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == GET_MODE (x
)
6822 && subreg_lowpart_p (XEXP (x
, 0))
6823 && COMPARISON_P (SUBREG_REG (XEXP (x
, 0)))
6824 && (GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0)))
6825 <= HOST_BITS_PER_WIDE_INT
)
6826 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
6827 return SUBREG_REG (XEXP (x
, 0));
6831 /* If we reach here, we want to return a pair of shifts. The inner
6832 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6833 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6834 logical depending on the value of UNSIGNEDP.
6836 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6837 converted into an AND of a shift.
6839 We must check for the case where the left shift would have a negative
6840 count. This can happen in a case like (x >> 31) & 255 on machines
6841 that can't shift by a constant. On those machines, we would first
6842 combine the shift with the AND to produce a variable-position
6843 extraction. Then the constant of 31 would be substituted in
6844 to produce such a position. */
6846 modewidth
= GET_MODE_BITSIZE (GET_MODE (x
));
6847 if (modewidth
>= pos
+ len
)
6849 enum machine_mode mode
= GET_MODE (x
);
6850 tem
= gen_lowpart (mode
, XEXP (x
, 0));
6851 if (!tem
|| GET_CODE (tem
) == CLOBBER
)
6853 tem
= simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
6854 tem
, modewidth
- pos
- len
);
6855 tem
= simplify_shift_const (NULL_RTX
, unsignedp
? LSHIFTRT
: ASHIFTRT
,
6856 mode
, tem
, modewidth
- len
);
6858 else if (unsignedp
&& len
< HOST_BITS_PER_WIDE_INT
)
6859 tem
= simplify_and_const_int (NULL_RTX
, GET_MODE (x
),
6860 simplify_shift_const (NULL_RTX
, LSHIFTRT
,
6863 ((unsigned HOST_WIDE_INT
) 1 << len
) - 1);
6865 /* Any other cases we can't handle. */
6868 /* If we couldn't do this for some reason, return the original
6870 if (GET_CODE (tem
) == CLOBBER
)
6876 /* X is a SET which contains an assignment of one object into
6877 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6878 or certain SUBREGS). If possible, convert it into a series of
6881 We half-heartedly support variable positions, but do not at all
6882 support variable lengths. */
6885 expand_field_assignment (const_rtx x
)
6888 rtx pos
; /* Always counts from low bit. */
6890 rtx mask
, cleared
, masked
;
6891 enum machine_mode compute_mode
;
6893 /* Loop until we find something we can't simplify. */
6896 if (GET_CODE (SET_DEST (x
)) == STRICT_LOW_PART
6897 && GET_CODE (XEXP (SET_DEST (x
), 0)) == SUBREG
)
6899 inner
= SUBREG_REG (XEXP (SET_DEST (x
), 0));
6900 len
= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x
), 0)));
6901 pos
= GEN_INT (subreg_lsb (XEXP (SET_DEST (x
), 0)));
6903 else if (GET_CODE (SET_DEST (x
)) == ZERO_EXTRACT
6904 && CONST_INT_P (XEXP (SET_DEST (x
), 1)))
6906 inner
= XEXP (SET_DEST (x
), 0);
6907 len
= INTVAL (XEXP (SET_DEST (x
), 1));
6908 pos
= XEXP (SET_DEST (x
), 2);
6910 /* A constant position should stay within the width of INNER. */
6911 if (CONST_INT_P (pos
)
6912 && INTVAL (pos
) + len
> GET_MODE_BITSIZE (GET_MODE (inner
)))
6915 if (BITS_BIG_ENDIAN
)
6917 if (CONST_INT_P (pos
))
6918 pos
= GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner
)) - len
6920 else if (GET_CODE (pos
) == MINUS
6921 && CONST_INT_P (XEXP (pos
, 1))
6922 && (INTVAL (XEXP (pos
, 1))
6923 == GET_MODE_BITSIZE (GET_MODE (inner
)) - len
))
6924 /* If position is ADJUST - X, new position is X. */
6925 pos
= XEXP (pos
, 0);
6927 pos
= simplify_gen_binary (MINUS
, GET_MODE (pos
),
6928 GEN_INT (GET_MODE_BITSIZE (
6935 /* A SUBREG between two modes that occupy the same numbers of words
6936 can be done by moving the SUBREG to the source. */
6937 else if (GET_CODE (SET_DEST (x
)) == SUBREG
6938 /* We need SUBREGs to compute nonzero_bits properly. */
6939 && nonzero_sign_valid
6940 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x
)))
6941 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)
6942 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x
))))
6943 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)))
6945 x
= gen_rtx_SET (VOIDmode
, SUBREG_REG (SET_DEST (x
)),
6947 (GET_MODE (SUBREG_REG (SET_DEST (x
))),
6954 while (GET_CODE (inner
) == SUBREG
&& subreg_lowpart_p (inner
))
6955 inner
= SUBREG_REG (inner
);
6957 compute_mode
= GET_MODE (inner
);
6959 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
6960 if (! SCALAR_INT_MODE_P (compute_mode
))
6962 enum machine_mode imode
;
6964 /* Don't do anything for vector or complex integral types. */
6965 if (! FLOAT_MODE_P (compute_mode
))
6968 /* Try to find an integral mode to pun with. */
6969 imode
= mode_for_size (GET_MODE_BITSIZE (compute_mode
), MODE_INT
, 0);
6970 if (imode
== BLKmode
)
6973 compute_mode
= imode
;
6974 inner
= gen_lowpart (imode
, inner
);
6977 /* Compute a mask of LEN bits, if we can do this on the host machine. */
6978 if (len
>= HOST_BITS_PER_WIDE_INT
)
6981 /* Now compute the equivalent expression. Make a copy of INNER
6982 for the SET_DEST in case it is a MEM into which we will substitute;
6983 we don't want shared RTL in that case. */
6984 mask
= GEN_INT (((unsigned HOST_WIDE_INT
) 1 << len
) - 1);
6985 cleared
= simplify_gen_binary (AND
, compute_mode
,
6986 simplify_gen_unary (NOT
, compute_mode
,
6987 simplify_gen_binary (ASHIFT
,
6992 masked
= simplify_gen_binary (ASHIFT
, compute_mode
,
6993 simplify_gen_binary (
6995 gen_lowpart (compute_mode
, SET_SRC (x
)),
6999 x
= gen_rtx_SET (VOIDmode
, copy_rtx (inner
),
7000 simplify_gen_binary (IOR
, compute_mode
,
7007 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7008 it is an RTX that represents a variable starting position; otherwise,
7009 POS is the (constant) starting bit position (counted from the LSB).
7011 UNSIGNEDP is nonzero for an unsigned reference and zero for a
7014 IN_DEST is nonzero if this is a reference in the destination of a
7015 SET. This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7016 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7019 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7020 ZERO_EXTRACT should be built even for bits starting at bit 0.
7022 MODE is the desired mode of the result (if IN_DEST == 0).
7024 The result is an RTX for the extraction or NULL_RTX if the target
7028 make_extraction (enum machine_mode mode
, rtx inner
, HOST_WIDE_INT pos
,
7029 rtx pos_rtx
, unsigned HOST_WIDE_INT len
, int unsignedp
,
7030 int in_dest
, int in_compare
)
7032 /* This mode describes the size of the storage area
7033 to fetch the overall value from. Within that, we
7034 ignore the POS lowest bits, etc. */
7035 enum machine_mode is_mode
= GET_MODE (inner
);
7036 enum machine_mode inner_mode
;
7037 enum machine_mode wanted_inner_mode
;
7038 enum machine_mode wanted_inner_reg_mode
= word_mode
;
7039 enum machine_mode pos_mode
= word_mode
;
7040 enum machine_mode extraction_mode
= word_mode
;
7041 enum machine_mode tmode
= mode_for_size (len
, MODE_INT
, 1);
7043 rtx orig_pos_rtx
= pos_rtx
;
7044 HOST_WIDE_INT orig_pos
;
7046 if (GET_CODE (inner
) == SUBREG
&& subreg_lowpart_p (inner
))
7048 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7049 consider just the QI as the memory to extract from.
7050 The subreg adds or removes high bits; its mode is
7051 irrelevant to the meaning of this extraction,
7052 since POS and LEN count from the lsb. */
7053 if (MEM_P (SUBREG_REG (inner
)))
7054 is_mode
= GET_MODE (SUBREG_REG (inner
));
7055 inner
= SUBREG_REG (inner
);
7057 else if (GET_CODE (inner
) == ASHIFT
7058 && CONST_INT_P (XEXP (inner
, 1))
7059 && pos_rtx
== 0 && pos
== 0
7060 && len
> UINTVAL (XEXP (inner
, 1)))
7062 /* We're extracting the least significant bits of an rtx
7063 (ashift X (const_int C)), where LEN > C. Extract the
7064 least significant (LEN - C) bits of X, giving an rtx
7065 whose mode is MODE, then shift it left C times. */
7066 new_rtx
= make_extraction (mode
, XEXP (inner
, 0),
7067 0, 0, len
- INTVAL (XEXP (inner
, 1)),
7068 unsignedp
, in_dest
, in_compare
);
7070 return gen_rtx_ASHIFT (mode
, new_rtx
, XEXP (inner
, 1));
7073 inner_mode
= GET_MODE (inner
);
7075 if (pos_rtx
&& CONST_INT_P (pos_rtx
))
7076 pos
= INTVAL (pos_rtx
), pos_rtx
= 0;
7078 /* See if this can be done without an extraction. We never can if the
7079 width of the field is not the same as that of some integer mode. For
7080 registers, we can only avoid the extraction if the position is at the
7081 low-order bit and this is either not in the destination or we have the
7082 appropriate STRICT_LOW_PART operation available.
7084 For MEM, we can avoid an extract if the field starts on an appropriate
7085 boundary and we can change the mode of the memory reference. */
7087 if (tmode
!= BLKmode
7088 && ((pos_rtx
== 0 && (pos
% BITS_PER_WORD
) == 0
7090 && (inner_mode
== tmode
7092 || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode
),
7093 GET_MODE_BITSIZE (inner_mode
))
7094 || reg_truncated_to_mode (tmode
, inner
))
7097 && have_insn_for (STRICT_LOW_PART
, tmode
))))
7098 || (MEM_P (inner
) && pos_rtx
== 0
7100 % (STRICT_ALIGNMENT
? GET_MODE_ALIGNMENT (tmode
)
7101 : BITS_PER_UNIT
)) == 0
7102 /* We can't do this if we are widening INNER_MODE (it
7103 may not be aligned, for one thing). */
7104 && GET_MODE_BITSIZE (inner_mode
) >= GET_MODE_BITSIZE (tmode
)
7105 && (inner_mode
== tmode
7106 || (! mode_dependent_address_p (XEXP (inner
, 0))
7107 && ! MEM_VOLATILE_P (inner
))))))
7109 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7110 field. If the original and current mode are the same, we need not
7111 adjust the offset. Otherwise, we do if bytes big endian.
7113 If INNER is not a MEM, get a piece consisting of just the field
7114 of interest (in this case POS % BITS_PER_WORD must be 0). */
7118 HOST_WIDE_INT offset
;
7120 /* POS counts from lsb, but make OFFSET count in memory order. */
7121 if (BYTES_BIG_ENDIAN
)
7122 offset
= (GET_MODE_BITSIZE (is_mode
) - len
- pos
) / BITS_PER_UNIT
;
7124 offset
= pos
/ BITS_PER_UNIT
;
7126 new_rtx
= adjust_address_nv (inner
, tmode
, offset
);
7128 else if (REG_P (inner
))
7130 if (tmode
!= inner_mode
)
7132 /* We can't call gen_lowpart in a DEST since we
7133 always want a SUBREG (see below) and it would sometimes
7134 return a new hard register. */
7137 HOST_WIDE_INT final_word
= pos
/ BITS_PER_WORD
;
7139 if (WORDS_BIG_ENDIAN
7140 && GET_MODE_SIZE (inner_mode
) > UNITS_PER_WORD
)
7141 final_word
= ((GET_MODE_SIZE (inner_mode
)
7142 - GET_MODE_SIZE (tmode
))
7143 / UNITS_PER_WORD
) - final_word
;
7145 final_word
*= UNITS_PER_WORD
;
7146 if (BYTES_BIG_ENDIAN
&&
7147 GET_MODE_SIZE (inner_mode
) > GET_MODE_SIZE (tmode
))
7148 final_word
+= (GET_MODE_SIZE (inner_mode
)
7149 - GET_MODE_SIZE (tmode
)) % UNITS_PER_WORD
;
7151 /* Avoid creating invalid subregs, for example when
7152 simplifying (x>>32)&255. */
7153 if (!validate_subreg (tmode
, inner_mode
, inner
, final_word
))
7156 new_rtx
= gen_rtx_SUBREG (tmode
, inner
, final_word
);
7159 new_rtx
= gen_lowpart (tmode
, inner
);
7165 new_rtx
= force_to_mode (inner
, tmode
,
7166 len
>= HOST_BITS_PER_WIDE_INT
7167 ? ~(unsigned HOST_WIDE_INT
) 0
7168 : ((unsigned HOST_WIDE_INT
) 1 << len
) - 1,
7171 /* If this extraction is going into the destination of a SET,
7172 make a STRICT_LOW_PART unless we made a MEM. */
7175 return (MEM_P (new_rtx
) ? new_rtx
7176 : (GET_CODE (new_rtx
) != SUBREG
7177 ? gen_rtx_CLOBBER (tmode
, const0_rtx
)
7178 : gen_rtx_STRICT_LOW_PART (VOIDmode
, new_rtx
)));
7183 if (CONST_INT_P (new_rtx
)
7184 || GET_CODE (new_rtx
) == CONST_DOUBLE
)
7185 return simplify_unary_operation (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
,
7186 mode
, new_rtx
, tmode
);
7188 /* If we know that no extraneous bits are set, and that the high
7189 bit is not set, convert the extraction to the cheaper of
7190 sign and zero extension, that are equivalent in these cases. */
7191 if (flag_expensive_optimizations
7192 && (GET_MODE_BITSIZE (tmode
) <= HOST_BITS_PER_WIDE_INT
7193 && ((nonzero_bits (new_rtx
, tmode
)
7194 & ~(((unsigned HOST_WIDE_INT
)
7195 GET_MODE_MASK (tmode
))
7199 rtx temp
= gen_rtx_ZERO_EXTEND (mode
, new_rtx
);
7200 rtx temp1
= gen_rtx_SIGN_EXTEND (mode
, new_rtx
);
7202 /* Prefer ZERO_EXTENSION, since it gives more information to
7204 if (rtx_cost (temp
, SET
, optimize_this_for_speed_p
)
7205 <= rtx_cost (temp1
, SET
, optimize_this_for_speed_p
))
7210 /* Otherwise, sign- or zero-extend unless we already are in the
7213 return (gen_rtx_fmt_e (unsignedp
? ZERO_EXTEND
: SIGN_EXTEND
,
7217 /* Unless this is a COMPARE or we have a funny memory reference,
7218 don't do anything with zero-extending field extracts starting at
7219 the low-order bit since they are simple AND operations. */
7220 if (pos_rtx
== 0 && pos
== 0 && ! in_dest
7221 && ! in_compare
&& unsignedp
)
7224 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7225 if the position is not a constant and the length is not 1. In all
7226 other cases, we would only be going outside our object in cases when
7227 an original shift would have been undefined. */
7229 && ((pos_rtx
== 0 && pos
+ len
> GET_MODE_BITSIZE (is_mode
))
7230 || (pos_rtx
!= 0 && len
!= 1)))
7233 /* Get the mode to use should INNER not be a MEM, the mode for the position,
7234 and the mode for the result. */
7235 if (in_dest
&& mode_for_extraction (EP_insv
, -1) != MAX_MACHINE_MODE
)
7237 wanted_inner_reg_mode
= mode_for_extraction (EP_insv
, 0);
7238 pos_mode
= mode_for_extraction (EP_insv
, 2);
7239 extraction_mode
= mode_for_extraction (EP_insv
, 3);
7242 if (! in_dest
&& unsignedp
7243 && mode_for_extraction (EP_extzv
, -1) != MAX_MACHINE_MODE
)
7245 wanted_inner_reg_mode
= mode_for_extraction (EP_extzv
, 1);
7246 pos_mode
= mode_for_extraction (EP_extzv
, 3);
7247 extraction_mode
= mode_for_extraction (EP_extzv
, 0);
7250 if (! in_dest
&& ! unsignedp
7251 && mode_for_extraction (EP_extv
, -1) != MAX_MACHINE_MODE
)
7253 wanted_inner_reg_mode
= mode_for_extraction (EP_extv
, 1);
7254 pos_mode
= mode_for_extraction (EP_extv
, 3);
7255 extraction_mode
= mode_for_extraction (EP_extv
, 0);
7258 /* Never narrow an object, since that might not be safe. */
7260 if (mode
!= VOIDmode
7261 && GET_MODE_SIZE (extraction_mode
) < GET_MODE_SIZE (mode
))
7262 extraction_mode
= mode
;
7264 if (pos_rtx
&& GET_MODE (pos_rtx
) != VOIDmode
7265 && GET_MODE_SIZE (pos_mode
) < GET_MODE_SIZE (GET_MODE (pos_rtx
)))
7266 pos_mode
= GET_MODE (pos_rtx
);
7268 /* If this is not from memory, the desired mode is the preferred mode
7269 for an extraction pattern's first input operand, or word_mode if there
7272 wanted_inner_mode
= wanted_inner_reg_mode
;
7275 /* Be careful not to go beyond the extracted object and maintain the
7276 natural alignment of the memory. */
7277 wanted_inner_mode
= smallest_mode_for_size (len
, MODE_INT
);
7278 while (pos
% GET_MODE_BITSIZE (wanted_inner_mode
) + len
7279 > GET_MODE_BITSIZE (wanted_inner_mode
))
7281 wanted_inner_mode
= GET_MODE_WIDER_MODE (wanted_inner_mode
);
7282 gcc_assert (wanted_inner_mode
!= VOIDmode
);
7285 /* If we have to change the mode of memory and cannot, the desired mode
7286 is EXTRACTION_MODE. */
7287 if (inner_mode
!= wanted_inner_mode
7288 && (mode_dependent_address_p (XEXP (inner
, 0))
7289 || MEM_VOLATILE_P (inner
)
7291 wanted_inner_mode
= extraction_mode
;
7296 if (BITS_BIG_ENDIAN
)
7298 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7299 BITS_BIG_ENDIAN style. If position is constant, compute new
7300 position. Otherwise, build subtraction.
7301 Note that POS is relative to the mode of the original argument.
7302 If it's a MEM we need to recompute POS relative to that.
7303 However, if we're extracting from (or inserting into) a register,
7304 we want to recompute POS relative to wanted_inner_mode. */
7305 int width
= (MEM_P (inner
)
7306 ? GET_MODE_BITSIZE (is_mode
)
7307 : GET_MODE_BITSIZE (wanted_inner_mode
));
7310 pos
= width
- len
- pos
;
7313 = gen_rtx_MINUS (GET_MODE (pos_rtx
), GEN_INT (width
- len
), pos_rtx
);
7314 /* POS may be less than 0 now, but we check for that below.
7315 Note that it can only be less than 0 if !MEM_P (inner). */
7318 /* If INNER has a wider mode, and this is a constant extraction, try to
7319 make it smaller and adjust the byte to point to the byte containing
7321 if (wanted_inner_mode
!= VOIDmode
7322 && inner_mode
!= wanted_inner_mode
7324 && GET_MODE_SIZE (wanted_inner_mode
) < GET_MODE_SIZE (is_mode
)
7326 && ! mode_dependent_address_p (XEXP (inner
, 0))
7327 && ! MEM_VOLATILE_P (inner
))
7331 /* The computations below will be correct if the machine is big
7332 endian in both bits and bytes or little endian in bits and bytes.
7333 If it is mixed, we must adjust. */
7335 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7336 adjust OFFSET to compensate. */
7337 if (BYTES_BIG_ENDIAN
7338 && GET_MODE_SIZE (inner_mode
) < GET_MODE_SIZE (is_mode
))
7339 offset
-= GET_MODE_SIZE (is_mode
) - GET_MODE_SIZE (inner_mode
);
7341 /* We can now move to the desired byte. */
7342 offset
+= (pos
/ GET_MODE_BITSIZE (wanted_inner_mode
))
7343 * GET_MODE_SIZE (wanted_inner_mode
);
7344 pos
%= GET_MODE_BITSIZE (wanted_inner_mode
);
7346 if (BYTES_BIG_ENDIAN
!= BITS_BIG_ENDIAN
7347 && is_mode
!= wanted_inner_mode
)
7348 offset
= (GET_MODE_SIZE (is_mode
)
7349 - GET_MODE_SIZE (wanted_inner_mode
) - offset
);
7351 inner
= adjust_address_nv (inner
, wanted_inner_mode
, offset
);
7354 /* If INNER is not memory, get it into the proper mode. If we are changing
7355 its mode, POS must be a constant and smaller than the size of the new
7357 else if (!MEM_P (inner
))
7359 /* On the LHS, don't create paradoxical subregs implicitely truncating
7360 the register unless TRULY_NOOP_TRUNCATION. */
7362 && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (inner
)),
7363 GET_MODE_BITSIZE (wanted_inner_mode
)))
7366 if (GET_MODE (inner
) != wanted_inner_mode
7368 || orig_pos
+ len
> GET_MODE_BITSIZE (wanted_inner_mode
)))
7374 inner
= force_to_mode (inner
, wanted_inner_mode
,
7376 || len
+ orig_pos
>= HOST_BITS_PER_WIDE_INT
7377 ? ~(unsigned HOST_WIDE_INT
) 0
7378 : ((((unsigned HOST_WIDE_INT
) 1 << len
) - 1)
7383 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7384 have to zero extend. Otherwise, we can just use a SUBREG. */
7386 && GET_MODE_SIZE (pos_mode
) > GET_MODE_SIZE (GET_MODE (pos_rtx
)))
7388 rtx temp
= gen_rtx_ZERO_EXTEND (pos_mode
, pos_rtx
);
7390 /* If we know that no extraneous bits are set, and that the high
7391 bit is not set, convert extraction to cheaper one - either
7392 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7394 if (flag_expensive_optimizations
7395 && (GET_MODE_BITSIZE (GET_MODE (pos_rtx
)) <= HOST_BITS_PER_WIDE_INT
7396 && ((nonzero_bits (pos_rtx
, GET_MODE (pos_rtx
))
7397 & ~(((unsigned HOST_WIDE_INT
)
7398 GET_MODE_MASK (GET_MODE (pos_rtx
)))
7402 rtx temp1
= gen_rtx_SIGN_EXTEND (pos_mode
, pos_rtx
);
7404 /* Prefer ZERO_EXTENSION, since it gives more information to
7406 if (rtx_cost (temp1
, SET
, optimize_this_for_speed_p
)
7407 < rtx_cost (temp
, SET
, optimize_this_for_speed_p
))
7412 else if (pos_rtx
!= 0
7413 && GET_MODE_SIZE (pos_mode
) < GET_MODE_SIZE (GET_MODE (pos_rtx
)))
7414 pos_rtx
= gen_lowpart (pos_mode
, pos_rtx
);
7416 /* Make POS_RTX unless we already have it and it is correct. If we don't
7417 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7419 if (pos_rtx
== 0 && orig_pos_rtx
!= 0 && INTVAL (orig_pos_rtx
) == pos
)
7420 pos_rtx
= orig_pos_rtx
;
7422 else if (pos_rtx
== 0)
7423 pos_rtx
= GEN_INT (pos
);
7425 /* Make the required operation. See if we can use existing rtx. */
7426 new_rtx
= gen_rtx_fmt_eee (unsignedp
? ZERO_EXTRACT
: SIGN_EXTRACT
,
7427 extraction_mode
, inner
, GEN_INT (len
), pos_rtx
);
7429 new_rtx
= gen_lowpart (mode
, new_rtx
);
/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */

static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
         either the value being shifted if the shift count is equal to
         COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= count)
        return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
        return simplify_gen_unary (code, mode, tem, mode);
      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
         make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
          && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
        return simplify_gen_binary (code, mode, tem,
                                    GEN_INT (INTVAL (XEXP (x, 1)) >> count));
      break;

    default:
      break;
    }

  return 0;
}
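/* Illustrative sketch (not part of the original sources): with COUNT == 2,
   an rtx of the form

       (plus (ashift (reg R) (const_int 2)) (const_int 12))

   satisfies the PLUS case above, since 12 has its two low bits clear, so the
   recursive call strips the shift and the result is

       (plus (reg R) (const_int 3))

   i.e. the original expression shifted right by COUNT.  */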
7481 /* Look at the expression rooted at X. Look for expressions
7482 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7483 Form these expressions.
7485 Return the new rtx, usually just X.
7487 Also, for machines like the VAX that don't have logical shift insns,
7488 try to convert logical to arithmetic shift operations in cases where
7489 they are equivalent. This undoes the canonicalizations to logical
7490 shifts done elsewhere.
7492 We try, as much as possible, to re-use rtl expressions to save memory.
7494 IN_CODE says what kind of expression we are processing. Normally, it is
7495 SET. In a memory address (inside a MEM, PLUS or minus, the latter two
7496 being kludges), it is MEM. When processing the arguments of a comparison
7497 or a COMPARE against zero, it is COMPARE. */
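/* Illustrative sketch (not part of the original sources): IN_CODE changes
   which rewrites are profitable.  Inside a MEM, (ashift (reg I) (const_int 2))
   is turned into (mult (reg I) (const_int 4)) because scaled-index addresses
   are written with MULT; under COMPARE, (and (reg R) (const_int 8)) can
   become a one-bit ZERO_EXTRACT since only the comparison against zero
   matters; under the default SET, neither rewrite would necessarily help.  */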
7500 make_compound_operation (rtx x
, enum rtx_code in_code
)
7502 enum rtx_code code
= GET_CODE (x
);
7503 enum machine_mode mode
= GET_MODE (x
);
7504 int mode_width
= GET_MODE_BITSIZE (mode
);
7506 enum rtx_code next_code
;
7512 /* Select the code to be used in recursive calls. Once we are inside an
7513 address, we stay there. If we have a comparison, set to COMPARE,
7514 but once inside, go back to our default of SET. */
7516 next_code
= (code
== MEM
? MEM
7517 : ((code
== PLUS
|| code
== MINUS
)
7518 && SCALAR_INT_MODE_P (mode
)) ? MEM
7519 : ((code
== COMPARE
|| COMPARISON_P (x
))
7520 && XEXP (x
, 1) == const0_rtx
) ? COMPARE
7521 : in_code
== COMPARE
? SET
: in_code
);
7523 /* Process depending on the code of this operation. If NEW is set
7524 nonzero, it will be returned. */
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
         an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
          HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;

          new_rtx = make_compound_operation (XEXP (x, 0), next_code);
          if (GET_CODE (new_rtx) == NEG)
            {
              new_rtx = XEXP (new_rtx, 0);
              multval = -multval;
            }
          multval = trunc_int_for_mode (multval, mode);
          new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
        }
      break;
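/* Illustrative sketch (not part of the original sources): inside an address,
   a scaled index such as

       (mem (plus (reg BASE) (ashift (reg INDEX) (const_int 2))))

   has its shift rewritten by the case above into

       (mem (plus (reg BASE) (mult (reg INDEX) (const_int 4))))

   because MULT is the canonical form for scaling inside a MEM, which lets
   targets with base plus scaled-index addressing match the address
   directly.  */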
7553 lhs
= make_compound_operation (lhs
, next_code
);
7554 rhs
= make_compound_operation (rhs
, next_code
);
7555 if (GET_CODE (lhs
) == MULT
&& GET_CODE (XEXP (lhs
, 0)) == NEG
7556 && SCALAR_INT_MODE_P (mode
))
7558 tem
= simplify_gen_binary (MULT
, mode
, XEXP (XEXP (lhs
, 0), 0),
7560 new_rtx
= simplify_gen_binary (MINUS
, mode
, rhs
, tem
);
7562 else if (GET_CODE (lhs
) == MULT
7563 && (CONST_INT_P (XEXP (lhs
, 1)) && INTVAL (XEXP (lhs
, 1)) < 0))
7565 tem
= simplify_gen_binary (MULT
, mode
, XEXP (lhs
, 0),
7566 simplify_gen_unary (NEG
, mode
,
7569 new_rtx
= simplify_gen_binary (MINUS
, mode
, rhs
, tem
);
7573 SUBST (XEXP (x
, 0), lhs
);
7574 SUBST (XEXP (x
, 1), rhs
);
7577 x
= gen_lowpart (mode
, new_rtx
);
7583 lhs
= make_compound_operation (lhs
, next_code
);
7584 rhs
= make_compound_operation (rhs
, next_code
);
7585 if (GET_CODE (rhs
) == MULT
&& GET_CODE (XEXP (rhs
, 0)) == NEG
7586 && SCALAR_INT_MODE_P (mode
))
7588 tem
= simplify_gen_binary (MULT
, mode
, XEXP (XEXP (rhs
, 0), 0),
7590 new_rtx
= simplify_gen_binary (PLUS
, mode
, tem
, lhs
);
7592 else if (GET_CODE (rhs
) == MULT
7593 && (CONST_INT_P (XEXP (rhs
, 1)) && INTVAL (XEXP (rhs
, 1)) < 0))
7595 tem
= simplify_gen_binary (MULT
, mode
, XEXP (rhs
, 0),
7596 simplify_gen_unary (NEG
, mode
,
7599 new_rtx
= simplify_gen_binary (PLUS
, mode
, tem
, lhs
);
7603 SUBST (XEXP (x
, 0), lhs
);
7604 SUBST (XEXP (x
, 1), rhs
);
7607 return gen_lowpart (mode
, new_rtx
);
7610 /* If the second operand is not a constant, we can't do anything
7612 if (!CONST_INT_P (XEXP (x
, 1)))
      /* If the constant is a power of two minus one and the first operand
         is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
                                     i, 1, 0, in_code == COMPARE);
        }
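/* Illustrative sketch (not part of the original sources): for
   (and (lshiftrt (reg R) (const_int 12)) (const_int 31)), the mask 31 is
   2**5 - 1, so exact_log2 (31 + 1) == 5 and the branch above builds
   (zero_extract (reg R) (const_int 5) (const_int 12)): a 5-bit unsigned
   field starting at bit 12.  */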
7625 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7626 else if (GET_CODE (XEXP (x
, 0)) == SUBREG
7627 && subreg_lowpart_p (XEXP (x
, 0))
7628 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == LSHIFTRT
7629 && (i
= exact_log2 (UINTVAL (XEXP (x
, 1)) + 1)) >= 0)
7631 new_rtx
= make_compound_operation (XEXP (SUBREG_REG (XEXP (x
, 0)), 0),
7633 new_rtx
= make_extraction (GET_MODE (SUBREG_REG (XEXP (x
, 0))), new_rtx
, 0,
7634 XEXP (SUBREG_REG (XEXP (x
, 0)), 1), i
, 1,
7635 0, in_code
== COMPARE
);
7637 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7638 else if ((GET_CODE (XEXP (x
, 0)) == XOR
7639 || GET_CODE (XEXP (x
, 0)) == IOR
)
7640 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == LSHIFTRT
7641 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == LSHIFTRT
7642 && (i
= exact_log2 (UINTVAL (XEXP (x
, 1)) + 1)) >= 0)
7644 /* Apply the distributive law, and then try to make extractions. */
7645 new_rtx
= gen_rtx_fmt_ee (GET_CODE (XEXP (x
, 0)), mode
,
7646 gen_rtx_AND (mode
, XEXP (XEXP (x
, 0), 0),
7648 gen_rtx_AND (mode
, XEXP (XEXP (x
, 0), 1),
7650 new_rtx
= make_compound_operation (new_rtx
, in_code
);
      /* If we have (and (rotate X C) M) and C is larger than the number
         of bits in M, this is an extraction.  */
7656 else if (GET_CODE (XEXP (x
, 0)) == ROTATE
7657 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
7658 && (i
= exact_log2 (UINTVAL (XEXP (x
, 1)) + 1)) >= 0
7659 && i
<= INTVAL (XEXP (XEXP (x
, 0), 1)))
7661 new_rtx
= make_compound_operation (XEXP (XEXP (x
, 0), 0), next_code
);
7662 new_rtx
= make_extraction (mode
, new_rtx
,
7663 (GET_MODE_BITSIZE (mode
)
7664 - INTVAL (XEXP (XEXP (x
, 0), 1))),
7665 NULL_RTX
, i
, 1, 0, in_code
== COMPARE
);
7668 /* On machines without logical shifts, if the operand of the AND is
7669 a logical shift and our mask turns off all the propagated sign
7670 bits, we can replace the logical shift with an arithmetic shift. */
7671 else if (GET_CODE (XEXP (x
, 0)) == LSHIFTRT
7672 && !have_insn_for (LSHIFTRT
, mode
)
7673 && have_insn_for (ASHIFTRT
, mode
)
7674 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
7675 && INTVAL (XEXP (XEXP (x
, 0), 1)) >= 0
7676 && INTVAL (XEXP (XEXP (x
, 0), 1)) < HOST_BITS_PER_WIDE_INT
7677 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
7679 unsigned HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
7681 mask
>>= INTVAL (XEXP (XEXP (x
, 0), 1));
7682 if ((INTVAL (XEXP (x
, 1)) & ~mask
) == 0)
7684 gen_rtx_ASHIFTRT (mode
,
7685 make_compound_operation
7686 (XEXP (XEXP (x
, 0), 0), next_code
),
7687 XEXP (XEXP (x
, 0), 1)));
7690 /* If the constant is one less than a power of two, this might be
7691 representable by an extraction even if no shift is present.
7692 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7693 we are in a COMPARE. */
7694 else if ((i
= exact_log2 (UINTVAL (XEXP (x
, 1)) + 1)) >= 0)
7695 new_rtx
= make_extraction (mode
,
7696 make_compound_operation (XEXP (x
, 0),
7698 0, NULL_RTX
, i
, 1, 0, in_code
== COMPARE
);
7700 /* If we are in a comparison and this is an AND with a power of two,
7701 convert this into the appropriate bit extract. */
7702 else if (in_code
== COMPARE
7703 && (i
= exact_log2 (UINTVAL (XEXP (x
, 1)))) >= 0)
7704 new_rtx
= make_extraction (mode
,
7705 make_compound_operation (XEXP (x
, 0),
7707 i
, NULL_RTX
, 1, 1, 0, 1);
7712 /* If the sign bit is known to be zero, replace this with an
7713 arithmetic shift. */
7714 if (have_insn_for (ASHIFTRT
, mode
)
7715 && ! have_insn_for (LSHIFTRT
, mode
)
7716 && mode_width
<= HOST_BITS_PER_WIDE_INT
7717 && (nonzero_bits (XEXP (x
, 0), mode
) & (1 << (mode_width
- 1))) == 0)
7719 new_rtx
= gen_rtx_ASHIFTRT (mode
,
7720 make_compound_operation (XEXP (x
, 0),
7726 /* ... fall through ... */
7732 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7733 this is a SIGN_EXTRACT. */
7734 if (CONST_INT_P (rhs
)
7735 && GET_CODE (lhs
) == ASHIFT
7736 && CONST_INT_P (XEXP (lhs
, 1))
7737 && INTVAL (rhs
) >= INTVAL (XEXP (lhs
, 1))
7738 && INTVAL (XEXP (lhs
, 1)) >= 0
7739 && INTVAL (rhs
) < mode_width
)
7741 new_rtx
= make_compound_operation (XEXP (lhs
, 0), next_code
);
7742 new_rtx
= make_extraction (mode
, new_rtx
,
7743 INTVAL (rhs
) - INTVAL (XEXP (lhs
, 1)),
7744 NULL_RTX
, mode_width
- INTVAL (rhs
),
7745 code
== LSHIFTRT
, 0, in_code
== COMPARE
);
7749 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7750 If so, try to merge the shifts into a SIGN_EXTEND. We could
7751 also do this for some cases of SIGN_EXTRACT, but it doesn't
7752 seem worth the effort; the case checked for occurs on Alpha. */
7755 && ! (GET_CODE (lhs
) == SUBREG
7756 && (OBJECT_P (SUBREG_REG (lhs
))))
7757 && CONST_INT_P (rhs
)
7758 && INTVAL (rhs
) < HOST_BITS_PER_WIDE_INT
7759 && INTVAL (rhs
) < mode_width
7760 && (new_rtx
= extract_left_shift (lhs
, INTVAL (rhs
))) != 0)
7761 new_rtx
= make_extraction (mode
, make_compound_operation (new_rtx
, next_code
),
7762 0, NULL_RTX
, mode_width
- INTVAL (rhs
),
7763 code
== LSHIFTRT
, 0, in_code
== COMPARE
);
7768 /* Call ourselves recursively on the inner expression. If we are
7769 narrowing the object and it has a different RTL code from
7770 what it originally did, do this SUBREG as a force_to_mode. */
7772 rtx inner
= SUBREG_REG (x
), simplified
;
7774 tem
= make_compound_operation (inner
, in_code
);
7777 = simplify_subreg (mode
, tem
, GET_MODE (inner
), SUBREG_BYTE (x
));
7781 if (GET_CODE (tem
) != GET_CODE (inner
)
7782 && GET_MODE_SIZE (mode
) < GET_MODE_SIZE (GET_MODE (inner
))
7783 && subreg_lowpart_p (x
))
7786 = force_to_mode (tem
, mode
, ~(unsigned HOST_WIDE_INT
) 0, 0);
7788 /* If we have something other than a SUBREG, we might have
7789 done an expansion, so rerun ourselves. */
7790 if (GET_CODE (newer
) != SUBREG
)
7791 newer
= make_compound_operation (newer
, in_code
);
7793 /* force_to_mode can expand compounds. If it just re-expanded the
7794 compound, use gen_lowpart to convert to the desired mode. */
7795 if (rtx_equal_p (newer
, x
)
7796 /* Likewise if it re-expanded the compound only partially.
7797 This happens for SUBREG of ZERO_EXTRACT if they extract
7798 the same number of bits. */
7799 || (GET_CODE (newer
) == SUBREG
7800 && (GET_CODE (SUBREG_REG (newer
)) == LSHIFTRT
7801 || GET_CODE (SUBREG_REG (newer
)) == ASHIFTRT
)
7802 && GET_CODE (inner
) == AND
7803 && rtx_equal_p (SUBREG_REG (newer
), XEXP (inner
, 0))))
7804 return gen_lowpart (GET_MODE (x
), tem
);
7820 x
= gen_lowpart (mode
, new_rtx
);
7821 code
= GET_CODE (x
);
7824 /* Now recursively process each operand of this operation. */
7825 fmt
= GET_RTX_FORMAT (code
);
7826 for (i
= 0; i
< GET_RTX_LENGTH (code
); i
++)
7829 new_rtx
= make_compound_operation (XEXP (x
, i
), next_code
);
7830 SUBST (XEXP (x
, i
), new_rtx
);
7832 else if (fmt
[i
] == 'E')
7833 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
7835 new_rtx
= make_compound_operation (XVECEXP (x
, i
, j
), next_code
);
7836 SUBST (XVECEXP (x
, i
, j
), new_rtx
);
7840 /* If this is a commutative operation, the changes to the operands
7841 may have made it noncanonical. */
7842 if (COMMUTATIVE_ARITH_P (x
)
7843 && swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
7846 SUBST (XEXP (x
, 0), XEXP (x
, 1));
7847 SUBST (XEXP (x
, 1), tem
);
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
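/* Illustrative sketch (not part of the original sources): a stand-alone
   version of the same bit trick in plain C, for a mask like 0x0ff0
   (bits 4..11 set):

     #include <stdio.h>

     int main (void)
     {
       unsigned long m = 0x0ff0;
       int pos = __builtin_ctzl (m);           /- 4: first set bit from the right
       unsigned long field = (m >> pos) + 1;   /- 0x100: a power of two iff the field is contiguous
       int len = (field & (field - 1)) == 0 ? __builtin_ctzl (field) : -1;   /- 8
       printf ("pos=%d len=%d\n", pos, len);   /- prints pos=4 len=8
       return 0;
     }

   so a mask of 0x0ff0 selects an 8-bit field starting at bit 4, which is
   what get_pos_from_mask reports via its return value and *PLEN.  */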
7879 /* If X refers to a register that equals REG in value, replace these
7880 references with REG. */
7882 canon_reg_for_combine (rtx x
, rtx reg
)
7889 enum rtx_code code
= GET_CODE (x
);
7890 switch (GET_RTX_CLASS (code
))
7893 op0
= canon_reg_for_combine (XEXP (x
, 0), reg
);
7894 if (op0
!= XEXP (x
, 0))
7895 return simplify_gen_unary (GET_CODE (x
), GET_MODE (x
), op0
,
7900 case RTX_COMM_ARITH
:
7901 op0
= canon_reg_for_combine (XEXP (x
, 0), reg
);
7902 op1
= canon_reg_for_combine (XEXP (x
, 1), reg
);
7903 if (op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1))
7904 return simplify_gen_binary (GET_CODE (x
), GET_MODE (x
), op0
, op1
);
7908 case RTX_COMM_COMPARE
:
7909 op0
= canon_reg_for_combine (XEXP (x
, 0), reg
);
7910 op1
= canon_reg_for_combine (XEXP (x
, 1), reg
);
7911 if (op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1))
7912 return simplify_gen_relational (GET_CODE (x
), GET_MODE (x
),
7913 GET_MODE (op0
), op0
, op1
);
7917 case RTX_BITFIELD_OPS
:
7918 op0
= canon_reg_for_combine (XEXP (x
, 0), reg
);
7919 op1
= canon_reg_for_combine (XEXP (x
, 1), reg
);
7920 op2
= canon_reg_for_combine (XEXP (x
, 2), reg
);
7921 if (op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1) || op2
!= XEXP (x
, 2))
7922 return simplify_gen_ternary (GET_CODE (x
), GET_MODE (x
),
7923 GET_MODE (op0
), op0
, op1
, op2
);
7928 if (rtx_equal_p (get_last_value (reg
), x
)
7929 || rtx_equal_p (reg
, get_last_value (x
)))
7938 fmt
= GET_RTX_FORMAT (code
);
7940 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
7943 rtx op
= canon_reg_for_combine (XEXP (x
, i
), reg
);
7944 if (op
!= XEXP (x
, i
))
7954 else if (fmt
[i
] == 'E')
7957 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
7959 rtx op
= canon_reg_for_combine (XVECEXP (x
, i
, j
), reg
);
7960 if (op
!= XVECEXP (x
, i
, j
))
7967 XVECEXP (x
, i
, j
) = op
;
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                 GET_MODE_BITSIZE (GET_MODE (x)))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
        x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
                              x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */
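/* Illustrative sketch (not part of the original sources): if X is
   (and (reg R) (const_int 0x1ff)) and the caller only needs the bits in
   MASK == 0xff, the AND is redundant once the mask is applied, so
   force_to_mode can return just (reg R) converted to MODE; likewise a
   CONST_INT such as 0x1234 comes back as 0x34 when MASK == 0xff.  When
   JUST_SELECT is nonzero the routine will not replace X by zero merely
   because none of the MASK bits can be nonzero in X, since the caller is
   about to complement the value.  */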
8015 force_to_mode (rtx x
, enum machine_mode mode
, unsigned HOST_WIDE_INT mask
,
8018 enum rtx_code code
= GET_CODE (x
);
8019 int next_select
= just_select
|| code
== XOR
|| code
== NOT
|| code
== NEG
;
8020 enum machine_mode op_mode
;
8021 unsigned HOST_WIDE_INT fuller_mask
, nonzero
;
8024 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8025 code below will do the wrong thing since the mode of such an
8026 expression is VOIDmode.
8028 Also do nothing if X is a CLOBBER; this can happen if X was
8029 the return value from a call to gen_lowpart. */
8030 if (code
== CALL
|| code
== ASM_OPERANDS
|| code
== CLOBBER
)
  /* We want to perform the operation in its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
              && have_insn_for (code, mode))
             ? mode : GET_MODE (x));
8040 /* It is not valid to do a right-shift in a narrower mode
8041 than the one it came in with. */
8042 if ((code
== LSHIFTRT
|| code
== ASHIFTRT
)
8043 && GET_MODE_BITSIZE (mode
) < GET_MODE_BITSIZE (GET_MODE (x
)))
8044 op_mode
= GET_MODE (x
);
8046 /* Truncate MASK to fit OP_MODE. */
8048 mask
&= GET_MODE_MASK (op_mode
);
  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
  else
    fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
                   - 1);
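  /* Illustrative sketch (not part of the original sources): for
     MASK == 0x14 (binary 10100), floor_log2 (0x14) is 4, so

         fuller_mask = ((unsigned HOST_WIDE_INT) 1 << 5) - 1 == 0x1f

     i.e. every bit up to and including the highest bit set in MASK, which is
     what an addition or an unknown-count shift feeding those bits can
     affect through carries.  */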
8059 /* Determine what bits of X are guaranteed to be (non)zero. */
8060 nonzero
= nonzero_bits (x
, mode
);
8062 /* If none of the bits in X are needed, return a zero. */
8063 if (!just_select
&& (nonzero
& mask
) == 0 && !side_effects_p (x
))
8066 /* If X is a CONST_INT, return a new one. Do this here since the
8067 test below will fail. */
8068 if (CONST_INT_P (x
))
8070 if (SCALAR_INT_MODE_P (mode
))
8071 return gen_int_mode (INTVAL (x
) & mask
, mode
);
8074 x
= GEN_INT (INTVAL (x
) & mask
);
8075 return gen_lowpart_common (mode
, x
);
8079 /* If X is narrower than MODE and we want all the bits in X's mode, just
8080 get X in the proper mode. */
8081 if (GET_MODE_SIZE (GET_MODE (x
)) < GET_MODE_SIZE (mode
)
8082 && (GET_MODE_MASK (GET_MODE (x
)) & ~mask
) == 0)
8083 return gen_lowpart (mode
, x
);
8085 /* We can ignore the effect of a SUBREG if it narrows the mode or
8086 if the constant masks to zero all the bits the mode doesn't have. */
8087 if (GET_CODE (x
) == SUBREG
8088 && subreg_lowpart_p (x
)
8089 && ((GET_MODE_SIZE (GET_MODE (x
))
8090 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
))))
8092 & GET_MODE_MASK (GET_MODE (x
))
8093 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x
)))))))
8094 return force_to_mode (SUBREG_REG (x
), mode
, mask
, next_select
);
8096 /* The arithmetic simplifications here only work for scalar integer modes. */
8097 if (!SCALAR_INT_MODE_P (mode
) || !SCALAR_INT_MODE_P (GET_MODE (x
)))
8098 return gen_lowpart_or_truncate (mode
, x
);
8103 /* If X is a (clobber (const_int)), return it since we know we are
8104 generating something that won't match. */
8111 x
= expand_compound_operation (x
);
8112 if (GET_CODE (x
) != code
)
8113 return force_to_mode (x
, mode
, mask
, next_select
);
8117 /* Similarly for a truncate. */
8118 return force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8121 /* If this is an AND with a constant, convert it into an AND
8122 whose constant is the AND of that constant with MASK. If it
8123 remains an AND of MASK, delete it since it is redundant. */
8125 if (CONST_INT_P (XEXP (x
, 1)))
8127 x
= simplify_and_const_int (x
, op_mode
, XEXP (x
, 0),
8128 mask
& INTVAL (XEXP (x
, 1)));
8130 /* If X is still an AND, see if it is an AND with a mask that
8131 is just some low-order bits. If so, and it is MASK, we don't
8134 if (GET_CODE (x
) == AND
&& CONST_INT_P (XEXP (x
, 1))
8135 && ((INTVAL (XEXP (x
, 1)) & GET_MODE_MASK (GET_MODE (x
)))
8139 /* If it remains an AND, try making another AND with the bits
8140 in the mode mask that aren't in MASK turned on. If the
8141 constant in the AND is wide enough, this might make a
8142 cheaper constant. */
8144 if (GET_CODE (x
) == AND
&& CONST_INT_P (XEXP (x
, 1))
8145 && GET_MODE_MASK (GET_MODE (x
)) != mask
8146 && GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
)
8148 unsigned HOST_WIDE_INT cval
8149 = UINTVAL (XEXP (x
, 1))
8150 | (GET_MODE_MASK (GET_MODE (x
)) & ~mask
);
8151 int width
= GET_MODE_BITSIZE (GET_MODE (x
));
8154 /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
8155 number, sign extend it. */
8156 if (width
> 0 && width
< HOST_BITS_PER_WIDE_INT
8157 && (cval
& ((unsigned HOST_WIDE_INT
) 1 << (width
- 1))) != 0)
8158 cval
|= (unsigned HOST_WIDE_INT
) -1 << width
;
8160 y
= simplify_gen_binary (AND
, GET_MODE (x
),
8161 XEXP (x
, 0), GEN_INT (cval
));
8162 if (rtx_cost (y
, SET
, optimize_this_for_speed_p
)
8163 < rtx_cost (x
, SET
, optimize_this_for_speed_p
))
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
         low-order bits (as in an alignment operation) and FOO is already
         aligned to that boundary, mask C1 to that boundary as well.
         This may eliminate that PLUS and, later, the AND.  */

      {
        unsigned int width = GET_MODE_BITSIZE (mode);
        unsigned HOST_WIDE_INT smask = mask;

        /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
           number, sign extend it.  */

        if (width < HOST_BITS_PER_WIDE_INT
            && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
          smask |= (unsigned HOST_WIDE_INT) (-1) << width;

        if (CONST_INT_P (XEXP (x, 1))
            && exact_log2 (- smask) >= 0
            && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
            && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
          return force_to_mode (plus_constant (XEXP (x, 0),
                                               (INTVAL (XEXP (x, 1)) & smask)),
                                mode, smask, next_select);
      }
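      /* Illustrative sketch (not part of the original sources): with a
         32-bit MODE, an alignment mask MASK == 0xfffffff8 (so -smask is 8,
         a power of two) and X == (plus (reg P) (const_int 11)) where the low
         three bits of P are known to be zero, the test above fires because
         11 has bits outside the mask; the constant is clipped to
         11 & -8 == 8, giving (plus (reg P) (const_int 8)), and the enclosing
         AND can then often be dropped entirely.  */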
8198 /* ... fall through ... */
8201 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8202 most significant bit in MASK since carries from those bits will
8203 affect the bits we are interested in. */
8208 /* If X is (minus C Y) where C's least set bit is larger than any bit
8209 in the mask, then we may replace with (neg Y). */
8210 if (CONST_INT_P (XEXP (x
, 0))
8211 && (((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 0))
8212 & -INTVAL (XEXP (x
, 0))))
8215 x
= simplify_gen_unary (NEG
, GET_MODE (x
), XEXP (x
, 1),
8217 return force_to_mode (x
, mode
, mask
, next_select
);
8220 /* Similarly, if C contains every bit in the fuller_mask, then we may
8221 replace with (not Y). */
8222 if (CONST_INT_P (XEXP (x
, 0))
8223 && ((UINTVAL (XEXP (x
, 0)) | fuller_mask
) == UINTVAL (XEXP (x
, 0))))
8225 x
= simplify_gen_unary (NOT
, GET_MODE (x
),
8226 XEXP (x
, 1), GET_MODE (x
));
8227 return force_to_mode (x
, mode
, mask
, next_select
);
8235 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8236 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8237 operation which may be a bitfield extraction. Ensure that the
8238 constant we form is not wider than the mode of X. */
8240 if (GET_CODE (XEXP (x
, 0)) == LSHIFTRT
8241 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
8242 && INTVAL (XEXP (XEXP (x
, 0), 1)) >= 0
8243 && INTVAL (XEXP (XEXP (x
, 0), 1)) < HOST_BITS_PER_WIDE_INT
8244 && CONST_INT_P (XEXP (x
, 1))
8245 && ((INTVAL (XEXP (XEXP (x
, 0), 1))
8246 + floor_log2 (INTVAL (XEXP (x
, 1))))
8247 < GET_MODE_BITSIZE (GET_MODE (x
)))
8248 && (UINTVAL (XEXP (x
, 1))
8249 & ~nonzero_bits (XEXP (x
, 0), GET_MODE (x
))) == 0)
8251 temp
= GEN_INT ((INTVAL (XEXP (x
, 1)) & mask
)
8252 << INTVAL (XEXP (XEXP (x
, 0), 1)));
8253 temp
= simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
8254 XEXP (XEXP (x
, 0), 0), temp
);
8255 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
), temp
,
8256 XEXP (XEXP (x
, 0), 1));
8257 return force_to_mode (x
, mode
, mask
, next_select
);
8261 /* For most binary operations, just propagate into the operation and
8262 change the mode if we have an operation of that mode. */
8264 op0
= force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8265 op1
= force_to_mode (XEXP (x
, 1), mode
, mask
, next_select
);
8267 /* If we ended up truncating both operands, truncate the result of the
8268 operation instead. */
8269 if (GET_CODE (op0
) == TRUNCATE
8270 && GET_CODE (op1
) == TRUNCATE
)
8272 op0
= XEXP (op0
, 0);
8273 op1
= XEXP (op1
, 0);
8276 op0
= gen_lowpart_or_truncate (op_mode
, op0
);
8277 op1
= gen_lowpart_or_truncate (op_mode
, op1
);
8279 if (op_mode
!= GET_MODE (x
) || op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1))
8280 x
= simplify_gen_binary (code
, op_mode
, op0
, op1
);
8284 /* For left shifts, do the same, but just for the first operand.
8285 However, we cannot do anything with shifts where we cannot
8286 guarantee that the counts are smaller than the size of the mode
8287 because such a count will have a different meaning in a
8290 if (! (CONST_INT_P (XEXP (x
, 1))
8291 && INTVAL (XEXP (x
, 1)) >= 0
8292 && INTVAL (XEXP (x
, 1)) < GET_MODE_BITSIZE (mode
))
8293 && ! (GET_MODE (XEXP (x
, 1)) != VOIDmode
8294 && (nonzero_bits (XEXP (x
, 1), GET_MODE (XEXP (x
, 1)))
8295 < (unsigned HOST_WIDE_INT
) GET_MODE_BITSIZE (mode
))))
8298 /* If the shift count is a constant and we can do arithmetic in
8299 the mode of the shift, refine which bits we need. Otherwise, use the
8300 conservative form of the mask. */
8301 if (CONST_INT_P (XEXP (x
, 1))
8302 && INTVAL (XEXP (x
, 1)) >= 0
8303 && INTVAL (XEXP (x
, 1)) < GET_MODE_BITSIZE (op_mode
)
8304 && GET_MODE_BITSIZE (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
8305 mask
>>= INTVAL (XEXP (x
, 1));
8309 op0
= gen_lowpart_or_truncate (op_mode
,
8310 force_to_mode (XEXP (x
, 0), op_mode
,
8311 mask
, next_select
));
8313 if (op_mode
!= GET_MODE (x
) || op0
!= XEXP (x
, 0))
8314 x
= simplify_gen_binary (code
, op_mode
, op0
, XEXP (x
, 1));
8318 /* Here we can only do something if the shift count is a constant,
8319 this shift constant is valid for the host, and we can do arithmetic
8322 if (CONST_INT_P (XEXP (x
, 1))
8323 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
8324 && GET_MODE_BITSIZE (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
8326 rtx inner
= XEXP (x
, 0);
8327 unsigned HOST_WIDE_INT inner_mask
;
8329 /* Select the mask of the bits we need for the shift operand. */
8330 inner_mask
= mask
<< INTVAL (XEXP (x
, 1));
8332 /* We can only change the mode of the shift if we can do arithmetic
8333 in the mode of the shift and INNER_MASK is no wider than the
8334 width of X's mode. */
8335 if ((inner_mask
& ~GET_MODE_MASK (GET_MODE (x
))) != 0)
8336 op_mode
= GET_MODE (x
);
8338 inner
= force_to_mode (inner
, op_mode
, inner_mask
, next_select
);
8340 if (GET_MODE (x
) != op_mode
|| inner
!= XEXP (x
, 0))
8341 x
= simplify_gen_binary (LSHIFTRT
, op_mode
, inner
, XEXP (x
, 1));
      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
         shift and AND produces only copies of the sign bit (C2 is one less
         than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
          && CONST_INT_P (XEXP (x, 1))
          /* The shift puts one of the sign bit copies in the least significant
             bit.  */
          && ((INTVAL (XEXP (x, 1))
               + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
              >= GET_MODE_BITSIZE (GET_MODE (x)))
          && exact_log2 (mask + 1) >= 0
          /* Number of bits left after the shift must be more than the mask
             needs.  */
          && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
              <= GET_MODE_BITSIZE (GET_MODE (x)))
          /* Must be more sign bit copies than the mask needs.  */
          && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
              >= exact_log2 (mask + 1)))
        x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
                                 GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
                                          - exact_log2 (mask + 1)));
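      /* Illustrative sketch (not part of the original sources): in SImode,
         if (reg R) is known to have 25 sign-bit copies (it fits in 8 bits),
         then (and (lshiftrt (reg R) (const_int 28)) (const_int 15)) only
         ever sees copies of the sign bit: 28 + 25 >= 32, the mask 15 is
         2**4 - 1, 28 + 4 <= 32 and 25 >= 4, so the expression becomes just
         (lshiftrt (reg R) (const_int 28)) and the AND is dropped.  */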
8370 /* If we are just looking for the sign bit, we don't need this shift at
8371 all, even if it has a variable count. */
8372 if (GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
8373 && (mask
== ((unsigned HOST_WIDE_INT
) 1
8374 << (GET_MODE_BITSIZE (GET_MODE (x
)) - 1))))
8375 return force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8377 /* If this is a shift by a constant, get a mask that contains those bits
8378 that are not copies of the sign bit. We then have two cases: If
8379 MASK only includes those bits, this can be a logical shift, which may
8380 allow simplifications. If MASK is a single-bit field not within
8381 those bits, we are requesting a copy of the sign bit and hence can
8382 shift the sign bit to the appropriate location. */
8384 if (CONST_INT_P (XEXP (x
, 1)) && INTVAL (XEXP (x
, 1)) >= 0
8385 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
8389 /* If the considered data is wider than HOST_WIDE_INT, we can't
8390 represent a mask for all its bits in a single scalar.
8391 But we only care about the lower bits, so calculate these. */
8393 if (GET_MODE_BITSIZE (GET_MODE (x
)) > HOST_BITS_PER_WIDE_INT
)
8395 nonzero
= ~(unsigned HOST_WIDE_INT
) 0;
8397 /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8398 is the number of bits a full-width mask would have set.
8399 We need only shift if these are fewer than nonzero can
8400 hold. If not, we must keep all bits set in nonzero. */
8402 if (GET_MODE_BITSIZE (GET_MODE (x
)) - INTVAL (XEXP (x
, 1))
8403 < HOST_BITS_PER_WIDE_INT
)
8404 nonzero
>>= INTVAL (XEXP (x
, 1))
8405 + HOST_BITS_PER_WIDE_INT
8406 - GET_MODE_BITSIZE (GET_MODE (x
)) ;
8410 nonzero
= GET_MODE_MASK (GET_MODE (x
));
8411 nonzero
>>= INTVAL (XEXP (x
, 1));
8414 if ((mask
& ~nonzero
) == 0)
8416 x
= simplify_shift_const (NULL_RTX
, LSHIFTRT
, GET_MODE (x
),
8417 XEXP (x
, 0), INTVAL (XEXP (x
, 1)));
8418 if (GET_CODE (x
) != ASHIFTRT
)
8419 return force_to_mode (x
, mode
, mask
, next_select
);
8422 else if ((i
= exact_log2 (mask
)) >= 0)
8424 x
= simplify_shift_const
8425 (NULL_RTX
, LSHIFTRT
, GET_MODE (x
), XEXP (x
, 0),
8426 GET_MODE_BITSIZE (GET_MODE (x
)) - 1 - i
);
8428 if (GET_CODE (x
) != ASHIFTRT
)
8429 return force_to_mode (x
, mode
, mask
, next_select
);
8433 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8434 even if the shift count isn't a constant. */
8436 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
),
8437 XEXP (x
, 0), XEXP (x
, 1));
8441 /* If this is a zero- or sign-extension operation that just affects bits
8442 we don't care about, remove it. Be sure the call above returned
8443 something that is still a shift. */
8445 if ((GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ASHIFTRT
)
8446 && CONST_INT_P (XEXP (x
, 1))
8447 && INTVAL (XEXP (x
, 1)) >= 0
8448 && (INTVAL (XEXP (x
, 1))
8449 <= GET_MODE_BITSIZE (GET_MODE (x
)) - (floor_log2 (mask
) + 1))
8450 && GET_CODE (XEXP (x
, 0)) == ASHIFT
8451 && XEXP (XEXP (x
, 0), 1) == XEXP (x
, 1))
8452 return force_to_mode (XEXP (XEXP (x
, 0), 0), mode
, mask
,
8459 /* If the shift count is constant and we can do computations
8460 in the mode of X, compute where the bits we care about are.
8461 Otherwise, we can't do anything. Don't change the mode of
8462 the shift or propagate MODE into the shift, though. */
8463 if (CONST_INT_P (XEXP (x
, 1))
8464 && INTVAL (XEXP (x
, 1)) >= 0)
8466 temp
= simplify_binary_operation (code
== ROTATE
? ROTATERT
: ROTATE
,
8467 GET_MODE (x
), GEN_INT (mask
),
8469 if (temp
&& CONST_INT_P (temp
))
8471 force_to_mode (XEXP (x
, 0), GET_MODE (x
),
8472 INTVAL (temp
), next_select
));
8477 /* If we just want the low-order bit, the NEG isn't needed since it
8478 won't change the low-order bit. */
8480 return force_to_mode (XEXP (x
, 0), mode
, mask
, just_select
);
8482 /* We need any bits less significant than the most significant bit in
8483 MASK since carries from those bits will affect the bits we are
8489 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8490 same as the XOR case above. Ensure that the constant we form is not
8491 wider than the mode of X. */
8493 if (GET_CODE (XEXP (x
, 0)) == LSHIFTRT
8494 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
8495 && INTVAL (XEXP (XEXP (x
, 0), 1)) >= 0
8496 && (INTVAL (XEXP (XEXP (x
, 0), 1)) + floor_log2 (mask
)
8497 < GET_MODE_BITSIZE (GET_MODE (x
)))
8498 && INTVAL (XEXP (XEXP (x
, 0), 1)) < HOST_BITS_PER_WIDE_INT
)
8500 temp
= gen_int_mode (mask
<< INTVAL (XEXP (XEXP (x
, 0), 1)),
8502 temp
= simplify_gen_binary (XOR
, GET_MODE (x
),
8503 XEXP (XEXP (x
, 0), 0), temp
);
8504 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
),
8505 temp
, XEXP (XEXP (x
, 0), 1));
8507 return force_to_mode (x
, mode
, mask
, next_select
);
8510 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8511 use the full mask inside the NOT. */
8515 op0
= gen_lowpart_or_truncate (op_mode
,
8516 force_to_mode (XEXP (x
, 0), mode
, mask
,
8518 if (op_mode
!= GET_MODE (x
) || op0
!= XEXP (x
, 0))
8519 x
= simplify_gen_unary (code
, op_mode
, op0
, op_mode
);
8523 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8524 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8525 which is equal to STORE_FLAG_VALUE. */
8526 if ((mask
& ~STORE_FLAG_VALUE
) == 0
8527 && XEXP (x
, 1) == const0_rtx
8528 && GET_MODE (XEXP (x
, 0)) == mode
8529 && exact_log2 (nonzero_bits (XEXP (x
, 0), mode
)) >= 0
8530 && (nonzero_bits (XEXP (x
, 0), mode
)
8531 == (unsigned HOST_WIDE_INT
) STORE_FLAG_VALUE
))
8532 return force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8537 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8538 written in a narrower mode. We play it safe and do not do so. */
8541 gen_lowpart_or_truncate (GET_MODE (x
),
8542 force_to_mode (XEXP (x
, 1), mode
,
8543 mask
, next_select
)));
8545 gen_lowpart_or_truncate (GET_MODE (x
),
8546 force_to_mode (XEXP (x
, 2), mode
,
8547 mask
, next_select
)));
8554 /* Ensure we return a value of the proper mode. */
8555 return gen_lowpart_or_truncate (mode
, x
);
8558 /* Return nonzero if X is an expression that has one of two values depending on
8559 whether some other value is zero or nonzero. In that case, we return the
8560 value that is being tested, *PTRUE is set to the value if the rtx being
8561 returned has a nonzero value, and *PFALSE is set to the other alternative.
8563 If we return zero, we set *PTRUE and *PFALSE to X. */
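/* Illustrative sketch (not part of the original sources): for
   X == (plus (mult (ne (reg C) (const_int 0)) (reg A))
              (mult (eq (reg C) (const_int 0)) (reg B)))
   with STORE_FLAG_VALUE == 1, the routine returns (ne (reg C) (const_int 0))
   with *PTRUE simplifying to (reg A) and *PFALSE to (reg B); for a plain
   X == (ne (reg C) (const_int 0)) it returns (reg C) with *PTRUE set to
   const_true_rtx and *PFALSE to const0_rtx.  */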
8566 if_then_else_cond (rtx x
, rtx
*ptrue
, rtx
*pfalse
)
8568 enum machine_mode mode
= GET_MODE (x
);
8569 enum rtx_code code
= GET_CODE (x
);
8570 rtx cond0
, cond1
, true0
, true1
, false0
, false1
;
8571 unsigned HOST_WIDE_INT nz
;
8573 /* If we are comparing a value against zero, we are done. */
8574 if ((code
== NE
|| code
== EQ
)
8575 && XEXP (x
, 1) == const0_rtx
)
8577 *ptrue
= (code
== NE
) ? const_true_rtx
: const0_rtx
;
8578 *pfalse
= (code
== NE
) ? const0_rtx
: const_true_rtx
;
8582 /* If this is a unary operation whose operand has one of two values, apply
8583 our opcode to compute those values. */
8584 else if (UNARY_P (x
)
8585 && (cond0
= if_then_else_cond (XEXP (x
, 0), &true0
, &false0
)) != 0)
8587 *ptrue
= simplify_gen_unary (code
, mode
, true0
, GET_MODE (XEXP (x
, 0)));
8588 *pfalse
= simplify_gen_unary (code
, mode
, false0
,
8589 GET_MODE (XEXP (x
, 0)));
8593 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8594 make can't possibly match and would suppress other optimizations. */
8595 else if (code
== COMPARE
)
8598 /* If this is a binary operation, see if either side has only one of two
8599 values. If either one does or if both do and they are conditional on
8600 the same value, compute the new true and false values. */
8601 else if (BINARY_P (x
))
8603 cond0
= if_then_else_cond (XEXP (x
, 0), &true0
, &false0
);
8604 cond1
= if_then_else_cond (XEXP (x
, 1), &true1
, &false1
);
8606 if ((cond0
!= 0 || cond1
!= 0)
8607 && ! (cond0
!= 0 && cond1
!= 0 && ! rtx_equal_p (cond0
, cond1
)))
8609 /* If if_then_else_cond returned zero, then true/false are the
8610 same rtl. We must copy one of them to prevent invalid rtl
8613 true0
= copy_rtx (true0
);
8614 else if (cond1
== 0)
8615 true1
= copy_rtx (true1
);
8617 if (COMPARISON_P (x
))
8619 *ptrue
= simplify_gen_relational (code
, mode
, VOIDmode
,
8621 *pfalse
= simplify_gen_relational (code
, mode
, VOIDmode
,
8626 *ptrue
= simplify_gen_binary (code
, mode
, true0
, true1
);
8627 *pfalse
= simplify_gen_binary (code
, mode
, false0
, false1
);
8630 return cond0
? cond0
: cond1
;
8633 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8634 operands is zero when the other is nonzero, and vice-versa,
8635 and STORE_FLAG_VALUE is 1 or -1. */
8637 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
8638 && (code
== PLUS
|| code
== IOR
|| code
== XOR
|| code
== MINUS
8640 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
8642 rtx op0
= XEXP (XEXP (x
, 0), 1);
8643 rtx op1
= XEXP (XEXP (x
, 1), 1);
8645 cond0
= XEXP (XEXP (x
, 0), 0);
8646 cond1
= XEXP (XEXP (x
, 1), 0);
8648 if (COMPARISON_P (cond0
)
8649 && COMPARISON_P (cond1
)
8650 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
8651 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
8652 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
8653 || ((swap_condition (GET_CODE (cond0
))
8654 == reversed_comparison_code (cond1
, NULL
))
8655 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
8656 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
8657 && ! side_effects_p (x
))
8659 *ptrue
= simplify_gen_binary (MULT
, mode
, op0
, const_true_rtx
);
8660 *pfalse
= simplify_gen_binary (MULT
, mode
,
8662 ? simplify_gen_unary (NEG
, mode
,
8670 /* Similarly for MULT, AND and UMIN, except that for these the result
8672 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
8673 && (code
== MULT
|| code
== AND
|| code
== UMIN
)
8674 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
8676 cond0
= XEXP (XEXP (x
, 0), 0);
8677 cond1
= XEXP (XEXP (x
, 1), 0);
8679 if (COMPARISON_P (cond0
)
8680 && COMPARISON_P (cond1
)
8681 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
8682 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
8683 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
8684 || ((swap_condition (GET_CODE (cond0
))
8685 == reversed_comparison_code (cond1
, NULL
))
8686 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
8687 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
8688 && ! side_effects_p (x
))
8690 *ptrue
= *pfalse
= const0_rtx
;
8696 else if (code
== IF_THEN_ELSE
)
8698 /* If we have IF_THEN_ELSE already, extract the condition and
8699 canonicalize it if it is NE or EQ. */
8700 cond0
= XEXP (x
, 0);
8701 *ptrue
= XEXP (x
, 1), *pfalse
= XEXP (x
, 2);
8702 if (GET_CODE (cond0
) == NE
&& XEXP (cond0
, 1) == const0_rtx
)
8703 return XEXP (cond0
, 0);
8704 else if (GET_CODE (cond0
) == EQ
&& XEXP (cond0
, 1) == const0_rtx
)
8706 *ptrue
= XEXP (x
, 2), *pfalse
= XEXP (x
, 1);
8707 return XEXP (cond0
, 0);
  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
8715 else if (code
== SUBREG
8716 && 0 != (cond0
= if_then_else_cond (SUBREG_REG (x
),
8719 true0
= simplify_gen_subreg (mode
, true0
,
8720 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
8721 false0
= simplify_gen_subreg (mode
, false0
,
8722 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
8723 if (true0
&& false0
)
8731 /* If X is a constant, this isn't special and will cause confusions
8732 if we treat it as such. Likewise if it is equivalent to a constant. */
8733 else if (CONSTANT_P (x
)
8734 || ((cond0
= get_last_value (x
)) != 0 && CONSTANT_P (cond0
)))
8737 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8738 will be least confusing to the rest of the compiler. */
8739 else if (mode
== BImode
)
8741 *ptrue
= GEN_INT (STORE_FLAG_VALUE
), *pfalse
= const0_rtx
;
8745 /* If X is known to be either 0 or -1, those are the true and
8746 false values when testing X. */
8747 else if (x
== constm1_rtx
|| x
== const0_rtx
8748 || (mode
!= VOIDmode
8749 && num_sign_bit_copies (x
, mode
) == GET_MODE_BITSIZE (mode
)))
8751 *ptrue
= constm1_rtx
, *pfalse
= const0_rtx
;
8755 /* Likewise for 0 or a single bit. */
8756 else if (SCALAR_INT_MODE_P (mode
)
8757 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
8758 && exact_log2 (nz
= nonzero_bits (x
, mode
)) >= 0)
8760 *ptrue
= gen_int_mode (nz
, mode
), *pfalse
= const0_rtx
;
8764 /* Otherwise fail; show no condition with true and false values the same. */
8765 *ptrue
= *pfalse
= x
;
/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known not to be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */
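/* Illustrative sketch (not part of the original sources): while simplifying
   the arms of (if_then_else (gt (reg R) (const_int 0)) A B), the true arm is
   processed with known_cond (..., GT, R, const0_rtx), so inside A an
   occurrence of (abs (reg R)) can be replaced by (reg R) itself, and a
   comparison such as (ge (reg R) (const_int 0)) collapses to const_true_rtx
   because GT dominates GE.  */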
8778 known_cond (rtx x
, enum rtx_code cond
, rtx reg
, rtx val
)
8780 enum rtx_code code
= GET_CODE (x
);
8785 if (side_effects_p (x
))
8788 /* If either operand of the condition is a floating point value,
8789 then we have to avoid collapsing an EQ comparison. */
8791 && rtx_equal_p (x
, reg
)
8792 && ! FLOAT_MODE_P (GET_MODE (x
))
8793 && ! FLOAT_MODE_P (GET_MODE (val
)))
8796 if (cond
== UNEQ
&& rtx_equal_p (x
, reg
))
8799 /* If X is (abs REG) and we know something about REG's relationship
8800 with zero, we may be able to simplify this. */
8802 if (code
== ABS
&& rtx_equal_p (XEXP (x
, 0), reg
) && val
== const0_rtx
)
8805 case GE
: case GT
: case EQ
:
8808 return simplify_gen_unary (NEG
, GET_MODE (XEXP (x
, 0)),
8810 GET_MODE (XEXP (x
, 0)));
8815 /* The only other cases we handle are MIN, MAX, and comparisons if the
8816 operands are the same as REG and VAL. */
8818 else if (COMPARISON_P (x
) || COMMUTATIVE_ARITH_P (x
))
8820 if (rtx_equal_p (XEXP (x
, 0), val
))
8821 cond
= swap_condition (cond
), temp
= val
, val
= reg
, reg
= temp
;
8823 if (rtx_equal_p (XEXP (x
, 0), reg
) && rtx_equal_p (XEXP (x
, 1), val
))
8825 if (COMPARISON_P (x
))
8827 if (comparison_dominates_p (cond
, code
))
8828 return const_true_rtx
;
8830 code
= reversed_comparison_code (x
, NULL
);
8832 && comparison_dominates_p (cond
, code
))
8837 else if (code
== SMAX
|| code
== SMIN
8838 || code
== UMIN
|| code
== UMAX
)
8840 int unsignedp
= (code
== UMIN
|| code
== UMAX
);
8842 /* Do not reverse the condition when it is NE or EQ.
8843 This is because we cannot conclude anything about
8844 the value of 'SMAX (x, y)' when x is not equal to y,
8845 but we can when x equals y. */
8846 if ((code
== SMAX
|| code
== UMAX
)
8847 && ! (cond
== EQ
|| cond
== NE
))
8848 cond
= reverse_condition (cond
);
8853 return unsignedp
? x
: XEXP (x
, 1);
8855 return unsignedp
? x
: XEXP (x
, 0);
8857 return unsignedp
? XEXP (x
, 1) : x
;
8859 return unsignedp
? XEXP (x
, 0) : x
;
8866 else if (code
== SUBREG
)
8868 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (x
));
8869 rtx new_rtx
, r
= known_cond (SUBREG_REG (x
), cond
, reg
, val
);
8871 if (SUBREG_REG (x
) != r
)
8873 /* We must simplify subreg here, before we lose track of the
8874 original inner_mode. */
8875 new_rtx
= simplify_subreg (GET_MODE (x
), r
,
8876 inner_mode
, SUBREG_BYTE (x
));
8880 SUBST (SUBREG_REG (x
), r
);
8885 /* We don't have to handle SIGN_EXTEND here, because even in the
8886 case of replacing something with a modeless CONST_INT, a
8887 CONST_INT is already (supposed to be) a valid sign extension for
8888 its narrower mode, which implies it's already properly
8889 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
8890 story is different. */
8891 else if (code
== ZERO_EXTEND
)
8893 enum machine_mode inner_mode
= GET_MODE (XEXP (x
, 0));
8894 rtx new_rtx
, r
= known_cond (XEXP (x
, 0), cond
, reg
, val
);
8896 if (XEXP (x
, 0) != r
)
8898 /* We must simplify the zero_extend here, before we lose
8899 track of the original inner_mode. */
8900 new_rtx
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
8905 SUBST (XEXP (x
, 0), r
);
8911 fmt
= GET_RTX_FORMAT (code
);
8912 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
8915 SUBST (XEXP (x
, i
), known_cond (XEXP (x
, i
), cond
, reg
, val
));
8916 else if (fmt
[i
] == 'E')
8917 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
8918 SUBST (XVECEXP (x
, i
, j
), known_cond (XVECEXP (x
, i
, j
),
8925 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
8926 assignment as a field assignment. */
8929 rtx_equal_for_field_assignment_p (rtx x
, rtx y
)
8931 if (x
== y
|| rtx_equal_p (x
, y
))
8934 if (x
== 0 || y
== 0 || GET_MODE (x
) != GET_MODE (y
))
8937 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
8938 Note that all SUBREGs of MEM are paradoxical; otherwise they
8939 would have been rewritten. */
8940 if (MEM_P (x
) && GET_CODE (y
) == SUBREG
8941 && MEM_P (SUBREG_REG (y
))
8942 && rtx_equal_p (SUBREG_REG (y
),
8943 gen_lowpart (GET_MODE (SUBREG_REG (y
)), x
)))
8946 if (MEM_P (y
) && GET_CODE (x
) == SUBREG
8947 && MEM_P (SUBREG_REG (x
))
8948 && rtx_equal_p (SUBREG_REG (x
),
8949 gen_lowpart (GET_MODE (SUBREG_REG (x
)), y
)))
  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
     above cases are true.  */
8960 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
8961 Return that assignment if so.
8963 We only handle the most common cases. */
8966 make_field_assignment (rtx x
)
8968 rtx dest
= SET_DEST (x
);
8969 rtx src
= SET_SRC (x
);
8974 unsigned HOST_WIDE_INT len
;
8976 enum machine_mode mode
;
  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (VOIDmode, assign, const0_rtx);
      return x;
    }
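  /* Illustrative sketch (not part of the original sources): clearing bit N
     of a register is often written as DEST &= ~(1 << N).  Earlier
     simplification canonicalizes (not (ashift (const_int 1) POS)) into
     (rotate (const_int -2) POS), so the SET recognized above looks like

         (set (reg D) (and (rotate (const_int -2) (reg POS)) (reg D)))

     and is rewritten as a single-bit field store:

         (set (zero_extract (reg D) (const_int 1) (reg POS)) (const_int 0))  */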
8995 if (GET_CODE (src
) == AND
&& GET_CODE (XEXP (src
, 0)) == SUBREG
8996 && subreg_lowpart_p (XEXP (src
, 0))
8997 && (GET_MODE_SIZE (GET_MODE (XEXP (src
, 0)))
8998 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src
, 0)))))
8999 && GET_CODE (SUBREG_REG (XEXP (src
, 0))) == ROTATE
9000 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src
, 0)), 0))
9001 && INTVAL (XEXP (SUBREG_REG (XEXP (src
, 0)), 0)) == -2
9002 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9004 assign
= make_extraction (VOIDmode
, dest
, 0,
9005 XEXP (SUBREG_REG (XEXP (src
, 0)), 1),
9008 return gen_rtx_SET (VOIDmode
, assign
, const0_rtx
);
9012 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9014 if (GET_CODE (src
) == IOR
&& GET_CODE (XEXP (src
, 0)) == ASHIFT
9015 && XEXP (XEXP (src
, 0), 0) == const1_rtx
9016 && rtx_equal_for_field_assignment_p (dest
, XEXP (src
, 1)))
9018 assign
= make_extraction (VOIDmode
, dest
, 0, XEXP (XEXP (src
, 0), 1),
9021 return gen_rtx_SET (VOIDmode
, assign
, const1_rtx
);
9025 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9026 SRC is an AND with all bits of that field set, then we can discard
9028 if (GET_CODE (dest
) == ZERO_EXTRACT
9029 && CONST_INT_P (XEXP (dest
, 1))
9030 && GET_CODE (src
) == AND
9031 && CONST_INT_P (XEXP (src
, 1)))
9033 HOST_WIDE_INT width
= INTVAL (XEXP (dest
, 1));
9034 unsigned HOST_WIDE_INT and_mask
= INTVAL (XEXP (src
, 1));
9035 unsigned HOST_WIDE_INT ze_mask
;
9037 if (width
>= HOST_BITS_PER_WIDE_INT
)
9040 ze_mask
= ((unsigned HOST_WIDE_INT
)1 << width
) - 1;
9042 /* Complete overlap. We can remove the source AND. */
9043 if ((and_mask
& ze_mask
) == ze_mask
)
9044 return gen_rtx_SET (VOIDmode
, dest
, XEXP (src
, 0));
9046 /* Partial overlap. We can reduce the source AND. */
9047 if ((and_mask
& ze_mask
) != and_mask
)
9049 mode
= GET_MODE (src
);
9050 src
= gen_rtx_AND (mode
, XEXP (src
, 0),
9051 gen_int_mode (and_mask
& ze_mask
, mode
));
9052 return gen_rtx_SET (VOIDmode
, dest
, src
);
9056 /* The other case we handle is assignments into a constant-position
9057 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9058 a mask that has all one bits except for a group of zero bits and
9059 OTHER is known to have zeros where C1 has ones, this is such an
9060 assignment. Compute the position and length from C1. Shift OTHER
9061 to the appropriate position, force it to the required mode, and
9062 make the extraction. Check for the AND in both operands. */
9064 if (GET_CODE (src
) != IOR
&& GET_CODE (src
) != XOR
)
9067 rhs
= expand_compound_operation (XEXP (src
, 0));
9068 lhs
= expand_compound_operation (XEXP (src
, 1));
9070 if (GET_CODE (rhs
) == AND
9071 && CONST_INT_P (XEXP (rhs
, 1))
9072 && rtx_equal_for_field_assignment_p (XEXP (rhs
, 0), dest
))
9073 c1
= INTVAL (XEXP (rhs
, 1)), other
= lhs
;
9074 else if (GET_CODE (lhs
) == AND
9075 && CONST_INT_P (XEXP (lhs
, 1))
9076 && rtx_equal_for_field_assignment_p (XEXP (lhs
, 0), dest
))
9077 c1
= INTVAL (XEXP (lhs
, 1)), other
= rhs
;
9081 pos
= get_pos_from_mask ((~c1
) & GET_MODE_MASK (GET_MODE (dest
)), &len
);
9082 if (pos
< 0 || pos
+ len
> GET_MODE_BITSIZE (GET_MODE (dest
))
9083 || GET_MODE_BITSIZE (GET_MODE (dest
)) > HOST_BITS_PER_WIDE_INT
9084 || (c1
& nonzero_bits (other
, GET_MODE (dest
))) != 0)
9087 assign
= make_extraction (VOIDmode
, dest
, pos
, NULL_RTX
, len
, 1, 1, 0);
9091 /* The mode to use for the source is the mode of the assignment, or of
9092 what is inside a possible STRICT_LOW_PART. */
9093 mode
= (GET_CODE (assign
) == STRICT_LOW_PART
9094 ? GET_MODE (XEXP (assign
, 0)) : GET_MODE (assign
));
9096 /* Shift OTHER right POS places and make it the source, restricting it
9097 to the proper length and mode. */
9099 src
= canon_reg_for_combine (simplify_shift_const (NULL_RTX
, LSHIFTRT
,
9103 src
= force_to_mode (src
, mode
,
9104 GET_MODE_BITSIZE (mode
) >= HOST_BITS_PER_WIDE_INT
9105 ? ~(unsigned HOST_WIDE_INT
) 0
9106 : ((unsigned HOST_WIDE_INT
) 1 << len
) - 1,
9109 /* If SRC is masked by an AND that does not make a difference in
9110 the value being stored, strip it. */
9111 if (GET_CODE (assign
) == ZERO_EXTRACT
9112 && CONST_INT_P (XEXP (assign
, 1))
9113 && INTVAL (XEXP (assign
, 1)) < HOST_BITS_PER_WIDE_INT
9114 && GET_CODE (src
) == AND
9115 && CONST_INT_P (XEXP (src
, 1))
9116 && UINTVAL (XEXP (src
, 1))
9117 == ((unsigned HOST_WIDE_INT
) 1 << INTVAL (XEXP (assign
, 1))) - 1)
9118 src
= XEXP (src
, 0);
9120 return gen_rtx_SET (VOIDmode
, assign
, src
);
9123 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9127 apply_distributive_law (rtx x
)
9129 enum rtx_code code
= GET_CODE (x
);
9130 enum rtx_code inner_code
;
9131 rtx lhs
, rhs
, other
;
9134 /* Distributivity is not true for floating point as it can change the
9135 value. So we don't do it unless -funsafe-math-optimizations. */
9136 if (FLOAT_MODE_P (GET_MODE (x
))
9137 && ! flag_unsafe_math_optimizations
)
9140 /* The outer operation can only be one of the following: */
9141 if (code
!= IOR
&& code
!= AND
&& code
!= XOR
9142 && code
!= PLUS
&& code
!= MINUS
)
9148 /* If either operand is a primitive we can't do anything, so get out
9150 if (OBJECT_P (lhs
) || OBJECT_P (rhs
))
9153 lhs
= expand_compound_operation (lhs
);
9154 rhs
= expand_compound_operation (rhs
);
9155 inner_code
= GET_CODE (lhs
);
9156 if (inner_code
!= GET_CODE (rhs
))
9159 /* See if the inner and outer operations distribute. */
9166 /* These all distribute except over PLUS. */
9167 if (code
== PLUS
|| code
== MINUS
)
9172 if (code
!= PLUS
&& code
!= MINUS
)
9177 /* This is also a multiply, so it distributes over everything. */
      /* Non-paradoxical SUBREGs distribute over all operations,
         provided the inner modes and byte offsets are the same, this
         is an extraction of a low-order part, we don't convert an fp
         operation to int or vice versa, this is not a vector mode,
         and we would not be converting a single-word operation into a
         multi-word operation.  The latter test is not required, but
         it prevents generating unneeded multi-word operations.  Some
         of the previous tests are redundant given the latter test,
         but are retained because they are required for correctness.

         We produce the result slightly differently in this case.  */
9193 if (GET_MODE (SUBREG_REG (lhs
)) != GET_MODE (SUBREG_REG (rhs
))
9194 || SUBREG_BYTE (lhs
) != SUBREG_BYTE (rhs
)
9195 || ! subreg_lowpart_p (lhs
)
9196 || (GET_MODE_CLASS (GET_MODE (lhs
))
9197 != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs
))))
9198 || (GET_MODE_SIZE (GET_MODE (lhs
))
9199 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs
))))
9200 || VECTOR_MODE_P (GET_MODE (lhs
))
9201 || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs
))) > UNITS_PER_WORD
9202 /* Result might need to be truncated. Don't change mode if
9203 explicit truncation is needed. */
9204 || !TRULY_NOOP_TRUNCATION
9205 (GET_MODE_BITSIZE (GET_MODE (x
)),
9206 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs
)))))
9209 tem
= simplify_gen_binary (code
, GET_MODE (SUBREG_REG (lhs
)),
9210 SUBREG_REG (lhs
), SUBREG_REG (rhs
));
9211 return gen_lowpart (GET_MODE (x
), tem
);
9217 /* Set LHS and RHS to the inner operands (A and B in the example
9218 above) and set OTHER to the common operand (C in the example).
9219 There is only one way to do this unless the inner operation is
9221 if (COMMUTATIVE_ARITH_P (lhs
)
9222 && rtx_equal_p (XEXP (lhs
, 0), XEXP (rhs
, 0)))
9223 other
= XEXP (lhs
, 0), lhs
= XEXP (lhs
, 1), rhs
= XEXP (rhs
, 1);
9224 else if (COMMUTATIVE_ARITH_P (lhs
)
9225 && rtx_equal_p (XEXP (lhs
, 0), XEXP (rhs
, 1)))
9226 other
= XEXP (lhs
, 0), lhs
= XEXP (lhs
, 1), rhs
= XEXP (rhs
, 0);
9227 else if (COMMUTATIVE_ARITH_P (lhs
)
9228 && rtx_equal_p (XEXP (lhs
, 1), XEXP (rhs
, 0)))
9229 other
= XEXP (lhs
, 1), lhs
= XEXP (lhs
, 0), rhs
= XEXP (rhs
, 1);
9230 else if (rtx_equal_p (XEXP (lhs
, 1), XEXP (rhs
, 1)))
9231 other
= XEXP (lhs
, 1), lhs
= XEXP (lhs
, 0), rhs
= XEXP (rhs
, 0);
9235 /* Form the new inner operation, seeing if it simplifies first. */
9236 tem
= simplify_gen_binary (code
, GET_MODE (x
), lhs
, rhs
);
9238 /* There is one exception to the general way of distributing:
9239 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9240 if (code
== XOR
&& inner_code
== IOR
)
9243 other
= simplify_gen_unary (NOT
, GET_MODE (x
), other
, GET_MODE (x
));
9246 /* We may be able to continuing distributing the result, so call
9247 ourselves recursively on the inner operation before forming the
9248 outer operation, which we return. */
9249 return simplify_gen_binary (inner_code
, GET_MODE (x
),
9250 apply_distributive_law (tem
), other
);
9253 /* See if X is of the form (* (+ A B) C), and if so convert to
9254 (+ (* A C) (* B C)) and try to simplify.
9256 Most of the time, this results in no change. However, if some of
9257 the operands are the same or inverses of each other, simplifications
9260 For example, (and (ior A B) (not B)) can occur as the result of
9261 expanding a bit field assignment. When we apply the distributive
9262 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9263 which then simplifies to (and (A (not B))).
9265 Note that no checks happen on the validity of applying the inverse
9266 distributive law. This is pointless since we can do it in the
9267 few places where this routine is called.
9269 N is the index of the term that is decomposed (the arithmetic operation,
9270 i.e. (+ A B) in the first example above). !N is the index of the term that
9271 is distributed, i.e. of C in the first example above. */
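/* Illustrative sketch (not part of the original sources): for
   X == (and (ior (reg A) (reg B)) (not (reg B))) with N == 0, the
   decomposed term is (ior A B) and the distributed term is (not B).
   Distributing the AND over the IOR gives
   (ior (and A (not B)) (and B (not B))); the second arm folds to zero,
   leaving (and (reg A) (not (reg B))), the simplification described in the
   comment above.  The result replaces X only when it ends up cheaper and no
   longer has the same outer code.  */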
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  enum machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);
  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }
  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && rtx_cost (tmp, SET, optimize_this_for_speed_p)
	 < rtx_cost (x, SET, optimize_this_for_speed_p))
    return tmp;

  return NULL_RTX;
}
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;
  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);
  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;
  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
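  /* Illustrative example: with X known to be 0 or 1, (neg X) is 0 or -1,
     so (and (neg X) 8) keeps bit 3 exactly when X is 1, i.e. it has the
     same value as (ashift X 3).  */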
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */
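  /* Illustrative example: for (and (ior X 0xff00) 0xff) the AND is pushed
     into both arms, (and 0xff00 0xff) folds to 0, and the distributive-law
     pass leaves just (and X 0xff).  */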
  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
	(mode,
	 apply_distributive_law
	   (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
				 simplify_and_const_int (NULL_RTX,
							 GET_MODE (varop),
							 XEXP (varop, 0),
							 constop),
				 simplify_and_const_int (NULL_RTX,
							 GET_MODE (varop),
							 XEXP (varop, 1),
							 constop))));
  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */
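  /* Illustrative example: with CONSTOP == 15 (a mask of the low four bits)
     and VAROP == (plus X 16), the 16 contributes nothing to the low four
     bits and no carry into them, so (and (plus X 16) 15) is (and X 15).  */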
  if (GET_CODE (varop) == PLUS
      && exact_log2 (constop + 1) >= 0)
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }
  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
   a shift, AND, or zero_extract, we can do better.  */
9470 reg_nonzero_bits_for_combine (const_rtx x
, enum machine_mode mode
,
9471 const_rtx known_x ATTRIBUTE_UNUSED
,
9472 enum machine_mode known_mode ATTRIBUTE_UNUSED
,
9473 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED
,
9474 unsigned HOST_WIDE_INT
*nonzero
)
9479 /* If X is a register whose nonzero bits value is current, use it.
9480 Otherwise, if X is a register whose value we can find, use that
9481 value. Otherwise, use the previously-computed global nonzero bits
9482 for this register. */
9484 rsp
= VEC_index (reg_stat_type
, reg_stat
, REGNO (x
));
9485 if (rsp
->last_set_value
!= 0
9486 && (rsp
->last_set_mode
== mode
9487 || (GET_MODE_CLASS (rsp
->last_set_mode
) == MODE_INT
9488 && GET_MODE_CLASS (mode
) == MODE_INT
))
9489 && ((rsp
->last_set_label
>= label_tick_ebb_start
9490 && rsp
->last_set_label
< label_tick
)
9491 || (rsp
->last_set_label
== label_tick
9492 && DF_INSN_LUID (rsp
->last_set
) < subst_low_luid
)
9493 || (REGNO (x
) >= FIRST_PSEUDO_REGISTER
9494 && REG_N_SETS (REGNO (x
)) == 1
9496 (DF_LR_IN (ENTRY_BLOCK_PTR
->next_bb
), REGNO (x
)))))
9498 *nonzero
&= rsp
->last_set_nonzero_bits
;
9502 tem
= get_last_value (x
);
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
	  /* If X is narrower than MODE and TEM is a non-negative
	     constant that would appear negative in the mode of X,
	     sign-extend it for use in reg_nonzero_bits because some
	     machines (maybe most) will actually do the sign-extension
	     and this is the conservative approach.

	     ??? For 2.5, try to tighten up the MD files in this regard
	     instead of this kludge.  */
9516 if (GET_MODE_BITSIZE (GET_MODE (x
)) < GET_MODE_BITSIZE (mode
)
9517 && CONST_INT_P (tem
)
9519 && 0 != (UINTVAL (tem
)
9520 & ((unsigned HOST_WIDE_INT
) 1
9521 << (GET_MODE_BITSIZE (GET_MODE (x
)) - 1))))
9522 tem
= GEN_INT (UINTVAL (tem
)
9523 | ((unsigned HOST_WIDE_INT
) (-1)
9524 << GET_MODE_BITSIZE (GET_MODE (x
))));
9528 else if (nonzero_sign_valid
&& rsp
->nonzero_bits
)
9530 unsigned HOST_WIDE_INT mask
= rsp
->nonzero_bits
;
9532 if (GET_MODE_BITSIZE (GET_MODE (x
)) < GET_MODE_BITSIZE (mode
))
9533 /* We don't know anything about the upper bits. */
9534 mask
|= GET_MODE_MASK (mode
) ^ GET_MODE_MASK (GET_MODE (x
));
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */
9547 reg_num_sign_bit_copies_for_combine (const_rtx x
, enum machine_mode mode
,
9548 const_rtx known_x ATTRIBUTE_UNUSED
,
9549 enum machine_mode known_mode
9551 unsigned int known_ret ATTRIBUTE_UNUSED
,
9552 unsigned int *result
)
9557 rsp
= VEC_index (reg_stat_type
, reg_stat
, REGNO (x
));
9558 if (rsp
->last_set_value
!= 0
9559 && rsp
->last_set_mode
== mode
9560 && ((rsp
->last_set_label
>= label_tick_ebb_start
9561 && rsp
->last_set_label
< label_tick
)
9562 || (rsp
->last_set_label
== label_tick
9563 && DF_INSN_LUID (rsp
->last_set
) < subst_low_luid
)
9564 || (REGNO (x
) >= FIRST_PSEUDO_REGISTER
9565 && REG_N_SETS (REGNO (x
)) == 1
9567 (DF_LR_IN (ENTRY_BLOCK_PTR
->next_bb
), REGNO (x
)))))
9569 *result
= rsp
->last_set_sign_bit_copies
;
9573 tem
= get_last_value (x
);
9577 if (nonzero_sign_valid
&& rsp
->sign_bit_copies
!= 0
9578 && GET_MODE_BITSIZE (GET_MODE (x
)) == GET_MODE_BITSIZE (mode
))
9579 *result
= rsp
->sign_bit_copies
;
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */
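/* Illustrative example: an unsigned SImode value whose nonzero bits fit in
   0xff has 24 "spare" high-order zero bits, so adding two such values
   cannot overflow 32 bits; a signed value with two sign-bit copies has one
   spare bit.  */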
static unsigned int
extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  return (unsignedp
	  ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	     ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
			       - floor_log2 (nonzero_bits (x, mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
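/* Illustrative example: if the outermost operation so far is *POP0 == XOR
   with *PCONST0 == C and we also need OP1 == IOR with the same constant C,
   the identity (a | b) ^ b == a & ~b below collapses the pair into a single
   AND with constant ~C.  */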
static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
		 HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);
9639 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9643 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9646 if (op1
== UNKNOWN
|| op0
== SET
)
9649 else if (op0
== UNKNOWN
)
9650 op0
= op1
, const0
= const1
;
9652 else if (op0
== op1
)
9676 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9677 else if (op0
== PLUS
|| op1
== PLUS
|| op0
== NEG
|| op1
== NEG
)
9680 /* If the two constants aren't the same, we can't do anything. The
9681 remaining six cases can all be done. */
9682 else if (const0
!= const1
)
9690 /* (a & b) | b == b */
9692 else /* op1 == XOR */
9693 /* (a ^ b) | b == a | b */
9699 /* (a & b) ^ b == (~a) & b */
9700 op0
= AND
, *pcomp_p
= 1;
9701 else /* op1 == IOR */
9702 /* (a | b) ^ b == a & ~b */
9703 op0
= AND
, const0
= ~const0
;
9708 /* (a | b) & b == b */
9710 else /* op1 == XOR */
	  /* (a ^ b) & b == (~a) & b */
9718 /* Check for NO-OP cases. */
9719 const0
&= GET_MODE_MASK (mode
);
9721 && (op0
== IOR
|| op0
== XOR
|| op0
== PLUS
))
9723 else if (const0
== 0 && op0
== AND
)
9725 else if ((unsigned HOST_WIDE_INT
) const0
== GET_MODE_MASK (mode
)
9731 /* ??? Slightly redundant with the above mask, but not entirely.
9732 Moving this above means we'd have to sign-extend the mode mask
9733 for the final test. */
9734 if (op0
!= UNKNOWN
&& op0
!= NEG
)
9735 *pconst0
= trunc_int_for_mode (const0
, mode
);
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */
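/* Illustrative example: an LSHIFTRT of a QImode value can be widened to
   SImode when the value's nonzero bits already fit in QImode (the bits
   shifted in from the left are then zero), or when an outer AND masks off
   everything the widening could bring in.  */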
static enum machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
		      enum machine_mode orig_mode, enum machine_mode mode,
		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  if (orig_mode == mode)
    return mode;
  gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
9756 /* In general we can't perform in wider mode for right shift and rotate. */
9760 /* We can still widen if the bits brought in from the left are identical
9761 to the sign bit of ORIG_MODE. */
9762 if (num_sign_bit_copies (op
, mode
)
9763 > (unsigned) (GET_MODE_BITSIZE (mode
)
9764 - GET_MODE_BITSIZE (orig_mode
)))
9769 /* Similarly here but with zero bits. */
9770 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
9771 && (nonzero_bits (op
, mode
) & ~GET_MODE_MASK (orig_mode
)) == 0)
9774 /* We can also widen if the bits brought in will be masked off. This
9775 operation is performed in ORIG_MODE. */
9776 if (outer_code
== AND
)
9778 int care_bits
= low_bitmask_len (orig_mode
, outer_const
);
9781 && GET_MODE_BITSIZE (orig_mode
) - care_bits
>= count
)
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  Return NULL_RTX if we cannot
   simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
static rtx
simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  enum machine_mode mode = result_mode;
  enum machine_mode shift_mode, tmode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;
  /* Make sure to truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
    return NULL_RTX;

  count = orig_count;
9836 /* Unless one of the branches of the `if' in this loop does a `continue',
9837 we will `break' the loop after the `if'. */
9841 /* If we have an operand of (clobber (const_int 0)), fail. */
9842 if (GET_CODE (varop
) == CLOBBER
)
9845 /* Convert ROTATERT to ROTATE. */
9846 if (code
== ROTATERT
)
9848 unsigned int bitsize
= GET_MODE_BITSIZE (result_mode
);;
9850 if (VECTOR_MODE_P (result_mode
))
9851 count
= bitsize
/ GET_MODE_NUNITS (result_mode
) - count
;
9853 count
= bitsize
- count
;
9856 shift_mode
= try_widen_shift_mode (code
, varop
, count
, result_mode
,
9857 mode
, outer_op
, outer_const
);
      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */
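      /* Illustrative example: in SImode an ASHIFTRT by 40 behaves like one
	 by 31 (every result bit is a copy of the sign bit), a ROTATE by 40
	 is the same as a ROTATE by 8, and an out-of-range LSHIFTRT or
	 ASHIFT leaves only zeros.  */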
9869 if (count
> (GET_MODE_BITSIZE (shift_mode
) - 1))
9871 if (code
== ASHIFTRT
)
9872 count
= GET_MODE_BITSIZE (shift_mode
) - 1;
9873 else if (code
== ROTATE
|| code
== ROTATERT
)
9874 count
%= GET_MODE_BITSIZE (shift_mode
);
9877 /* We can't simply return zero because there may be an
9885 /* If we discovered we had to complement VAROP, leave. Making a NOT
9886 here would cause an infinite loop. */
9890 /* An arithmetic right shift of a quantity known to be -1 or 0
9892 if (code
== ASHIFTRT
9893 && (num_sign_bit_copies (varop
, shift_mode
)
9894 == GET_MODE_BITSIZE (shift_mode
)))
9900 /* If we are doing an arithmetic right shift and discarding all but
9901 the sign bit copies, this is equivalent to doing a shift by the
9902 bitsize minus one. Convert it into that shift because it will often
9903 allow other simplifications. */
9905 if (code
== ASHIFTRT
9906 && (count
+ num_sign_bit_copies (varop
, shift_mode
)
9907 >= GET_MODE_BITSIZE (shift_mode
)))
9908 count
= GET_MODE_BITSIZE (shift_mode
) - 1;
9910 /* We simplify the tests below and elsewhere by converting
9911 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
9912 `make_compound_operation' will convert it to an ASHIFTRT for
9913 those machines (such as VAX) that don't have an LSHIFTRT. */
9914 if (GET_MODE_BITSIZE (shift_mode
) <= HOST_BITS_PER_WIDE_INT
9916 && ((nonzero_bits (varop
, shift_mode
)
9917 & ((unsigned HOST_WIDE_INT
) 1
9918 << (GET_MODE_BITSIZE (shift_mode
) - 1))) == 0))
9921 if (((code
== LSHIFTRT
9922 && GET_MODE_BITSIZE (shift_mode
) <= HOST_BITS_PER_WIDE_INT
9923 && !(nonzero_bits (varop
, shift_mode
) >> count
))
9925 && GET_MODE_BITSIZE (shift_mode
) <= HOST_BITS_PER_WIDE_INT
9926 && !((nonzero_bits (varop
, shift_mode
) << count
)
9927 & GET_MODE_MASK (shift_mode
))))
9928 && !side_effects_p (varop
))
9931 switch (GET_CODE (varop
))
9937 new_rtx
= expand_compound_operation (varop
);
9938 if (new_rtx
!= varop
)
9946 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
9947 minus the width of a smaller mode, we can do this with a
9948 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
9949 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
9950 && ! mode_dependent_address_p (XEXP (varop
, 0))
9951 && ! MEM_VOLATILE_P (varop
)
9952 && (tmode
= mode_for_size (GET_MODE_BITSIZE (mode
) - count
,
9953 MODE_INT
, 1)) != BLKmode
)
9955 new_rtx
= adjust_address_nv (varop
, tmode
,
9956 BYTES_BIG_ENDIAN
? 0
9957 : count
/ BITS_PER_UNIT
);
9959 varop
= gen_rtx_fmt_e (code
== ASHIFTRT
? SIGN_EXTEND
9960 : ZERO_EXTEND
, mode
, new_rtx
);
9967 /* If VAROP is a SUBREG, strip it as long as the inner operand has
9968 the same number of words as what we've seen so far. Then store
9969 the widest mode in MODE. */
9970 if (subreg_lowpart_p (varop
)
9971 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop
)))
9972 > GET_MODE_SIZE (GET_MODE (varop
)))
9973 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop
)))
9974 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)
9976 && GET_MODE_CLASS (GET_MODE (varop
)) == MODE_INT
9977 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop
))) == MODE_INT
)
9979 varop
= SUBREG_REG (varop
);
9980 if (GET_MODE_SIZE (GET_MODE (varop
)) > GET_MODE_SIZE (mode
))
9981 mode
= GET_MODE (varop
);
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
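	  /* Illustrative example: a multiplier that is a power of two, such
	     as (mult X 8), is rewritten as (ashift X 3) (exact_log2 (8) is
	     3), after which the ordinary nested-shift merging applies.  */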
9990 if (CONST_INT_P (XEXP (varop
, 1))
9991 && exact_log2 (UINTVAL (XEXP (varop
, 1))) >= 0)
9994 = simplify_gen_binary (ASHIFT
, GET_MODE (varop
),
9996 GEN_INT (exact_log2 (
9997 UINTVAL (XEXP (varop
, 1)))));
10003 /* Similar, for when divides are cheaper. */
10004 if (CONST_INT_P (XEXP (varop
, 1))
10005 && exact_log2 (UINTVAL (XEXP (varop
, 1))) >= 0)
10008 = simplify_gen_binary (LSHIFTRT
, GET_MODE (varop
),
10010 GEN_INT (exact_log2 (
10011 UINTVAL (XEXP (varop
, 1)))));
10017 /* If we are extracting just the sign bit of an arithmetic
10018 right shift, that shift is not needed. However, the sign
10019 bit of a wider mode may be different from what would be
10020 interpreted as the sign bit in a narrower mode, so, if
10021 the result is narrower, don't discard the shift. */
10022 if (code
== LSHIFTRT
10023 && count
== (GET_MODE_BITSIZE (result_mode
) - 1)
10024 && (GET_MODE_BITSIZE (result_mode
)
10025 >= GET_MODE_BITSIZE (GET_MODE (varop
))))
10027 varop
= XEXP (varop
, 0);
10031 /* ... fall through ... */
10036 /* Here we have two nested shifts. The result is usually the
10037 AND of a new shift with a mask. We compute the result below. */
10038 if (CONST_INT_P (XEXP (varop
, 1))
10039 && INTVAL (XEXP (varop
, 1)) >= 0
10040 && INTVAL (XEXP (varop
, 1)) < GET_MODE_BITSIZE (GET_MODE (varop
))
10041 && GET_MODE_BITSIZE (result_mode
) <= HOST_BITS_PER_WIDE_INT
10042 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
10043 && !VECTOR_MODE_P (result_mode
))
10045 enum rtx_code first_code
= GET_CODE (varop
);
10046 unsigned int first_count
= INTVAL (XEXP (varop
, 1));
10047 unsigned HOST_WIDE_INT mask
;
10050 /* We have one common special case. We can't do any merging if
10051 the inner code is an ASHIFTRT of a smaller mode. However, if
10052 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10053 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10054 we can convert it to
10055 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
10056 This simplifies certain SIGN_EXTEND operations. */
10057 if (code
== ASHIFT
&& first_code
== ASHIFTRT
10058 && count
== (GET_MODE_BITSIZE (result_mode
)
10059 - GET_MODE_BITSIZE (GET_MODE (varop
))))
10061 /* C3 has the low-order C1 bits zero. */
10063 mask
= GET_MODE_MASK (mode
)
10064 & ~(((unsigned HOST_WIDE_INT
) 1 << first_count
) - 1);
10066 varop
= simplify_and_const_int (NULL_RTX
, result_mode
,
10067 XEXP (varop
, 0), mask
);
10068 varop
= simplify_shift_const (NULL_RTX
, ASHIFT
, result_mode
,
10070 count
= first_count
;
10075 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10076 than C1 high-order bits equal to the sign bit, we can convert
10077 this to either an ASHIFT or an ASHIFTRT depending on the
10080 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10082 if (code
== ASHIFTRT
&& first_code
== ASHIFT
10083 && GET_MODE (varop
) == shift_mode
10084 && (num_sign_bit_copies (XEXP (varop
, 0), shift_mode
)
10087 varop
= XEXP (varop
, 0);
10088 count
-= first_count
;
10098 /* There are some cases we can't do. If CODE is ASHIFTRT,
10099 we can only do this if FIRST_CODE is also ASHIFTRT.
10101 We can't do the case when CODE is ROTATE and FIRST_CODE is
10104 If the mode of this shift is not the mode of the outer shift,
10105 we can't do this if either shift is a right shift or ROTATE.
10107 Finally, we can't do any of these if the mode is too wide
10108 unless the codes are the same.
10110 Handle the case where the shift codes are the same
10113 if (code
== first_code
)
10115 if (GET_MODE (varop
) != result_mode
10116 && (code
== ASHIFTRT
|| code
== LSHIFTRT
10117 || code
== ROTATE
))
10120 count
+= first_count
;
10121 varop
= XEXP (varop
, 0);
10125 if (code
== ASHIFTRT
10126 || (code
== ROTATE
&& first_code
== ASHIFTRT
)
10127 || GET_MODE_BITSIZE (mode
) > HOST_BITS_PER_WIDE_INT
10128 || (GET_MODE (varop
) != result_mode
10129 && (first_code
== ASHIFTRT
|| first_code
== LSHIFTRT
10130 || first_code
== ROTATE
10131 || code
== ROTATE
)))
10134 /* To compute the mask to apply after the shift, shift the
10135 nonzero bits of the inner shift the same way the
10136 outer shift will. */
10138 mask_rtx
= GEN_INT (nonzero_bits (varop
, GET_MODE (varop
)));
10141 = simplify_const_binary_operation (code
, result_mode
, mask_rtx
,
10144 /* Give up if we can't compute an outer operation to use. */
10146 || !CONST_INT_P (mask_rtx
)
10147 || ! merge_outer_ops (&outer_op
, &outer_const
, AND
,
10149 result_mode
, &complement_p
))
	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
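	      /* Illustrative example: two logical right shifts combine by
		 adding, so (lshiftrt (lshiftrt X 3) 4) shifts by 7 in all;
		 when the directions differ the counts are subtracted, with
		 the mask computed above covering the bits cleared by the
		 inner shift.  */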
10154 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
10155 == (first_code
== ASHIFTRT
|| first_code
== LSHIFTRT
))
10156 count
+= first_count
;
10158 count
-= first_count
;
10160 /* If COUNT is positive, the new shift is usually CODE,
10161 except for the two exceptions below, in which case it is
10162 FIRST_CODE. If the count is negative, FIRST_CODE should
10165 && ((first_code
== ROTATE
&& code
== ASHIFT
)
10166 || (first_code
== ASHIFTRT
&& code
== LSHIFTRT
)))
10168 else if (count
< 0)
10169 code
= first_code
, count
= -count
;
10171 varop
= XEXP (varop
, 0);
10175 /* If we have (A << B << C) for any shift, we can convert this to
10176 (A << C << B). This wins if A is a constant. Only try this if
10177 B is not a constant. */
10179 else if (GET_CODE (varop
) == code
10180 && CONST_INT_P (XEXP (varop
, 0))
10181 && !CONST_INT_P (XEXP (varop
, 1)))
10183 rtx new_rtx
= simplify_const_binary_operation (code
, mode
,
10186 varop
= gen_rtx_fmt_ee (code
, mode
, new_rtx
, XEXP (varop
, 1));
10193 if (VECTOR_MODE_P (mode
))
10196 /* Make this fit the case below. */
10197 varop
= gen_rtx_XOR (mode
, XEXP (varop
, 0),
10198 GEN_INT (GET_MODE_MASK (mode
)));
10204 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10205 with C the size of VAROP - 1 and the shift is logical if
10206 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10207 we have an (le X 0) operation. If we have an arithmetic shift
10208 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10209 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10211 if (GET_CODE (varop
) == IOR
&& GET_CODE (XEXP (varop
, 0)) == PLUS
10212 && XEXP (XEXP (varop
, 0), 1) == constm1_rtx
10213 && (STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
10214 && (code
== LSHIFTRT
|| code
== ASHIFTRT
)
10215 && count
== (GET_MODE_BITSIZE (GET_MODE (varop
)) - 1)
10216 && rtx_equal_p (XEXP (XEXP (varop
, 0), 0), XEXP (varop
, 1)))
10219 varop
= gen_rtx_LE (GET_MODE (varop
), XEXP (varop
, 1),
10222 if (STORE_FLAG_VALUE
== 1 ? code
== ASHIFTRT
: code
== LSHIFTRT
)
10223 varop
= gen_rtx_NEG (GET_MODE (varop
), varop
);
10228 /* If we have (shift (logical)), move the logical to the outside
10229 to allow it to possibly combine with another logical and the
10230 shift to combine with another shift. This also canonicalizes to
10231 what a ZERO_EXTRACT looks like. Also, some machines have
10232 (and (shift)) insns. */
10234 if (CONST_INT_P (XEXP (varop
, 1))
10235 /* We can't do this if we have (ashiftrt (xor)) and the
10236 constant has its sign bit set in shift_mode. */
10237 && !(code
== ASHIFTRT
&& GET_CODE (varop
) == XOR
10238 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop
, 1)),
10240 && (new_rtx
= simplify_const_binary_operation (code
, result_mode
,
10242 GEN_INT (count
))) != 0
10243 && CONST_INT_P (new_rtx
)
10244 && merge_outer_ops (&outer_op
, &outer_const
, GET_CODE (varop
),
10245 INTVAL (new_rtx
), result_mode
, &complement_p
))
10247 varop
= XEXP (varop
, 0);
10251 /* If we can't do that, try to simplify the shift in each arm of the
10252 logical expression, make a new logical expression, and apply
10253 the inverse distributive law. This also can't be done
10254 for some (ashiftrt (xor)). */
10255 if (CONST_INT_P (XEXP (varop
, 1))
10256 && !(code
== ASHIFTRT
&& GET_CODE (varop
) == XOR
10257 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop
, 1)),
10260 rtx lhs
= simplify_shift_const (NULL_RTX
, code
, shift_mode
,
10261 XEXP (varop
, 0), count
);
10262 rtx rhs
= simplify_shift_const (NULL_RTX
, code
, shift_mode
,
10263 XEXP (varop
, 1), count
);
10265 varop
= simplify_gen_binary (GET_CODE (varop
), shift_mode
,
10267 varop
= apply_distributive_law (varop
);
10275 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10276 says that the sign bit can be tested, FOO has mode MODE, C is
10277 GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
10278 that may be nonzero. */
10279 if (code
== LSHIFTRT
10280 && XEXP (varop
, 1) == const0_rtx
10281 && GET_MODE (XEXP (varop
, 0)) == result_mode
10282 && count
== (GET_MODE_BITSIZE (result_mode
) - 1)
10283 && GET_MODE_BITSIZE (result_mode
) <= HOST_BITS_PER_WIDE_INT
10284 && STORE_FLAG_VALUE
== -1
10285 && nonzero_bits (XEXP (varop
, 0), result_mode
) == 1
10286 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1, result_mode
,
10289 varop
= XEXP (varop
, 0);
10296 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10297 than the number of bits in the mode is equivalent to A. */
10298 if (code
== LSHIFTRT
10299 && count
== (GET_MODE_BITSIZE (result_mode
) - 1)
10300 && nonzero_bits (XEXP (varop
, 0), result_mode
) == 1)
10302 varop
= XEXP (varop
, 0);
10307 /* NEG commutes with ASHIFT since it is multiplication. Move the
10308 NEG outside to allow shifts to combine. */
10310 && merge_outer_ops (&outer_op
, &outer_const
, NEG
, 0, result_mode
,
10313 varop
= XEXP (varop
, 0);
10319 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10320 is one less than the number of bits in the mode is
10321 equivalent to (xor A 1). */
10322 if (code
== LSHIFTRT
10323 && count
== (GET_MODE_BITSIZE (result_mode
) - 1)
10324 && XEXP (varop
, 1) == constm1_rtx
10325 && nonzero_bits (XEXP (varop
, 0), result_mode
) == 1
10326 && merge_outer_ops (&outer_op
, &outer_const
, XOR
, 1, result_mode
,
10330 varop
= XEXP (varop
, 0);
	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and
	     those bits are known zero in FOO, we can replace the PLUS with
	     FOO.  Similarly in the other operand order.  This code occurs
	     when we are computing the size of a variable-size array.  */
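	  /* Illustrative example: if X is known to have its low two bits
	     zero (say a size already rounded to a multiple of 4), then
	     (lshiftrt (plus X 3) 2) equals (lshiftrt X 2): the 3 only
	     touches bits that are shifted out and cannot carry into the
	     bits that are kept.  */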
10340 if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
10341 && count
< HOST_BITS_PER_WIDE_INT
10342 && nonzero_bits (XEXP (varop
, 1), result_mode
) >> count
== 0
10343 && (nonzero_bits (XEXP (varop
, 1), result_mode
)
10344 & nonzero_bits (XEXP (varop
, 0), result_mode
)) == 0)
10346 varop
= XEXP (varop
, 0);
10349 else if ((code
== ASHIFTRT
|| code
== LSHIFTRT
)
10350 && count
< HOST_BITS_PER_WIDE_INT
10351 && GET_MODE_BITSIZE (result_mode
) <= HOST_BITS_PER_WIDE_INT
10352 && 0 == (nonzero_bits (XEXP (varop
, 0), result_mode
)
10354 && 0 == (nonzero_bits (XEXP (varop
, 0), result_mode
)
10355 & nonzero_bits (XEXP (varop
, 1),
10358 varop
= XEXP (varop
, 1);
10362 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
10364 && CONST_INT_P (XEXP (varop
, 1))
10365 && (new_rtx
= simplify_const_binary_operation (ASHIFT
, result_mode
,
10367 GEN_INT (count
))) != 0
10368 && CONST_INT_P (new_rtx
)
10369 && merge_outer_ops (&outer_op
, &outer_const
, PLUS
,
10370 INTVAL (new_rtx
), result_mode
, &complement_p
))
10372 varop
= XEXP (varop
, 0);
	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift (logical).  See details in logical handling above
	     for reasoning in doing so.  */
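	  /* Illustrative example: in SImode, (plus X 0x80000000) and
	     (xor X 0x80000000) give the same result for every X (both just
	     flip bit 31), which is why the PLUS can be re-expressed as an
	     XOR and moved to the outer operation.  */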
10381 if (code
== LSHIFTRT
10382 && CONST_INT_P (XEXP (varop
, 1))
10383 && mode_signbit_p (result_mode
, XEXP (varop
, 1))
10384 && (new_rtx
= simplify_const_binary_operation (code
, result_mode
,
10386 GEN_INT (count
))) != 0
10387 && CONST_INT_P (new_rtx
)
10388 && merge_outer_ops (&outer_op
, &outer_const
, XOR
,
10389 INTVAL (new_rtx
), result_mode
, &complement_p
))
10391 varop
= XEXP (varop
, 0);
10398 /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
10399 with C the size of VAROP - 1 and the shift is logical if
10400 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10401 we have a (gt X 0) operation. If the shift is arithmetic with
10402 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10403 we have a (neg (gt X 0)) operation. */
10405 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
10406 && GET_CODE (XEXP (varop
, 0)) == ASHIFTRT
10407 && count
== (GET_MODE_BITSIZE (GET_MODE (varop
)) - 1)
10408 && (code
== LSHIFTRT
|| code
== ASHIFTRT
)
10409 && CONST_INT_P (XEXP (XEXP (varop
, 0), 1))
10410 && INTVAL (XEXP (XEXP (varop
, 0), 1)) == count
10411 && rtx_equal_p (XEXP (XEXP (varop
, 0), 0), XEXP (varop
, 1)))
10414 varop
= gen_rtx_GT (GET_MODE (varop
), XEXP (varop
, 1),
10417 if (STORE_FLAG_VALUE
== 1 ? code
== ASHIFTRT
: code
== LSHIFTRT
)
10418 varop
= gen_rtx_NEG (GET_MODE (varop
), varop
);
10425 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10426 if the truncate does not affect the value. */
10427 if (code
== LSHIFTRT
10428 && GET_CODE (XEXP (varop
, 0)) == LSHIFTRT
10429 && CONST_INT_P (XEXP (XEXP (varop
, 0), 1))
10430 && (INTVAL (XEXP (XEXP (varop
, 0), 1))
10431 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop
, 0)))
10432 - GET_MODE_BITSIZE (GET_MODE (varop
)))))
10434 rtx varop_inner
= XEXP (varop
, 0);
10437 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner
),
10438 XEXP (varop_inner
, 0),
10440 (count
+ INTVAL (XEXP (varop_inner
, 1))));
10441 varop
= gen_rtx_TRUNCATE (GET_MODE (varop
), varop_inner
);
10454 shift_mode
= try_widen_shift_mode (code
, varop
, count
, result_mode
, mode
,
10455 outer_op
, outer_const
);
10457 /* We have now finished analyzing the shift. The result should be
10458 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10459 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10460 to the result of the shift. OUTER_CONST is the relevant constant,
10461 but we must turn off all bits turned off in the shift. */
10463 if (outer_op
== UNKNOWN
10464 && orig_code
== code
&& orig_count
== count
10465 && varop
== orig_varop
10466 && shift_mode
== GET_MODE (varop
))
10469 /* Make a SUBREG if necessary. If we can't make it, fail. */
10470 varop
= gen_lowpart (shift_mode
, varop
);
10471 if (varop
== NULL_RTX
|| GET_CODE (varop
) == CLOBBER
)
10474 /* If we have an outer operation and we just made a shift, it is
10475 possible that we could have simplified the shift were it not
10476 for the outer operation. So try to do the simplification
10479 if (outer_op
!= UNKNOWN
)
10480 x
= simplify_shift_const_1 (code
, shift_mode
, varop
, count
);
10485 x
= simplify_gen_binary (code
, shift_mode
, varop
, GEN_INT (count
));
10487 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10488 turn off all the bits that the shift would have turned off. */
10489 if (orig_code
== LSHIFTRT
&& result_mode
!= shift_mode
)
10490 x
= simplify_and_const_int (NULL_RTX
, shift_mode
, x
,
10491 GET_MODE_MASK (result_mode
) >> orig_count
);
10493 /* Do the remainder of the processing in RESULT_MODE. */
10494 x
= gen_lowpart_or_truncate (result_mode
, x
);
10496 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10499 x
= simplify_gen_unary (NOT
, result_mode
, x
, result_mode
);
10501 if (outer_op
!= UNKNOWN
)
10503 if (GET_RTX_CLASS (outer_op
) != RTX_UNARY
10504 && GET_MODE_BITSIZE (result_mode
) < HOST_BITS_PER_WIDE_INT
)
10505 outer_const
= trunc_int_for_mode (outer_const
, result_mode
);
10507 if (outer_op
== AND
)
10508 x
= simplify_and_const_int (NULL_RTX
, result_mode
, x
, outer_const
);
10509 else if (outer_op
== SET
)
10511 /* This means that we have determined that the result is
10512 equivalent to a constant. This should be rare. */
10513 if (!side_effects_p (x
))
10514 x
= GEN_INT (outer_const
);
10516 else if (GET_RTX_CLASS (outer_op
) == RTX_UNARY
)
10517 x
= simplify_gen_unary (outer_op
, result_mode
, x
, result_mode
);
10519 x
= simplify_gen_binary (outer_op
, result_mode
, x
,
10520 GEN_INT (outer_const
));
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */
static rtx
simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
		      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   otherwise -1.  */
10565 recog_for_combine (rtx
*pnewpat
, rtx insn
, rtx
*pnotes
)
10567 rtx pat
= *pnewpat
;
10568 int insn_code_number
;
10569 int num_clobbers_to_add
= 0;
10572 rtx old_notes
, old_pat
;
10574 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10575 we use to indicate that something didn't match. If we find such a
10576 thing, force rejection. */
10577 if (GET_CODE (pat
) == PARALLEL
)
10578 for (i
= XVECLEN (pat
, 0) - 1; i
>= 0; i
--)
10579 if (GET_CODE (XVECEXP (pat
, 0, i
)) == CLOBBER
10580 && XEXP (XVECEXP (pat
, 0, i
), 0) == const0_rtx
)
10583 old_pat
= PATTERN (insn
);
10584 old_notes
= REG_NOTES (insn
);
10585 PATTERN (insn
) = pat
;
10586 REG_NOTES (insn
) = 0;
10588 insn_code_number
= recog (pat
, insn
, &num_clobbers_to_add
);
10589 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
10591 if (insn_code_number
< 0)
10592 fputs ("Failed to match this instruction:\n", dump_file
);
10594 fputs ("Successfully matched this instruction:\n", dump_file
);
10595 print_rtl_single (dump_file
, pat
);
10598 /* If it isn't, there is the possibility that we previously had an insn
10599 that clobbered some register as a side effect, but the combined
10600 insn doesn't need to do that. So try once more without the clobbers
10601 unless this represents an ASM insn. */
10603 if (insn_code_number
< 0 && ! check_asm_operands (pat
)
10604 && GET_CODE (pat
) == PARALLEL
)
10608 for (pos
= 0, i
= 0; i
< XVECLEN (pat
, 0); i
++)
10609 if (GET_CODE (XVECEXP (pat
, 0, i
)) != CLOBBER
)
10612 SUBST (XVECEXP (pat
, 0, pos
), XVECEXP (pat
, 0, i
));
10616 SUBST_INT (XVECLEN (pat
, 0), pos
);
10619 pat
= XVECEXP (pat
, 0, 0);
10621 PATTERN (insn
) = pat
;
10622 insn_code_number
= recog (pat
, insn
, &num_clobbers_to_add
);
10623 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
10625 if (insn_code_number
< 0)
10626 fputs ("Failed to match this instruction:\n", dump_file
);
10628 fputs ("Successfully matched this instruction:\n", dump_file
);
10629 print_rtl_single (dump_file
, pat
);
10632 PATTERN (insn
) = old_pat
;
10633 REG_NOTES (insn
) = old_notes
;
10635 /* Recognize all noop sets, these will be killed by followup pass. */
10636 if (insn_code_number
< 0 && GET_CODE (pat
) == SET
&& set_noop_p (pat
))
10637 insn_code_number
= NOOP_MOVE_INSN_CODE
, num_clobbers_to_add
= 0;
10639 /* If we had any clobbers to add, make a new pattern than contains
10640 them. Then check to make sure that all of them are dead. */
10641 if (num_clobbers_to_add
)
10643 rtx newpat
= gen_rtx_PARALLEL (VOIDmode
,
10644 rtvec_alloc (GET_CODE (pat
) == PARALLEL
10645 ? (XVECLEN (pat
, 0)
10646 + num_clobbers_to_add
)
10647 : num_clobbers_to_add
+ 1));
10649 if (GET_CODE (pat
) == PARALLEL
)
10650 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
10651 XVECEXP (newpat
, 0, i
) = XVECEXP (pat
, 0, i
);
10653 XVECEXP (newpat
, 0, 0) = pat
;
10655 add_clobbers (newpat
, insn_code_number
);
10657 for (i
= XVECLEN (newpat
, 0) - num_clobbers_to_add
;
10658 i
< XVECLEN (newpat
, 0); i
++)
10660 if (REG_P (XEXP (XVECEXP (newpat
, 0, i
), 0))
10661 && ! reg_dead_at_p (XEXP (XVECEXP (newpat
, 0, i
), 0), insn
))
10663 if (GET_CODE (XEXP (XVECEXP (newpat
, 0, i
), 0)) != SCRATCH
)
10665 gcc_assert (REG_P (XEXP (XVECEXP (newpat
, 0, i
), 0)));
10666 notes
= alloc_reg_note (REG_UNUSED
,
10667 XEXP (XVECEXP (newpat
, 0, i
), 0), notes
);
10676 return insn_code_number
;
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */
10690 gen_lowpart_for_combine (enum machine_mode omode
, rtx x
)
10692 enum machine_mode imode
= GET_MODE (x
);
10693 unsigned int osize
= GET_MODE_SIZE (omode
);
10694 unsigned int isize
= GET_MODE_SIZE (imode
);
10697 if (omode
== imode
)
10700 /* Return identity if this is a CONST or symbolic reference. */
10702 && (GET_CODE (x
) == CONST
10703 || GET_CODE (x
) == SYMBOL_REF
10704 || GET_CODE (x
) == LABEL_REF
))
10707 /* We can only support MODE being wider than a word if X is a
10708 constant integer or has a mode the same size. */
10709 if (GET_MODE_SIZE (omode
) > UNITS_PER_WORD
10710 && ! ((imode
== VOIDmode
10711 && (CONST_INT_P (x
)
10712 || GET_CODE (x
) == CONST_DOUBLE
))
10713 || isize
== osize
))
10716 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10717 won't know what to do. So we will strip off the SUBREG here and
10718 process normally. */
10719 if (GET_CODE (x
) == SUBREG
&& MEM_P (SUBREG_REG (x
)))
10721 x
= SUBREG_REG (x
);
10723 /* For use in case we fall down into the address adjustments
10724 further below, we need to adjust the known mode and size of
10725 x; imode and isize, since we just adjusted x. */
10726 imode
= GET_MODE (x
);
10728 if (imode
== omode
)
10731 isize
= GET_MODE_SIZE (imode
);
10734 result
= gen_lowpart_common (omode
, x
);
10743 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10745 if (MEM_VOLATILE_P (x
) || mode_dependent_address_p (XEXP (x
, 0)))
10748 /* If we want to refer to something bigger than the original memref,
10749 generate a paradoxical subreg instead. That will force a reload
10750 of the original memref X. */
10752 return gen_rtx_SUBREG (omode
, x
, 0);
10754 if (WORDS_BIG_ENDIAN
)
10755 offset
= MAX (isize
, UNITS_PER_WORD
) - MAX (osize
, UNITS_PER_WORD
);
10757 /* Adjust the address so that the address-after-the-data is
10759 if (BYTES_BIG_ENDIAN
)
10760 offset
-= MIN (UNITS_PER_WORD
, osize
) - MIN (UNITS_PER_WORD
, isize
);
10762 return adjust_address_nv (x
, omode
, offset
);
10765 /* If X is a comparison operator, rewrite it in a new mode. This
10766 probably won't match, but may allow further simplifications. */
10767 else if (COMPARISON_P (x
))
10768 return gen_rtx_fmt_ee (GET_CODE (x
), omode
, XEXP (x
, 0), XEXP (x
, 1));
10770 /* If we couldn't simplify X any other way, just enclose it in a
10771 SUBREG. Normally, this SUBREG won't match, but some patterns may
10772 include an explicit SUBREG or we may simplify it further in combine. */
10778 offset
= subreg_lowpart_offset (omode
, imode
);
10779 if (imode
== VOIDmode
)
10781 imode
= int_mode_for_mode (omode
);
10782 x
= gen_lowpart_common (imode
, x
);
10786 res
= simplify_gen_subreg (omode
, x
, imode
, offset
);
10792 return gen_rtx_CLOBBER (omode
, const0_rtx
);
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */
static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  enum machine_mode mode, tmode;
10815 /* Try a few ways of applying the same transformation to both operands. */
10818 #ifndef WORD_REGISTER_OPERATIONS
10819 /* The test below this one won't handle SIGN_EXTENDs on these machines,
10820 so check specially. */
10821 if (code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
10822 && GET_CODE (op0
) == ASHIFTRT
&& GET_CODE (op1
) == ASHIFTRT
10823 && GET_CODE (XEXP (op0
, 0)) == ASHIFT
10824 && GET_CODE (XEXP (op1
, 0)) == ASHIFT
10825 && GET_CODE (XEXP (XEXP (op0
, 0), 0)) == SUBREG
10826 && GET_CODE (XEXP (XEXP (op1
, 0), 0)) == SUBREG
10827 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0
, 0), 0)))
10828 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1
, 0), 0))))
10829 && CONST_INT_P (XEXP (op0
, 1))
10830 && XEXP (op0
, 1) == XEXP (op1
, 1)
10831 && XEXP (op0
, 1) == XEXP (XEXP (op0
, 0), 1)
10832 && XEXP (op0
, 1) == XEXP (XEXP (op1
, 0), 1)
10833 && (INTVAL (XEXP (op0
, 1))
10834 == (GET_MODE_BITSIZE (GET_MODE (op0
))
10835 - (GET_MODE_BITSIZE
10836 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0
, 0), 0))))))))
10838 op0
= SUBREG_REG (XEXP (XEXP (op0
, 0), 0));
10839 op1
= SUBREG_REG (XEXP (XEXP (op1
, 0), 0));
  /* If both operands are the same constant shift, see if we can ignore the
     shift.  We can if the shift is a rotate or if the bits shifted out of
     this shift are known to be zero for both inputs and if the type of
     comparison is compatible with the shift.  */
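  /* Illustrative example: comparing (lshiftrt A 2) with (lshiftrt B 2) for
     equality can be done as a comparison of A with B when the low two bits
     of both A and B are known to be zero, since the shift then discards no
     information from either operand.  */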
10847 if (GET_CODE (op0
) == GET_CODE (op1
)
10848 && GET_MODE_BITSIZE (GET_MODE (op0
)) <= HOST_BITS_PER_WIDE_INT
10849 && ((GET_CODE (op0
) == ROTATE
&& (code
== NE
|| code
== EQ
))
10850 || ((GET_CODE (op0
) == LSHIFTRT
|| GET_CODE (op0
) == ASHIFT
)
10851 && (code
!= GT
&& code
!= LT
&& code
!= GE
&& code
!= LE
))
10852 || (GET_CODE (op0
) == ASHIFTRT
10853 && (code
!= GTU
&& code
!= LTU
10854 && code
!= GEU
&& code
!= LEU
)))
10855 && CONST_INT_P (XEXP (op0
, 1))
10856 && INTVAL (XEXP (op0
, 1)) >= 0
10857 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
10858 && XEXP (op0
, 1) == XEXP (op1
, 1))
10860 enum machine_mode mode
= GET_MODE (op0
);
10861 unsigned HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
10862 int shift_count
= INTVAL (XEXP (op0
, 1));
10864 if (GET_CODE (op0
) == LSHIFTRT
|| GET_CODE (op0
) == ASHIFTRT
)
10865 mask
&= (mask
>> shift_count
) << shift_count
;
10866 else if (GET_CODE (op0
) == ASHIFT
)
10867 mask
= (mask
& (mask
<< shift_count
)) >> shift_count
;
10869 if ((nonzero_bits (XEXP (op0
, 0), mode
) & ~mask
) == 0
10870 && (nonzero_bits (XEXP (op1
, 0), mode
) & ~mask
) == 0)
10871 op0
= XEXP (op0
, 0), op1
= XEXP (op1
, 0);
10876 /* If both operands are AND's of a paradoxical SUBREG by constant, the
10877 SUBREGs are of the same mode, and, in both cases, the AND would
10878 be redundant if the comparison was done in the narrower mode,
10879 do the comparison in the narrower mode (e.g., we are AND'ing with 1
10880 and the operand's possibly nonzero bits are 0xffffff01; in that case
10881 if we only care about QImode, we don't need the AND). This case
10882 occurs if the output mode of an scc insn is not SImode and
10883 STORE_FLAG_VALUE == 1 (e.g., the 386).
10885 Similarly, check for a case where the AND's are ZERO_EXTEND
10886 operations from some narrower mode even though a SUBREG is not
10889 else if (GET_CODE (op0
) == AND
&& GET_CODE (op1
) == AND
10890 && CONST_INT_P (XEXP (op0
, 1))
10891 && CONST_INT_P (XEXP (op1
, 1)))
10893 rtx inner_op0
= XEXP (op0
, 0);
10894 rtx inner_op1
= XEXP (op1
, 0);
10895 HOST_WIDE_INT c0
= INTVAL (XEXP (op0
, 1));
10896 HOST_WIDE_INT c1
= INTVAL (XEXP (op1
, 1));
10899 if (GET_CODE (inner_op0
) == SUBREG
&& GET_CODE (inner_op1
) == SUBREG
10900 && (GET_MODE_SIZE (GET_MODE (inner_op0
))
10901 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0
))))
10902 && (GET_MODE (SUBREG_REG (inner_op0
))
10903 == GET_MODE (SUBREG_REG (inner_op1
)))
10904 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0
)))
10905 <= HOST_BITS_PER_WIDE_INT
)
10906 && (0 == ((~c0
) & nonzero_bits (SUBREG_REG (inner_op0
),
10907 GET_MODE (SUBREG_REG (inner_op0
)))))
10908 && (0 == ((~c1
) & nonzero_bits (SUBREG_REG (inner_op1
),
10909 GET_MODE (SUBREG_REG (inner_op1
))))))
10911 op0
= SUBREG_REG (inner_op0
);
10912 op1
= SUBREG_REG (inner_op1
);
10914 /* The resulting comparison is always unsigned since we masked
10915 off the original sign bit. */
10916 code
= unsigned_condition (code
);
10922 for (tmode
= GET_CLASS_NARROWEST_MODE
10923 (GET_MODE_CLASS (GET_MODE (op0
)));
10924 tmode
!= GET_MODE (op0
); tmode
= GET_MODE_WIDER_MODE (tmode
))
10925 if ((unsigned HOST_WIDE_INT
) c0
== GET_MODE_MASK (tmode
))
10927 op0
= gen_lowpart (tmode
, inner_op0
);
10928 op1
= gen_lowpart (tmode
, inner_op1
);
10929 code
= unsigned_condition (code
);
10938 /* If both operands are NOT, we can strip off the outer operation
10939 and adjust the comparison code for swapped operands; similarly for
10940 NEG, except that this must be an equality comparison. */
10941 else if ((GET_CODE (op0
) == NOT
&& GET_CODE (op1
) == NOT
)
10942 || (GET_CODE (op0
) == NEG
&& GET_CODE (op1
) == NEG
10943 && (code
== EQ
|| code
== NE
)))
10944 op0
= XEXP (op0
, 0), op1
= XEXP (op1
, 0), code
= swap_condition (code
);
10950 /* If the first operand is a constant, swap the operands and adjust the
10951 comparison code appropriately, but don't do this if the second operand
10952 is already a constant integer. */
10953 if (swap_commutative_operands_p (op0
, op1
))
10955 tem
= op0
, op0
= op1
, op1
= tem
;
10956 code
= swap_condition (code
);
10959 /* We now enter a loop during which we will try to simplify the comparison.
10960 For the most part, we only are concerned with comparisons with zero,
10961 but some things may really be comparisons with zero but not start
10962 out looking that way. */
10964 while (CONST_INT_P (op1
))
10966 enum machine_mode mode
= GET_MODE (op0
);
10967 unsigned int mode_width
= GET_MODE_BITSIZE (mode
);
10968 unsigned HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
10969 int equality_comparison_p
;
10970 int sign_bit_comparison_p
;
10971 int unsigned_comparison_p
;
10972 HOST_WIDE_INT const_op
;
10974 /* We only want to handle integral modes. This catches VOIDmode,
10975 CCmode, and the floating-point modes. An exception is that we
10976 can handle VOIDmode if OP0 is a COMPARE or a comparison
10979 if (GET_MODE_CLASS (mode
) != MODE_INT
10980 && ! (mode
== VOIDmode
10981 && (GET_CODE (op0
) == COMPARE
|| COMPARISON_P (op0
))))
10984 /* Get the constant we are comparing against and turn off all bits
10985 not on in our mode. */
10986 const_op
= INTVAL (op1
);
10987 if (mode
!= VOIDmode
)
10988 const_op
= trunc_int_for_mode (const_op
, mode
);
10989 op1
= GEN_INT (const_op
);
10991 /* If we are comparing against a constant power of two and the value
10992 being compared can only have that single bit nonzero (e.g., it was
10993 `and'ed with that bit), we can replace this with a comparison
10996 && (code
== EQ
|| code
== NE
|| code
== GE
|| code
== GEU
10997 || code
== LT
|| code
== LTU
)
10998 && mode_width
<= HOST_BITS_PER_WIDE_INT
10999 && exact_log2 (const_op
) >= 0
11000 && nonzero_bits (op0
, mode
) == (unsigned HOST_WIDE_INT
) const_op
)
11002 code
= (code
== EQ
|| code
== GE
|| code
== GEU
? NE
: EQ
);
11003 op1
= const0_rtx
, const_op
= 0;
11006 /* Similarly, if we are comparing a value known to be either -1 or
11007 0 with -1, change it to the opposite comparison against zero. */
11010 && (code
== EQ
|| code
== NE
|| code
== GT
|| code
== LE
11011 || code
== GEU
|| code
== LTU
)
11012 && num_sign_bit_copies (op0
, mode
) == mode_width
)
11014 code
= (code
== EQ
|| code
== LE
|| code
== GEU
? NE
: EQ
);
11015 op1
= const0_rtx
, const_op
= 0;
11018 /* Do some canonicalizations based on the comparison code. We prefer
11019 comparisons against zero and then prefer equality comparisons.
11020 If we can reduce the size of a constant, we will do that too. */
11025 /* < C is equivalent to <= (C - 1) */
11029 op1
= GEN_INT (const_op
);
11031 /* ... fall through to LE case below. */
11037 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11041 op1
= GEN_INT (const_op
);
11045 /* If we are doing a <= 0 comparison on a value known to have
11046 a zero sign bit, we can replace this with == 0. */
11047 else if (const_op
== 0
11048 && mode_width
<= HOST_BITS_PER_WIDE_INT
11049 && (nonzero_bits (op0
, mode
)
11050 & ((unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)))
11056 /* >= C is equivalent to > (C - 1). */
11060 op1
= GEN_INT (const_op
);
11062 /* ... fall through to GT below. */
11068 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11072 op1
= GEN_INT (const_op
);
11076 /* If we are doing a > 0 comparison on a value known to have
11077 a zero sign bit, we can replace this with != 0. */
11078 else if (const_op
== 0
11079 && mode_width
<= HOST_BITS_PER_WIDE_INT
11080 && (nonzero_bits (op0
, mode
)
11081 & ((unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)))
11087 /* < C is equivalent to <= (C - 1). */
11091 op1
= GEN_INT (const_op
);
11093 /* ... fall through ... */
11096 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11097 else if (mode_width
<= HOST_BITS_PER_WIDE_INT
11098 && (unsigned HOST_WIDE_INT
) const_op
11099 == (unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1))
11101 const_op
= 0, op1
= const0_rtx
;
11109 /* unsigned <= 0 is equivalent to == 0 */
11113 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11114 else if (mode_width
<= HOST_BITS_PER_WIDE_INT
11115 && (unsigned HOST_WIDE_INT
) const_op
11116 == ((unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)) - 1)
11118 const_op
= 0, op1
= const0_rtx
;
11124 /* >= C is equivalent to > (C - 1). */
11128 op1
= GEN_INT (const_op
);
11130 /* ... fall through ... */
11133 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11134 else if (mode_width
<= HOST_BITS_PER_WIDE_INT
11135 && (unsigned HOST_WIDE_INT
) const_op
11136 == (unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1))
11138 const_op
= 0, op1
= const0_rtx
;
11146 /* unsigned > 0 is equivalent to != 0 */
11150 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11151 else if (mode_width
<= HOST_BITS_PER_WIDE_INT
11152 && (unsigned HOST_WIDE_INT
) const_op
11153 == ((unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)) - 1)
11155 const_op
= 0, op1
= const0_rtx
;
11164 /* Compute some predicates to simplify code below. */
11166 equality_comparison_p
= (code
== EQ
|| code
== NE
);
11167 sign_bit_comparison_p
= ((code
== LT
|| code
== GE
) && const_op
== 0);
11168 unsigned_comparison_p
= (code
== LTU
|| code
== LEU
|| code
== GTU
11171 /* If this is a sign bit comparison and we can do arithmetic in
11172 MODE, say that we will only be needing the sign bit of OP0. */
11173 if (sign_bit_comparison_p
11174 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
11175 op0
= force_to_mode (op0
, mode
,
11176 (unsigned HOST_WIDE_INT
) 1
11177 << (GET_MODE_BITSIZE (mode
) - 1),
11180 /* Now try cases based on the opcode of OP0. If none of the cases
11181 does a "continue", we exit this loop immediately after the
11184 switch (GET_CODE (op0
))
11187 /* If we are extracting a single bit from a variable position in
11188 a constant that has only a single bit set and are comparing it
11189 with zero, we can convert this into an equality comparison
11190 between the position and the location of the single bit. */
11191 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11192 have already reduced the shift count modulo the word size. */
11193 if (!SHIFT_COUNT_TRUNCATED
11194 && CONST_INT_P (XEXP (op0
, 0))
11195 && XEXP (op0
, 1) == const1_rtx
11196 && equality_comparison_p
&& const_op
== 0
11197 && (i
= exact_log2 (UINTVAL (XEXP (op0
, 0)))) >= 0)
11199 if (BITS_BIG_ENDIAN
)
11201 enum machine_mode new_mode
11202 = mode_for_extraction (EP_extzv
, 1);
11203 if (new_mode
== MAX_MACHINE_MODE
)
11204 i
= BITS_PER_WORD
- 1 - i
;
11208 i
= (GET_MODE_BITSIZE (mode
) - 1 - i
);
11212 op0
= XEXP (op0
, 2);
11216 /* Result is nonzero iff shift count is equal to I. */
11217 code
= reverse_condition (code
);
11221 /* ... fall through ... */
	  tem = expand_compound_operation (op0);

	  /* If testing for equality, we can take the NOT of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
	      op0 = XEXP (op0, 0);

	  /* If just looking at the sign bit, reverse the sense of the
	     comparison.  */
	  if (sign_bit_comparison_p)
	      op0 = XEXP (op0, 0);
	      code = (code == GE ? LT : GE);

	  /* If testing for equality, we can take the NEG of the constant.  */
	  if (equality_comparison_p
	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
	      op0 = XEXP (op0, 0);

	  /* The remaining cases only apply to comparisons with zero.  */

	  /* When X is ABS or is known positive,
	     (neg X) is < 0 if and only if X != 0.  */
	  if (sign_bit_comparison_p
	      && (GET_CODE (XEXP (op0, 0)) == ABS
		  || (mode_width <= HOST_BITS_PER_WIDE_INT
		      && (nonzero_bits (XEXP (op0, 0), mode)
			  & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	      op0 = XEXP (op0, 0);
	      code = (code == LT ? NE : EQ);

	  /* If we have NEG of something whose two high-order bits are the
	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
	  if (num_sign_bit_copies (op0, mode) >= 2)
	      op0 = XEXP (op0, 0);
	      code = swap_condition (code);
	  /* If we are testing equality and our count is a constant, we
	     can perform the inverse operation on our RHS.  */
	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
	      && (tem = simplify_binary_operation (ROTATERT, mode,
						   op1, XEXP (op0, 1))) != 0)
	      op0 = XEXP (op0, 0);

	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
	     a particular bit.  Convert it to an AND of a constant of that
	     bit.  This will be converted into a ZERO_EXTRACT.  */
	  if (const_op == 0 && sign_bit_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT)
	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
					    ((unsigned HOST_WIDE_INT) 1
					     - INTVAL (XEXP (op0, 1)))));
	      code = (code == LT ? NE : EQ);

	  /* Fall through.  */

	  /* ABS is ignorable inside an equality comparison with zero.  */
	  if (const_op == 0 && equality_comparison_p)
	      op0 = XEXP (op0, 0);
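	  /* Dropping the ABS is safe here because |X| == 0 exactly when
	     X == 0, so an equality test against zero is unchanged.  */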
	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
	     (compare FOO CONST) if CONST fits in FOO's mode and we
	     are either testing inequality or have an unsigned
	     comparison with ZERO_EXTEND or a signed comparison with
	     SIGN_EXTEND.  But don't do it if we don't have a compare
	     insn of the given mode, since we'd have to revert it
	     later on, and then we wouldn't know whether to sign- or
	     zero-extend.  */
	  mode = GET_MODE (XEXP (op0, 0));
	  if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
	      && ! unsigned_comparison_p
	      && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	      && ((unsigned HOST_WIDE_INT) const_op
		  < (((unsigned HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (mode) - 1))))
	      && have_insn_for (COMPARE, mode))
	      op0 = XEXP (op0, 0);
	  /* Check for the case where we are comparing A - C1 with C2, that is

	       (subreg:MODE (plus (A) (-C1))) op (C2)

	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
	     comparison in the wider mode.  One of the following two conditions
	     must be true in order for this to be valid:

	       1. The mode extension results in the same bit pattern being added
		  on both sides and the comparison is equality or unsigned.  As
		  C2 has been truncated to fit in MODE, the pattern can only be

	       2. The mode extension results in the sign bit being copied on

	     The difficulty here is that we have predicates for A but not for
	     (A - C1) so we need to check that C1 is within proper bounds so
	     as to perturb A as little as possible.  */

	  if (mode_width <= HOST_BITS_PER_WIDE_INT
	      && subreg_lowpart_p (op0)
	      && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
	      && GET_CODE (SUBREG_REG (op0)) == PLUS
	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
	      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	      rtx a = XEXP (SUBREG_REG (op0), 0);
	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));

		  && (unsigned HOST_WIDE_INT) c1
		     < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
		  && (equality_comparison_p || unsigned_comparison_p)
		  /* (A - C1) zero-extends if it is positive and sign-extends
		     if it is negative, C2 both zero- and sign-extends.  */
		  && ((0 == (nonzero_bits (a, inner_mode)
			     & ~GET_MODE_MASK (mode))
		      /* (A - C1) sign-extends if it is positive and 1-extends
			 if it is negative, C2 both sign- and 1-extends.  */
		      || (num_sign_bit_copies (a, inner_mode)
			  > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
		      || ((unsigned HOST_WIDE_INT) c1
			  < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
			  /* (A - C1) always sign-extends, like C2.  */
			  && num_sign_bit_copies (a, inner_mode)
			     > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
					       - (mode_width - 1))))
		  op0 = SUBREG_REG (op0);

	  /* If the inner mode is narrower and we are extracting the low part,
	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
	  if (subreg_lowpart_p (op0)
	      && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
	    /* Fall through */ ;
	  /* ... fall through ... */

	  mode = GET_MODE (XEXP (op0, 0));
	  if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
	      && (unsigned_comparison_p || equality_comparison_p)
	      && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	      && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode))
	      && have_insn_for (COMPARE, mode))
	      op0 = XEXP (op0, 0);

	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							op1, XEXP (op0, 1))))
	      op0 = XEXP (op0, 0);

	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
	      op0 = XEXP (XEXP (op0, 0), 0);
	      code = (code == LT ? EQ : NE);
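	  /* This works because (abs X) - 1 has its sign bit set only when
	     (abs X) is zero (even the most-negative-value case wraps around
	     to a non-negative result), i.e. exactly when X is zero.  */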
	  /* We used to optimize signed comparisons against zero, but that
	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
	     arrive here as equality comparisons, or (GEU, LTU) are
	     optimized away.  No need to special-case them.  */

	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
	     (eq B (minus A C)), whichever simplifies.  We can only do
	     this for equality comparisons due to pathological cases involving
	     overflows.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
							XEXP (op0, 1), op1)))
	      op0 = XEXP (op0, 0);

	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
							XEXP (op0, 0), op1)))
	      op0 = XEXP (op0, 1);

	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
	     of bits in X minus 1, is one iff X > 0.  */
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? LE : GT);

	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
	     if C is zero or B is a constant.  */
	  if (equality_comparison_p
	      && 0 != (tem = simplify_binary_operation (XOR, mode,
							XEXP (op0, 1), op1)))
	      op0 = XEXP (op0, 0);
	case UNEQ:  case LTGT:
	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
	case UNORDERED: case ORDERED:
	  /* We can't do anything if OP0 is a condition code value, rather
	     than an actual data value.  */
	      || CC0_P (XEXP (op0, 0))
	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)

	  /* Get the two operands being compared.  */
	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);

	  /* Check for the cases where we simply want the result of the
	     earlier test or the opposite of that result.  */
	  if (code == NE || code == EQ
	      || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
		  && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
		  && (STORE_FLAG_VALUE
		      & (((unsigned HOST_WIDE_INT) 1
			  << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
		  && (code == LT || code == GE)))
	      enum rtx_code new_code;
	      if (code == LT || code == NE)
		new_code = GET_CODE (op0);
		new_code = reversed_comparison_code (op0, NULL);

	      if (new_code != UNKNOWN)
	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
	      op0 = XEXP (op0, 1);
	      code = (code == GE ? GT : LE);

	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
	     will be converted to a ZERO_EXTRACT later.  */
	  if (const_op == 0 && equality_comparison_p
	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
				      XEXP (XEXP (op0, 0), 1));
	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
	     zero and X is a comparison and C1 and C2 describe only bits set
	     in STORE_FLAG_VALUE, we can compare with X.  */
	  if (const_op == 0 && equality_comparison_p
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && CONST_INT_P (XEXP (op0, 1))
	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
	      if ((~STORE_FLAG_VALUE & mask) == 0
		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
			  && COMPARISON_P (tem))))
		  op0 = XEXP (XEXP (op0, 0), 0);

	  /* If we are doing an equality comparison of an AND of a bit equal
	     to the sign bit, replace this with a LT or GE comparison of
	     the underlying value.  */
	  if (equality_comparison_p
	      && CONST_INT_P (XEXP (op0, 1))
	      && mode_width <= HOST_BITS_PER_WIDE_INT
	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
		  == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	      op0 = XEXP (op0, 0);
	      code = (code == EQ ? GE : LT);
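	  /* E.g. in SImode, (eq (and X (const_int 0x80000000)) (const_int 0))
	     only asks whether the sign bit of X is clear, which is exactly
	     (ge X (const_int 0)); the NE form likewise becomes LT.  */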
11618 /* If this AND operation is really a ZERO_EXTEND from a narrower
11619 mode, the constant fits within that mode, and this is either an
11620 equality or unsigned comparison, try to do this comparison in
11625 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11626 -> (ne:DI (reg:SI 4) (const_int 0))
11628 unless TRULY_NOOP_TRUNCATION allows it or the register is
11629 known to hold a value of the required mode the
11630 transformation is invalid. */
11631 if ((equality_comparison_p
|| unsigned_comparison_p
)
11632 && CONST_INT_P (XEXP (op0
, 1))
11633 && (i
= exact_log2 ((UINTVAL (XEXP (op0
, 1))
11634 & GET_MODE_MASK (mode
))
11636 && const_op
>> i
== 0
11637 && (tmode
= mode_for_size (i
, MODE_INT
, 1)) != BLKmode
11638 && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode
),
11639 GET_MODE_BITSIZE (GET_MODE (op0
)))
11640 || (REG_P (XEXP (op0
, 0))
11641 && reg_truncated_to_mode (tmode
, XEXP (op0
, 0)))))
11643 op0
= gen_lowpart (tmode
, XEXP (op0
, 0));
11647 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11648 fits in both M1 and M2 and the SUBREG is either paradoxical
11649 or represents the low part, permute the SUBREG and the AND
11651 if (GET_CODE (XEXP (op0
, 0)) == SUBREG
)
11653 unsigned HOST_WIDE_INT c1
;
11654 tmode
= GET_MODE (SUBREG_REG (XEXP (op0
, 0)));
11655 /* Require an integral mode, to avoid creating something like
11657 if (SCALAR_INT_MODE_P (tmode
)
11658 /* It is unsafe to commute the AND into the SUBREG if the
11659 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11660 not defined. As originally written the upper bits
11661 have a defined value due to the AND operation.
11662 However, if we commute the AND inside the SUBREG then
11663 they no longer have defined values and the meaning of
11664 the code has been changed. */
11666 #ifdef WORD_REGISTER_OPERATIONS
11667 || (mode_width
> GET_MODE_BITSIZE (tmode
)
11668 && mode_width
<= BITS_PER_WORD
)
11670 || (mode_width
<= GET_MODE_BITSIZE (tmode
)
11671 && subreg_lowpart_p (XEXP (op0
, 0))))
11672 && CONST_INT_P (XEXP (op0
, 1))
11673 && mode_width
<= HOST_BITS_PER_WIDE_INT
11674 && GET_MODE_BITSIZE (tmode
) <= HOST_BITS_PER_WIDE_INT
11675 && ((c1
= INTVAL (XEXP (op0
, 1))) & ~mask
) == 0
11676 && (c1
& ~GET_MODE_MASK (tmode
)) == 0
11678 && c1
!= GET_MODE_MASK (tmode
))
11680 op0
= simplify_gen_binary (AND
, tmode
,
11681 SUBREG_REG (XEXP (op0
, 0)),
11682 gen_int_mode (c1
, tmode
));
11683 op0
= gen_lowpart (mode
, op0
);
11688 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11689 if (const_op
== 0 && equality_comparison_p
11690 && XEXP (op0
, 1) == const1_rtx
11691 && GET_CODE (XEXP (op0
, 0)) == NOT
)
11693 op0
= simplify_and_const_int (NULL_RTX
, mode
,
11694 XEXP (XEXP (op0
, 0), 0), 1);
11695 code
= (code
== NE
? EQ
: NE
);
11699 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11700 (eq (and (lshiftrt X) 1) 0).
11701 Also handle the case where (not X) is expressed using xor. */
11702 if (const_op
== 0 && equality_comparison_p
11703 && XEXP (op0
, 1) == const1_rtx
11704 && GET_CODE (XEXP (op0
, 0)) == LSHIFTRT
)
11706 rtx shift_op
= XEXP (XEXP (op0
, 0), 0);
11707 rtx shift_count
= XEXP (XEXP (op0
, 0), 1);
11709 if (GET_CODE (shift_op
) == NOT
11710 || (GET_CODE (shift_op
) == XOR
11711 && CONST_INT_P (XEXP (shift_op
, 1))
11712 && CONST_INT_P (shift_count
)
11713 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
11714 && (UINTVAL (XEXP (shift_op
, 1))
11715 == (unsigned HOST_WIDE_INT
) 1
11716 << INTVAL (shift_count
))))
11719 = gen_rtx_LSHIFTRT (mode
, XEXP (shift_op
, 0), shift_count
);
11720 op0
= simplify_and_const_int (NULL_RTX
, mode
, op0
, 1);
11721 code
= (code
== NE
? EQ
: NE
);
11728 /* If we have (compare (ashift FOO N) (const_int C)) and
11729 the high order N bits of FOO (N+1 if an inequality comparison)
11730 are known to be zero, we can do this by comparing FOO with C
11731 shifted right N bits so long as the low-order N bits of C are
11733 if (CONST_INT_P (XEXP (op0
, 1))
11734 && INTVAL (XEXP (op0
, 1)) >= 0
11735 && ((INTVAL (XEXP (op0
, 1)) + ! equality_comparison_p
)
11736 < HOST_BITS_PER_WIDE_INT
)
11737 && (((unsigned HOST_WIDE_INT
) const_op
11738 & (((unsigned HOST_WIDE_INT
) 1 << INTVAL (XEXP (op0
, 1)))
11740 && mode_width
<= HOST_BITS_PER_WIDE_INT
11741 && (nonzero_bits (XEXP (op0
, 0), mode
)
11742 & ~(mask
>> (INTVAL (XEXP (op0
, 1))
11743 + ! equality_comparison_p
))) == 0)
11745 /* We must perform a logical shift, not an arithmetic one,
11746 as we want the top N bits of C to be zero. */
11747 unsigned HOST_WIDE_INT temp
= const_op
& GET_MODE_MASK (mode
);
11749 temp
>>= INTVAL (XEXP (op0
, 1));
11750 op1
= gen_int_mode (temp
, mode
);
11751 op0
= XEXP (op0
, 0);
11755 /* If we are doing a sign bit comparison, it means we are testing
11756 a particular bit. Convert it to the appropriate AND. */
11757 if (sign_bit_comparison_p
&& CONST_INT_P (XEXP (op0
, 1))
11758 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
11760 op0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (op0
, 0),
11761 ((unsigned HOST_WIDE_INT
) 1
11763 - INTVAL (XEXP (op0
, 1)))));
11764 code
= (code
== LT
? NE
: EQ
);
11768 /* If this an equality comparison with zero and we are shifting
11769 the low bit to the sign bit, we can convert this to an AND of the
11771 if (const_op
== 0 && equality_comparison_p
11772 && CONST_INT_P (XEXP (op0
, 1))
11773 && UINTVAL (XEXP (op0
, 1)) == mode_width
- 1)
11775 op0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (op0
, 0), 1);
11781 /* If this is an equality comparison with zero, we can do this
11782 as a logical shift, which might be much simpler. */
11783 if (equality_comparison_p
&& const_op
== 0
11784 && CONST_INT_P (XEXP (op0
, 1)))
11786 op0
= simplify_shift_const (NULL_RTX
, LSHIFTRT
, mode
,
11788 INTVAL (XEXP (op0
, 1)));
11792 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11793 do the comparison in a narrower mode. */
11794 if (! unsigned_comparison_p
11795 && CONST_INT_P (XEXP (op0
, 1))
11796 && GET_CODE (XEXP (op0
, 0)) == ASHIFT
11797 && XEXP (op0
, 1) == XEXP (XEXP (op0
, 0), 1)
11798 && (tmode
= mode_for_size (mode_width
- INTVAL (XEXP (op0
, 1)),
11799 MODE_INT
, 1)) != BLKmode
11800 && (((unsigned HOST_WIDE_INT
) const_op
11801 + (GET_MODE_MASK (tmode
) >> 1) + 1)
11802 <= GET_MODE_MASK (tmode
)))
11804 op0
= gen_lowpart (tmode
, XEXP (XEXP (op0
, 0), 0));
11808 /* Likewise if OP0 is a PLUS of a sign extension with a
11809 constant, which is usually represented with the PLUS
11810 between the shifts. */
11811 if (! unsigned_comparison_p
11812 && CONST_INT_P (XEXP (op0
, 1))
11813 && GET_CODE (XEXP (op0
, 0)) == PLUS
11814 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
11815 && GET_CODE (XEXP (XEXP (op0
, 0), 0)) == ASHIFT
11816 && XEXP (op0
, 1) == XEXP (XEXP (XEXP (op0
, 0), 0), 1)
11817 && (tmode
= mode_for_size (mode_width
- INTVAL (XEXP (op0
, 1)),
11818 MODE_INT
, 1)) != BLKmode
11819 && (((unsigned HOST_WIDE_INT
) const_op
11820 + (GET_MODE_MASK (tmode
) >> 1) + 1)
11821 <= GET_MODE_MASK (tmode
)))
11823 rtx inner
= XEXP (XEXP (XEXP (op0
, 0), 0), 0);
11824 rtx add_const
= XEXP (XEXP (op0
, 0), 1);
11825 rtx new_const
= simplify_gen_binary (ASHIFTRT
, GET_MODE (op0
),
11826 add_const
, XEXP (op0
, 1));
11828 op0
= simplify_gen_binary (PLUS
, tmode
,
11829 gen_lowpart (tmode
, inner
),
11834 /* ... fall through ... */
11836 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11837 the low order N bits of FOO are known to be zero, we can do this
11838 by comparing FOO with C shifted left N bits so long as no
11839 overflow occurs. Even if the low order N bits of FOO aren't known
11840 to be zero, if the comparison is >= or < we can use the same
11841 optimization and for > or <= by setting all the low
11842 order N bits in the comparison constant. */
11843 if (CONST_INT_P (XEXP (op0
, 1))
11844 && INTVAL (XEXP (op0
, 1)) > 0
11845 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
11846 && mode_width
<= HOST_BITS_PER_WIDE_INT
11847 && (((unsigned HOST_WIDE_INT
) const_op
11848 + (GET_CODE (op0
) != LSHIFTRT
11849 ? ((GET_MODE_MASK (mode
) >> INTVAL (XEXP (op0
, 1)) >> 1)
11852 <= GET_MODE_MASK (mode
) >> INTVAL (XEXP (op0
, 1))))
11854 unsigned HOST_WIDE_INT low_bits
11855 = (nonzero_bits (XEXP (op0
, 0), mode
)
11856 & (((unsigned HOST_WIDE_INT
) 1
11857 << INTVAL (XEXP (op0
, 1))) - 1));
11858 if (low_bits
== 0 || !equality_comparison_p
)
11860 /* If the shift was logical, then we must make the condition
11862 if (GET_CODE (op0
) == LSHIFTRT
)
11863 code
= unsigned_condition (code
);
11865 const_op
<<= INTVAL (XEXP (op0
, 1));
11867 && (code
== GT
|| code
== GTU
11868 || code
== LE
|| code
== LEU
))
11870 |= (((HOST_WIDE_INT
) 1 << INTVAL (XEXP (op0
, 1))) - 1);
11871 op1
= GEN_INT (const_op
);
11872 op0
= XEXP (op0
, 0);
11877 /* If we are using this shift to extract just the sign bit, we
11878 can replace this with an LT or GE comparison. */
11880 && (equality_comparison_p
|| sign_bit_comparison_p
)
11881 && CONST_INT_P (XEXP (op0
, 1))
11882 && UINTVAL (XEXP (op0
, 1)) == mode_width
- 1)
11884 op0
= XEXP (op0
, 0);
11885 code
= (code
== NE
|| code
== GT
? LT
: GE
);
  /* Now make any compound operations involved in this comparison.  Then,
     check for an outermost SUBREG on OP0 that is not doing anything or is
     paradoxical.  The latter transformation must only be performed when
     it is known that the "extra" bits will be the same in op0 and op1 or
     that they don't matter.  There are three cases to consider:

     1. SUBREG_REG (op0) is a register.  In this case the bits are don't
	care bits and we can assume they have any convenient value.  So
	making the transformation is safe.

     2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
	In this case the upper bits of op0 are undefined.  We should not make
	the simplification in that case as we do not know the contents of

     3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
	UNKNOWN.  In that case we know those bits are zeros or ones.  We must
	also be sure that they are the same as the upper bits of op1.

     We can never remove a SUBREG for a non-equality comparison because
     the sign bit is in a different place in the underlying object.  */

  op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
  op1 = make_compound_operation (op1, SET);

  if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
      && (code == NE || code == EQ))
      if (GET_MODE_SIZE (GET_MODE (op0))
	  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))
	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
	  if (REG_P (SUBREG_REG (op0)))
	      op0 = SUBREG_REG (op0);
	      op1 = gen_lowpart (GET_MODE (op0), op1);
      else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		<= HOST_BITS_PER_WIDE_INT)
	       && (nonzero_bits (SUBREG_REG (op0),
				 GET_MODE (SUBREG_REG (op0)))
		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);

	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
	    op0 = SUBREG_REG (op0), op1 = tem;
11952 /* We now do the opposite procedure: Some machines don't have compare
11953 insns in all modes. If OP0's mode is an integer mode smaller than a
11954 word and we can't do a compare in that mode, see if there is a larger
11955 mode for which we can do the compare. There are a number of cases in
11956 which we can use the wider mode. */
11958 mode
= GET_MODE (op0
);
11959 if (mode
!= VOIDmode
&& GET_MODE_CLASS (mode
) == MODE_INT
11960 && GET_MODE_SIZE (mode
) < UNITS_PER_WORD
11961 && ! have_insn_for (COMPARE
, mode
))
11962 for (tmode
= GET_MODE_WIDER_MODE (mode
);
11964 && GET_MODE_BITSIZE (tmode
) <= HOST_BITS_PER_WIDE_INT
);
11965 tmode
= GET_MODE_WIDER_MODE (tmode
))
11966 if (have_insn_for (COMPARE
, tmode
))
11970 /* If this is a test for negative, we can make an explicit
11971 test of the sign bit. Test this first so we can use
11972 a paradoxical subreg to extend OP0. */
11974 if (op1
== const0_rtx
&& (code
== LT
|| code
== GE
)
11975 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
11977 op0
= simplify_gen_binary (AND
, tmode
,
11978 gen_lowpart (tmode
, op0
),
11979 GEN_INT ((unsigned HOST_WIDE_INT
) 1
11980 << (GET_MODE_BITSIZE (mode
)
11982 code
= (code
== LT
) ? NE
: EQ
;
11986 /* If the only nonzero bits in OP0 and OP1 are those in the
11987 narrower mode and this is an equality or unsigned comparison,
11988 we can use the wider mode. Similarly for sign-extended
11989 values, in which case it is true for all comparisons. */
11990 zero_extended
= ((code
== EQ
|| code
== NE
11991 || code
== GEU
|| code
== GTU
11992 || code
== LEU
|| code
== LTU
)
11993 && (nonzero_bits (op0
, tmode
)
11994 & ~GET_MODE_MASK (mode
)) == 0
11995 && ((CONST_INT_P (op1
)
11996 || (nonzero_bits (op1
, tmode
)
11997 & ~GET_MODE_MASK (mode
)) == 0)));
12000 || ((num_sign_bit_copies (op0
, tmode
)
12001 > (unsigned int) (GET_MODE_BITSIZE (tmode
)
12002 - GET_MODE_BITSIZE (mode
)))
12003 && (num_sign_bit_copies (op1
, tmode
)
12004 > (unsigned int) (GET_MODE_BITSIZE (tmode
)
12005 - GET_MODE_BITSIZE (mode
)))))
	    /* If OP0 is an AND and we don't have an AND in MODE either,
	       make a new AND in the proper mode.  */
	    if (GET_CODE (op0) == AND
		&& !have_insn_for (AND, mode))
	      op0 = simplify_gen_binary (AND, tmode,
					 gen_lowpart (tmode,
					 gen_lowpart (tmode,

		op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
		op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);

		op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
		op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);

#ifdef CANONICALIZE_COMPARISON
  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  CANONICALIZE_COMPARISON (code, op0, op1);
12045 /* Utility function for record_value_for_reg. Count number of
12050 enum rtx_code code
= GET_CODE (x
);
12054 if (GET_RTX_CLASS (code
) == '2'
12055 || GET_RTX_CLASS (code
) == 'c')
12057 rtx x0
= XEXP (x
, 0);
12058 rtx x1
= XEXP (x
, 1);
12061 return 1 + 2 * count_rtxs (x0
);
12063 if ((GET_RTX_CLASS (GET_CODE (x1
)) == '2'
12064 || GET_RTX_CLASS (GET_CODE (x1
)) == 'c')
12065 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
12066 return 2 + 2 * count_rtxs (x0
)
12067 + count_rtxs (x
== XEXP (x1
, 0)
12068 ? XEXP (x1
, 1) : XEXP (x1
, 0));
12070 if ((GET_RTX_CLASS (GET_CODE (x0
)) == '2'
12071 || GET_RTX_CLASS (GET_CODE (x0
)) == 'c')
12072 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
12073 return 2 + 2 * count_rtxs (x1
)
12074 + count_rtxs (x
== XEXP (x0
, 0)
12075 ? XEXP (x0
, 1) : XEXP (x0
, 0));
12078 fmt
= GET_RTX_FORMAT (code
);
12079 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
12081 ret
+= count_rtxs (XEXP (x
, i
));
12082 else if (fmt
[i
] == 'E')
12083 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
12084 ret
+= count_rtxs (XVECEXP (x
, i
, j
));
12089 /* Utility function for following routine. Called when X is part of a value
12090 being stored into last_set_value. Sets last_set_table_tick
12091 for each register mentioned. Similar to mention_regs in cse.c */
12094 update_table_tick (rtx x
)
12096 enum rtx_code code
= GET_CODE (x
);
12097 const char *fmt
= GET_RTX_FORMAT (code
);
12102 unsigned int regno
= REGNO (x
);
12103 unsigned int endregno
= END_REGNO (x
);
12106 for (r
= regno
; r
< endregno
; r
++)
12108 reg_stat_type
*rsp
= VEC_index (reg_stat_type
, reg_stat
, r
);
12109 rsp
->last_set_table_tick
= label_tick
;
12115 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
12118 /* Check for identical subexpressions. If x contains
12119 identical subexpression we only have to traverse one of
12121 if (i
== 0 && ARITHMETIC_P (x
))
12123 /* Note that at this point x1 has already been
12125 rtx x0
= XEXP (x
, 0);
12126 rtx x1
= XEXP (x
, 1);
12128 /* If x0 and x1 are identical then there is no need to
12133 /* If x0 is identical to a subexpression of x1 then while
12134 processing x1, x0 has already been processed. Thus we
12135 are done with x. */
12136 if (ARITHMETIC_P (x1
)
12137 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
12140 /* If x1 is identical to a subexpression of x0 then we
12141 still have to process the rest of x0. */
12142 if (ARITHMETIC_P (x0
)
12143 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
12145 update_table_tick (XEXP (x0
, x1
== XEXP (x0
, 0) ? 1 : 0));
12150 update_table_tick (XEXP (x
, i
));
12152 else if (fmt
[i
] == 'E')
12153 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
12154 update_table_tick (XVECEXP (x
, i
, j
));
12157 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12158 are saying that the register is clobbered and we no longer know its
12159 value. If INSN is zero, don't update reg_stat[].last_set; this is
12160 only permitted with VALUE also zero and is used to invalidate the
12164 record_value_for_reg (rtx reg
, rtx insn
, rtx value
)
12166 unsigned int regno
= REGNO (reg
);
12167 unsigned int endregno
= END_REGNO (reg
);
12169 reg_stat_type
*rsp
;
12171 /* If VALUE contains REG and we have a previous value for REG, substitute
12172 the previous value. */
12173 if (value
&& insn
&& reg_overlap_mentioned_p (reg
, value
))
12177 /* Set things up so get_last_value is allowed to see anything set up to
12179 subst_low_luid
= DF_INSN_LUID (insn
);
12180 tem
= get_last_value (reg
);
12182 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12183 it isn't going to be useful and will take a lot of time to process,
12184 so just use the CLOBBER. */
12188 if (ARITHMETIC_P (tem
)
12189 && GET_CODE (XEXP (tem
, 0)) == CLOBBER
12190 && GET_CODE (XEXP (tem
, 1)) == CLOBBER
)
12191 tem
= XEXP (tem
, 0);
12192 else if (count_occurrences (value
, reg
, 1) >= 2)
12194 /* If there are two or more occurrences of REG in VALUE,
12195 prevent the value from growing too much. */
12196 if (count_rtxs (tem
) > MAX_LAST_VALUE_RTL
)
12197 tem
= gen_rtx_CLOBBER (GET_MODE (tem
), const0_rtx
);
12200 value
= replace_rtx (copy_rtx (value
), reg
, tem
);
12204 /* For each register modified, show we don't know its value, that
12205 we don't know about its bitwise content, that its value has been
12206 updated, and that we don't know the location of the death of the
12208 for (i
= regno
; i
< endregno
; i
++)
12210 rsp
= VEC_index (reg_stat_type
, reg_stat
, i
);
12213 rsp
->last_set
= insn
;
12215 rsp
->last_set_value
= 0;
12216 rsp
->last_set_mode
= VOIDmode
;
12217 rsp
->last_set_nonzero_bits
= 0;
12218 rsp
->last_set_sign_bit_copies
= 0;
12219 rsp
->last_death
= 0;
12220 rsp
->truncated_to_mode
= VOIDmode
;
12223 /* Mark registers that are being referenced in this value. */
12225 update_table_tick (value
);
12227 /* Now update the status of each register being set.
12228 If someone is using this register in this block, set this register
12229 to invalid since we will get confused between the two lives in this
12230 basic block. This makes using this register always invalid. In cse, we
12231 scan the table to invalidate all entries using this register, but this
12232 is too much work for us. */
12234 for (i
= regno
; i
< endregno
; i
++)
12236 rsp
= VEC_index (reg_stat_type
, reg_stat
, i
);
12237 rsp
->last_set_label
= label_tick
;
12239 || (value
&& rsp
->last_set_table_tick
>= label_tick_ebb_start
))
12240 rsp
->last_set_invalid
= 1;
12242 rsp
->last_set_invalid
= 0;
12245 /* The value being assigned might refer to X (like in "x++;"). In that
12246 case, we must replace it with (clobber (const_int 0)) to prevent
12248 rsp
= VEC_index (reg_stat_type
, reg_stat
, regno
);
12249 if (value
&& !get_last_value_validate (&value
, insn
, label_tick
, 0))
12251 value
= copy_rtx (value
);
12252 if (!get_last_value_validate (&value
, insn
, label_tick
, 1))
12256 /* For the main register being modified, update the value, the mode, the
12257 nonzero bits, and the number of sign bit copies. */
12259 rsp
->last_set_value
= value
;
12263 enum machine_mode mode
= GET_MODE (reg
);
12264 subst_low_luid
= DF_INSN_LUID (insn
);
12265 rsp
->last_set_mode
= mode
;
12266 if (GET_MODE_CLASS (mode
) == MODE_INT
12267 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
12268 mode
= nonzero_bits_mode
;
12269 rsp
->last_set_nonzero_bits
= nonzero_bits (value
, mode
);
12270 rsp
->last_set_sign_bit_copies
12271 = num_sign_bit_copies (value
, GET_MODE (reg
));
12275 /* Called via note_stores from record_dead_and_set_regs to handle one
12276 SET or CLOBBER in an insn. DATA is the instruction in which the
12277 set is occurring. */
12280 record_dead_and_set_regs_1 (rtx dest
, const_rtx setter
, void *data
)
12282 rtx record_dead_insn
= (rtx
) data
;
12284 if (GET_CODE (dest
) == SUBREG
)
12285 dest
= SUBREG_REG (dest
);
12287 if (!record_dead_insn
)
12290 record_value_for_reg (dest
, NULL_RTX
, NULL_RTX
);
12296 /* If we are setting the whole register, we know its value. Otherwise
12297 show that we don't know the value. We can handle SUBREG in
12299 if (GET_CODE (setter
) == SET
&& dest
== SET_DEST (setter
))
12300 record_value_for_reg (dest
, record_dead_insn
, SET_SRC (setter
));
12301 else if (GET_CODE (setter
) == SET
12302 && GET_CODE (SET_DEST (setter
)) == SUBREG
12303 && SUBREG_REG (SET_DEST (setter
)) == dest
12304 && GET_MODE_BITSIZE (GET_MODE (dest
)) <= BITS_PER_WORD
12305 && subreg_lowpart_p (SET_DEST (setter
)))
12306 record_value_for_reg (dest
, record_dead_insn
,
12307 gen_lowpart (GET_MODE (dest
),
12308 SET_SRC (setter
)));
12310 record_value_for_reg (dest
, record_dead_insn
, NULL_RTX
);
12312 else if (MEM_P (dest
)
12313 /* Ignore pushes, they clobber nothing. */
12314 && ! push_operand (dest
, GET_MODE (dest
)))
12315 mem_last_set
= DF_INSN_LUID (record_dead_insn
);
12318 /* Update the records of when each REG was most recently set or killed
12319 for the things done by INSN. This is the last thing done in processing
12320 INSN in the combiner loop.
12322 We update reg_stat[], in particular fields last_set, last_set_value,
12323 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12324 last_death, and also the similar information mem_last_set (which insn
12325 most recently modified memory) and last_call_luid (which insn was the
12326 most recent subroutine call). */
12329 record_dead_and_set_regs (rtx insn
)
12334 for (link
= REG_NOTES (insn
); link
; link
= XEXP (link
, 1))
12336 if (REG_NOTE_KIND (link
) == REG_DEAD
12337 && REG_P (XEXP (link
, 0)))
12339 unsigned int regno
= REGNO (XEXP (link
, 0));
12340 unsigned int endregno
= END_REGNO (XEXP (link
, 0));
12342 for (i
= regno
; i
< endregno
; i
++)
12344 reg_stat_type
*rsp
;
12346 rsp
= VEC_index (reg_stat_type
, reg_stat
, i
);
12347 rsp
->last_death
= insn
;
12350 else if (REG_NOTE_KIND (link
) == REG_INC
)
12351 record_value_for_reg (XEXP (link
, 0), insn
, NULL_RTX
);
12356 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
12357 if (TEST_HARD_REG_BIT (regs_invalidated_by_call
, i
))
12359 reg_stat_type
*rsp
;
12361 rsp
= VEC_index (reg_stat_type
, reg_stat
, i
);
12362 rsp
->last_set_invalid
= 1;
12363 rsp
->last_set
= insn
;
12364 rsp
->last_set_value
= 0;
12365 rsp
->last_set_mode
= VOIDmode
;
12366 rsp
->last_set_nonzero_bits
= 0;
12367 rsp
->last_set_sign_bit_copies
= 0;
12368 rsp
->last_death
= 0;
12369 rsp
->truncated_to_mode
= VOIDmode
;
12372 last_call_luid
= mem_last_set
= DF_INSN_LUID (insn
);
12374 /* We can't combine into a call pattern. Remember, though, that
12375 the return value register is set at this LUID. We could
12376 still replace a register with the return value from the
12377 wrong subroutine call! */
12378 note_stores (PATTERN (insn
), record_dead_and_set_regs_1
, NULL_RTX
);
12381 note_stores (PATTERN (insn
), record_dead_and_set_regs_1
, insn
);
12384 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12385 register present in the SUBREG, so for each such SUBREG go back and
12386 adjust nonzero and sign bit information of the registers that are
12387 known to have some zero/sign bits set.
12389 This is needed because when combine blows the SUBREGs away, the
12390 information on zero/sign bits is lost and further combines can be
12391 missed because of that. */
12394 record_promoted_value (rtx insn
, rtx subreg
)
12397 unsigned int regno
= REGNO (SUBREG_REG (subreg
));
12398 enum machine_mode mode
= GET_MODE (subreg
);
12400 if (GET_MODE_BITSIZE (mode
) > HOST_BITS_PER_WIDE_INT
)
12403 for (links
= LOG_LINKS (insn
); links
;)
12405 reg_stat_type
*rsp
;
12407 insn
= XEXP (links
, 0);
12408 set
= single_set (insn
);
12410 if (! set
|| !REG_P (SET_DEST (set
))
12411 || REGNO (SET_DEST (set
)) != regno
12412 || GET_MODE (SET_DEST (set
)) != GET_MODE (SUBREG_REG (subreg
)))
12414 links
= XEXP (links
, 1);
12418 rsp
= VEC_index (reg_stat_type
, reg_stat
, regno
);
12419 if (rsp
->last_set
== insn
)
12421 if (SUBREG_PROMOTED_UNSIGNED_P (subreg
) > 0)
12422 rsp
->last_set_nonzero_bits
&= GET_MODE_MASK (mode
);
12425 if (REG_P (SET_SRC (set
)))
12427 regno
= REGNO (SET_SRC (set
));
12428 links
= LOG_LINKS (insn
);
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
  reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
  enum machine_mode truncated = rsp->truncated_to_mode;

      || rsp->truncation_label < label_tick_ebb_start)

  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))

  if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
			     GET_MODE_BITSIZE (truncated)))
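/* The first test accepts X when the recorded truncation is at least as
   narrow as MODE; the second accepts it when the target considers
   truncating from the recorded mode down to MODE a no-op, so taking a
   lowpart SUBREG loses nothing.  */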
12457 /* Callback for for_each_rtx. If *P is a hard reg or a subreg record the mode
12458 that the register is accessed in. For non-TRULY_NOOP_TRUNCATION targets we
12459 might be able to turn a truncate into a subreg using this information.
12460 Return -1 if traversing *P is complete or 0 otherwise. */
12463 record_truncated_value (rtx
*p
, void *data ATTRIBUTE_UNUSED
)
12466 enum machine_mode truncated_mode
;
12467 reg_stat_type
*rsp
;
12469 if (GET_CODE (x
) == SUBREG
&& REG_P (SUBREG_REG (x
)))
12471 enum machine_mode original_mode
= GET_MODE (SUBREG_REG (x
));
12472 truncated_mode
= GET_MODE (x
);
12474 if (GET_MODE_SIZE (original_mode
) <= GET_MODE_SIZE (truncated_mode
))
12477 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode
),
12478 GET_MODE_BITSIZE (original_mode
)))
12481 x
= SUBREG_REG (x
);
12483 /* ??? For hard-regs we now record everything. We might be able to
12484 optimize this using last_set_mode. */
12485 else if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
)
12486 truncated_mode
= GET_MODE (x
);
12490 rsp
= VEC_index (reg_stat_type
, reg_stat
, REGNO (x
));
12491 if (rsp
->truncated_to_mode
== 0
12492 || rsp
->truncation_label
< label_tick_ebb_start
12493 || (GET_MODE_SIZE (truncated_mode
)
12494 < GET_MODE_SIZE (rsp
->truncated_to_mode
)))
12496 rsp
->truncated_to_mode
= truncated_mode
;
12497 rsp
->truncation_label
= label_tick
;
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turning TRUNCATEs into

record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
  for_each_rtx (x, record_truncated_value, NULL);
12513 /* Scan X for promoted SUBREGs. For each one found,
12514 note what it implies to the registers used in it. */
12517 check_promoted_subreg (rtx insn
, rtx x
)
12519 if (GET_CODE (x
) == SUBREG
12520 && SUBREG_PROMOTED_VAR_P (x
)
12521 && REG_P (SUBREG_REG (x
)))
12522 record_promoted_value (insn
, x
);
12525 const char *format
= GET_RTX_FORMAT (GET_CODE (x
));
12528 for (i
= 0; i
< GET_RTX_LENGTH (GET_CODE (x
)); i
++)
12532 check_promoted_subreg (insn
, XEXP (x
, i
));
12536 if (XVEC (x
, i
) != 0)
12537 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
12538 check_promoted_subreg (insn
, XVECEXP (x
, i
, j
));
12544 /* Verify that all the registers and memory references mentioned in *LOC are
12545 still valid. *LOC was part of a value set in INSN when label_tick was
12546 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12547 the invalid references with (clobber (const_int 0)) and return 1. This
12548 replacement is useful because we often can get useful information about
12549 the form of a value (e.g., if it was produced by a shift that always
12550 produces -1 or 0) even though we don't know exactly what registers it
12551 was produced from. */
12554 get_last_value_validate (rtx
*loc
, rtx insn
, int tick
, int replace
)
12557 const char *fmt
= GET_RTX_FORMAT (GET_CODE (x
));
12558 int len
= GET_RTX_LENGTH (GET_CODE (x
));
12563 unsigned int regno
= REGNO (x
);
12564 unsigned int endregno
= END_REGNO (x
);
12567 for (j
= regno
; j
< endregno
; j
++)
12569 reg_stat_type
*rsp
= VEC_index (reg_stat_type
, reg_stat
, j
);
12570 if (rsp
->last_set_invalid
12571 /* If this is a pseudo-register that was only set once and not
12572 live at the beginning of the function, it is always valid. */
12573 || (! (regno
>= FIRST_PSEUDO_REGISTER
12574 && REG_N_SETS (regno
) == 1
12575 && (!REGNO_REG_SET_P
12576 (DF_LR_IN (ENTRY_BLOCK_PTR
->next_bb
), regno
)))
12577 && rsp
->last_set_label
> tick
))
12580 *loc
= gen_rtx_CLOBBER (GET_MODE (x
), const0_rtx
);
12587 /* If this is a memory reference, make sure that there were no stores after
12588 it that might have clobbered the value. We don't have alias info, so we
12589 assume any store invalidates it. Moreover, we only have local UIDs, so
12590 we also assume that there were stores in the intervening basic blocks. */
12591 else if (MEM_P (x
) && !MEM_READONLY_P (x
)
12592 && (tick
!= label_tick
|| DF_INSN_LUID (insn
) <= mem_last_set
))
12595 *loc
= gen_rtx_CLOBBER (GET_MODE (x
), const0_rtx
);
12599 for (i
= 0; i
< len
; i
++)
12603 /* Check for identical subexpressions. If x contains
12604 identical subexpression we only have to traverse one of
12606 if (i
== 1 && ARITHMETIC_P (x
))
12608 /* Note that at this point x0 has already been checked
12609 and found valid. */
12610 rtx x0
= XEXP (x
, 0);
12611 rtx x1
= XEXP (x
, 1);
	  /* If x0 and x1 are identical then x is also valid.  */

	  /* If x1 is identical to a subexpression of x0 then
	     while checking x0, x1 has already been checked.  Thus
	     it is valid and so is x.  */
12620 if (ARITHMETIC_P (x0
)
12621 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
12624 /* If x0 is identical to a subexpression of x1 then x is
12625 valid iff the rest of x1 is valid. */
12626 if (ARITHMETIC_P (x1
)
12627 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
12629 get_last_value_validate (&XEXP (x1
,
12630 x0
== XEXP (x1
, 0) ? 1 : 0),
12631 insn
, tick
, replace
);
12634 if (get_last_value_validate (&XEXP (x
, i
), insn
, tick
,
12638 else if (fmt
[i
] == 'E')
12639 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
12640 if (get_last_value_validate (&XVECEXP (x
, i
, j
),
12641 insn
, tick
, replace
) == 0)
12645 /* If we haven't found a reason for it to be invalid, it is valid. */
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

get_last_value (const_rtx x)
12656 unsigned int regno
;
12658 reg_stat_type
*rsp
;
12660 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12661 then convert it to the desired mode. If this is a paradoxical SUBREG,
12662 we cannot predict what values the "extra" bits might have. */
12663 if (GET_CODE (x
) == SUBREG
12664 && subreg_lowpart_p (x
)
12665 && (GET_MODE_SIZE (GET_MODE (x
))
12666 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
))))
12667 && (value
= get_last_value (SUBREG_REG (x
))) != 0)
12668 return gen_lowpart (GET_MODE (x
), value
);
12674 rsp
= VEC_index (reg_stat_type
, reg_stat
, regno
);
12675 value
= rsp
->last_set_value
;
12677 /* If we don't have a value, or if it isn't for this basic block and
12678 it's either a hard register, set more than once, or it's a live
12679 at the beginning of the function, return 0.
12681 Because if it's not live at the beginning of the function then the reg
12682 is always set before being used (is never used without being set).
12683 And, if it's set only once, and it's always set before use, then all
12684 uses must have the same last value, even if it's not from this basic
12688 || (rsp
->last_set_label
< label_tick_ebb_start
12689 && (regno
< FIRST_PSEUDO_REGISTER
12690 || REG_N_SETS (regno
) != 1
12692 (DF_LR_IN (ENTRY_BLOCK_PTR
->next_bb
), regno
))))
12695 /* If the value was set in a later insn than the ones we are processing,
12696 we can't use it even if the register was only set once. */
12697 if (rsp
->last_set_label
== label_tick
12698 && DF_INSN_LUID (rsp
->last_set
) >= subst_low_luid
)
12701 /* If the value has all its registers valid, return it. */
12702 if (get_last_value_validate (&value
, rsp
->last_set
, rsp
->last_set_label
, 0))
12705 /* Otherwise, make a copy and replace any invalid register with
12706 (clobber (const_int 0)). If that fails for some reason, return 0. */
12708 value
= copy_rtx (value
);
12709 if (get_last_value_validate (&value
, rsp
->last_set
, rsp
->last_set_label
, 1))
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

use_crosses_set_p (const_rtx x, int from_luid)
  enum rtx_code code = GET_CODE (x);

      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
	 because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)

      for (; regno < endreg; regno++)
	  reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);
	      && rsp->last_set_label == label_tick
	      && DF_INSN_LUID (rsp->last_set) > from_luid)

  if (code == MEM && mem_last_set > from_luid)

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
      else if (fmt[i] == 'e'
	       && use_crosses_set_p (XEXP (x, i), from_luid))
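/* Memory is handled conservatively above: there is no alias analysis here,
   so any store later than FROM_LUID (tracked in mem_last_set) is assumed
   to invalidate every MEM use.  */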
12768 /* Define three variables used for communication between the following
12771 static unsigned int reg_dead_regno
, reg_dead_endregno
;
12772 static int reg_dead_flag
;
12774 /* Function called via note_stores from reg_dead_at_p.
12776 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12777 reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
12780 reg_dead_at_p_1 (rtx dest
, const_rtx x
, void *data ATTRIBUTE_UNUSED
)
12782 unsigned int regno
, endregno
;
12787 regno
= REGNO (dest
);
12788 endregno
= END_REGNO (dest
);
12789 if (reg_dead_endregno
> regno
&& reg_dead_regno
< endregno
)
12790 reg_dead_flag
= (GET_CODE (x
) == CLOBBER
) ? 1 : -1;
12793 /* Return nonzero if REG is known to be dead at INSN.
12795 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12796 referencing REG, it is dead. If we hit a SET referencing REG, it is
12797 live. Otherwise, see if it is live or dead at the start of the basic
12798 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12799 must be assumed to be always live. */
12802 reg_dead_at_p (rtx reg
, rtx insn
)
12807 /* Set variables for reg_dead_at_p_1. */
12808 reg_dead_regno
= REGNO (reg
);
12809 reg_dead_endregno
= END_REGNO (reg
);
12813 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12814 we allow the machine description to decide whether use-and-clobber
12815 patterns are OK. */
12816 if (reg_dead_regno
< FIRST_PSEUDO_REGISTER
)
12818 for (i
= reg_dead_regno
; i
< reg_dead_endregno
; i
++)
12819 if (!fixed_regs
[i
] && TEST_HARD_REG_BIT (newpat_used_regs
, i
))
12823 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12824 beginning of basic block. */
12825 block
= BLOCK_FOR_INSN (insn
);
12830 note_stores (PATTERN (insn
), reg_dead_at_p_1
, NULL
);
12832 return reg_dead_flag
== 1 ? 1 : 0;
12834 if (find_regno_note (insn
, REG_DEAD
, reg_dead_regno
))
12838 if (insn
== BB_HEAD (block
))
12841 insn
= PREV_INSN (insn
);
12844 /* Look at live-in sets for the basic block that we were in. */
12845 for (i
= reg_dead_regno
; i
< reg_dead_endregno
; i
++)
12846 if (REGNO_REG_SET_P (df_get_live_in (block
), i
))
12852 /* Note hard registers in X that are used. */
12855 mark_used_regs_combine (rtx x
)
12857 RTX_CODE code
= GET_CODE (x
);
12858 unsigned int regno
;
12871 case ADDR_DIFF_VEC
:
12874 /* CC0 must die in the insn after it is set, so we don't need to take
12875 special note of it here. */
12881 /* If we are clobbering a MEM, mark any hard registers inside the
12882 address as used. */
12883 if (MEM_P (XEXP (x
, 0)))
12884 mark_used_regs_combine (XEXP (XEXP (x
, 0), 0));
12889 /* A hard reg in a wide mode may really be multiple registers.
12890 If so, mark all of them just like the first. */
12891 if (regno
< FIRST_PSEUDO_REGISTER
)
12893 /* None of this applies to the stack, frame or arg pointers. */
12894 if (regno
== STACK_POINTER_REGNUM
12895 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12896 || regno
== HARD_FRAME_POINTER_REGNUM
12898 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12899 || (regno
== ARG_POINTER_REGNUM
&& fixed_regs
[regno
])
12901 || regno
== FRAME_POINTER_REGNUM
)
12904 add_to_hard_reg_set (&newpat_used_regs
, GET_MODE (x
), regno
);
12910 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12912 rtx testreg
= SET_DEST (x
);
12914 while (GET_CODE (testreg
) == SUBREG
12915 || GET_CODE (testreg
) == ZERO_EXTRACT
12916 || GET_CODE (testreg
) == STRICT_LOW_PART
)
12917 testreg
= XEXP (testreg
, 0);
12919 if (MEM_P (testreg
))
12920 mark_used_regs_combine (XEXP (testreg
, 0));
12922 mark_used_regs_combine (SET_SRC (x
));
12930 /* Recursively scan the operands of this expression. */
12933 const char *fmt
= GET_RTX_FORMAT (code
);
12935 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
12938 mark_used_regs_combine (XEXP (x
, i
));
12939 else if (fmt
[i
] == 'E')
12943 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
12944 mark_used_regs_combine (XVECEXP (x
, i
, j
));
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

remove_death (unsigned int regno, rtx insn)
  rtx note = find_regno_note (insn, REG_DEAD, regno);

    remove_note (insn, note);
12965 /* For each register (hardware or pseudo) used within expression X, if its
12966 death is in an instruction with luid between FROM_LUID (inclusive) and
12967 TO_INSN (exclusive), put a REG_DEAD note for that register in the
12968 list headed by PNOTES.
12970 That said, don't move registers killed by maybe_kill_insn.
12972 This is done when X is being merged by combination into TO_INSN. These
12973 notes will then be distributed as needed. */
12976 move_deaths (rtx x
, rtx maybe_kill_insn
, int from_luid
, rtx to_insn
,
12981 enum rtx_code code
= GET_CODE (x
);
12985 unsigned int regno
= REGNO (x
);
12986 rtx where_dead
= VEC_index (reg_stat_type
, reg_stat
, regno
)->last_death
;
12988 /* Don't move the register if it gets killed in between from and to. */
12989 if (maybe_kill_insn
&& reg_set_p (x
, maybe_kill_insn
)
12990 && ! reg_referenced_p (x
, maybe_kill_insn
))
12994 && BLOCK_FOR_INSN (where_dead
) == BLOCK_FOR_INSN (to_insn
)
12995 && DF_INSN_LUID (where_dead
) >= from_luid
12996 && DF_INSN_LUID (where_dead
) < DF_INSN_LUID (to_insn
))
12998 rtx note
= remove_death (regno
, where_dead
);
13000 /* It is possible for the call above to return 0. This can occur
13001 when last_death points to I2 or I1 that we combined with.
13002 In that case make a new note.
13004 We must also check for the case where X is a hard register
13005 and NOTE is a death note for a range of hard registers
13006 including X. In that case, we must put REG_DEAD notes for
13007 the remaining registers in place of NOTE. */
13009 if (note
!= 0 && regno
< FIRST_PSEUDO_REGISTER
13010 && (GET_MODE_SIZE (GET_MODE (XEXP (note
, 0)))
13011 > GET_MODE_SIZE (GET_MODE (x
))))
13013 unsigned int deadregno
= REGNO (XEXP (note
, 0));
13014 unsigned int deadend
= END_HARD_REGNO (XEXP (note
, 0));
13015 unsigned int ourend
= END_HARD_REGNO (x
);
13018 for (i
= deadregno
; i
< deadend
; i
++)
13019 if (i
< regno
|| i
>= ourend
)
13020 add_reg_note (where_dead
, REG_DEAD
, regno_reg_rtx
[i
]);
13023 /* If we didn't find any note, or if we found a REG_DEAD note that
13024 covers only part of the given reg, and we have a multi-reg hard
13025 register, then to be safe we must check for REG_DEAD notes
13026 for each register other than the first. They could have
13027 their own REG_DEAD notes lying around. */
13028 else if ((note
== 0
13030 && (GET_MODE_SIZE (GET_MODE (XEXP (note
, 0)))
13031 < GET_MODE_SIZE (GET_MODE (x
)))))
13032 && regno
< FIRST_PSEUDO_REGISTER
13033 && hard_regno_nregs
[regno
][GET_MODE (x
)] > 1)
13035 unsigned int ourend
= END_HARD_REGNO (x
);
13036 unsigned int i
, offset
;
13040 offset
= hard_regno_nregs
[regno
][GET_MODE (XEXP (note
, 0))];
13044 for (i
= regno
+ offset
; i
< ourend
; i
++)
13045 move_deaths (regno_reg_rtx
[i
],
13046 maybe_kill_insn
, from_luid
, to_insn
, &oldnotes
);
13049 if (note
!= 0 && GET_MODE (XEXP (note
, 0)) == GET_MODE (x
))
13051 XEXP (note
, 1) = *pnotes
;
13055 *pnotes
= alloc_reg_note (REG_DEAD
, x
, *pnotes
);
13061 else if (GET_CODE (x
) == SET
)
13063 rtx dest
= SET_DEST (x
);
13065 move_deaths (SET_SRC (x
), maybe_kill_insn
, from_luid
, to_insn
, pnotes
);
	  /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	     that accesses one word of a multi-word item, some
	     piece of every register in the expression is used by
	     this insn, so remove any old death.  */
	  /* ??? So why do we test for equality of the sizes?  */
13073 if (GET_CODE (dest
) == ZERO_EXTRACT
13074 || GET_CODE (dest
) == STRICT_LOW_PART
13075 || (GET_CODE (dest
) == SUBREG
13076 && (((GET_MODE_SIZE (GET_MODE (dest
))
13077 + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
)
13078 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest
)))
13079 + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
))))
13081 move_deaths (dest
, maybe_kill_insn
, from_luid
, to_insn
, pnotes
);
13085 /* If this is some other SUBREG, we know it replaces the entire
13086 value, so use that as the destination. */
13087 if (GET_CODE (dest
) == SUBREG
)
13088 dest
= SUBREG_REG (dest
);
13090 /* If this is a MEM, adjust deaths of anything used in the address.
13091 For a REG (the only other possibility), the entire value is
13092 being replaced so the old value is not used in this insn. */
13095 move_deaths (XEXP (dest
, 0), maybe_kill_insn
, from_luid
,
13100 else if (GET_CODE (x
) == CLOBBER
)
13103 len
= GET_RTX_LENGTH (code
);
13104 fmt
= GET_RTX_FORMAT (code
);
13106 for (i
= 0; i
< len
; i
++)
13111 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
13112 move_deaths (XVECEXP (x
, i
, j
), maybe_kill_insn
, from_luid
,
13115 else if (fmt
[i
] == 'e')
13116 move_deaths (XEXP (x
, i
), maybe_kill_insn
, from_luid
, to_insn
, pnotes
);
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

reg_bitfield_target_p (rtx x, rtx body)
  if (GET_CODE (body) == SET)
      rtx dest = SET_DEST (body);
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
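/* For hard registers the final test is a range-overlap check: X occupies
   [regno, endregno) and the bit-field target occupies [tregno, endtregno),
   so X is a target exactly when the two half-open ranges intersect.  */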
13165 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13166 as appropriate. I3 and I2 are the insns resulting from the combination
13167 insns including FROM (I2 may be zero).
13169 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13170 not need REG_DEAD notes because they are being substituted for. This
13171 saves searching in the most common cases.
13173 Each note in the list is either ignored or placed on some insns, depending
13174 on the type of note. */
13177 distribute_notes (rtx notes
, rtx from_insn
, rtx i3
, rtx i2
, rtx elim_i2
,
13178 rtx elim_i1
, rtx elim_i0
)
{
  rtx note, next_note;
  rtx tem;

  for (note = notes; note; note = next_note)
    {
      rtx place = 0, place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;

	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;

	case REG_NORETURN:
	case REG_SETJMP:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;

	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for registers which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }
	  break;
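	  /* In the REG_UNUSED case above, a scratch clobber such as
	     (clobber (reg:CC 17)) that recog_for_combine added to the new
	     pattern already carries its own REG_UNUSED note, so an original
	     note for the same register from I2 or I1 is deliberately not
	     reinstated unless it becomes a REG_DEAD note.  (The register is
	     illustrative.)  */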
	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;
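	  /* So a note like (expr_list:REG_EQUAL (const_int 4) ...) that was
	     already on I3 survives, while a REG_EQUAL naming a register
	     whose defining insn may just have been combined away is
	     discarded.  */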
	case REG_INC:
	  /* These notes say something about how a register is used.  They
	     must be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;

	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem, 0)) == LABEL_REF
		  && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem, 0)) == LABEL_REF
		      && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;

	      place2 = 0;
	    }
	  break;

	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;

	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */
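	  /* Concretely: suppose I2 was (set (reg 66) (and (reg 65) (reg 64)))
	     and combination proved (reg 64) is always zero, so the new I3 no
	     longer mentions (reg 65) at all.  A REG_DEAD note for (reg 65)
	     can then only be placed by walking backwards from here to the
	     previous insn that actually used it.  (Register numbers are
	     illustrative.)  */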
	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem = from_insn;
	  else
	    {
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;
	      tem = i3;
	    }

	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
		{
		  if (!NONDEBUG_INSN_P (tem))
		    {
		      if (tem == BB_HEAD (bb))
			break;
		      continue;
		    }

		  /* If the register is being set at TEM, see if that is all
		     TEM is doing.  If so, delete TEM.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem)))
		    {
		      rtx set = single_set (tem);
		      rtx inner_dest = 0;
#ifdef HAVE_cc0
		      rtx cc0_setter = NULL_RTX;
#endif

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
#ifdef HAVE_cc0
			  && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
			      || ((cc0_setter = prev_cc0_setter (tem)) != NULL
				  && sets_cc0_p (PATTERN (cc0_setter)) > 0))
#endif
			  )
			{
			  /* Move the notes and links of TEM elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem);

			  PATTERN (tem) = pc_rtx;
			  REG_NOTES (tem) = NULL;

			  distribute_notes (old_notes, tem, tem, NULL_RTX,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem));

			  SET_INSN_DELETED (tem);
			  if (tem == i2)
			    i2 = NULL_RTX;

#ifdef HAVE_cc0
			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL_RTX,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL_RTX;
			    }
#endif
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
			   || (CALL_P (tem)
			       && find_reg_fusage (tem, USE, XEXP (note, 0))))
		    {
		      place = tem;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  rtx links = LOG_LINKS (place);
			  LOG_LINKS (place) = 0;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem == BB_HEAD (bb))
		    break;
		}
	    }
	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if it is totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, regno);

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.] */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;

	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */
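	      /* For instance, a DImode hard register on a 32-bit target
		 typically occupies two consecutive word-sized registers; if
		 PLACE only uses the low word, a separate REG_DEAD note must
		 still be found or created for the high word below.
		 (Illustrative; the exact layout is given by
		 hard_regno_nregs.)  */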
	      if (place && regno < FIRST_PSEUDO_REGISTER
		  && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
		{
		  unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
		  int all_used = 1;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      all_used = 0;

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs[i][reg_raw_mode[i]])
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL_RTX, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, i + 1,
							PATTERN (place), 0)
				   && ! find_regno_fusage (place, USE, i))
			    for (tem = PREV_INSN (place); ;
				 tem = PREV_INSN (tem))
			      {
				if (!NONDEBUG_INSN_P (tem))
				  {
				    if (tem == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem)))
				  {
				    add_reg_note (tem, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;

	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;
	}

      if (place2)
	add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
    }
}
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */
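/* For example, if I2 set (reg 65) and I3 was the first later use of it, the
   old I3->I2 link may now belong on whichever insn after the combination is
   the first to use (reg 65); distribute_links finds that insn by scanning
   forward.  (Register number illustrative.)  */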
static void
distribute_links (rtx links)
{
  rtx link, next_link;

  for (link = links; link; link = next_link)
    {
      rtx place = 0;
      rtx insn;
      rtx set, reg;

      next_link = XEXP (link, 1);

      /* If the insn that this link points to is a NOTE or isn't a single
	 set, ignore it.  In the latter case, it isn't clear what we
	 can do other than ignore the link, since we can't tell which
	 register it was for.  Such links wouldn't be used by combine
	 anyway.

	 It is not possible for the destination of the target of the link to
	 have been changed by combine.  The only potential of this is if we
	 replace I3, I2, and I1 by I3 and I2.  But in that case the
	 destination of I2 also remains unchanged.  */

      if (NOTE_P (XEXP (link, 0))
	  || (set = single_set (XEXP (link, 0))) == 0)
	continue;

      reg = SET_DEST (set);
      while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */
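      /* E.g. if the link target sets (reg 70) in insn 12 and insns 13 and 15
	 both use it, the link is placed on insn 13, the first use; the scan
	 stops early at any intervening set of (reg 70) or at the end of the
	 block.  (Insn and register numbers are illustrative.)  */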
      for (insn = NEXT_INSN (XEXP (link, 0));
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  rtx link2;

	  for (link2 = LOG_LINKS (place); link2; link2 = XEXP (link2, 1))
	    if (XEXP (link2, 0) == XEXP (link, 0))
	      break;

	  if (link2 == 0)
	    {
	      XEXP (link, 1) = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
/* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
   Check whether the expression pointed to by LOC is a register or
   memory, and if so return 1 if it isn't mentioned in the rtx EXPR.
   Otherwise return zero.  */

static int
unmentioned_reg_p_1 (rtx *loc, void *expr)
{
  rtx x = *loc;

  if (x != NULL_RTX
      && (REG_P (x) || MEM_P (x))
      && ! reg_mentioned_p (x, (rtx) expr))
    return 1;

  return 0;
}

/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
}
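/* For example, with EQUIV = (plus (reg 65) (const_int 4)) and
   EXPR = (plus (reg 65) (reg 66)), every register or memory in EQUIV is also
   mentioned in EXPR, so the result is false; if EQUIV instead contained
   (reg 70), the result would be true.  (Register numbers illustrative.)  */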
void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}

static bool
gate_handle_combine (void)
{
  return (optimize > 0);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

struct rtl_opt_pass pass_combine =
{
 {
  RTL_PASS,
  "combine",				/* name */
  gate_handle_combine,			/* gate */
  rest_of_handle_combine,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_COMBINE,				/* tv_id */
  PROP_cfglayout,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_ggc_collect,			/* todo_flags_finish */
 }
};