/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.
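
   As an illustrative sketch (not part of the original comments), a
   typical two-insn combination substitutes I2's SET_SRC into I3:

     I2:  (set (reg 116) (plus (reg 115) (const_int 4)))
     I3:  (set (mem (reg 116)) (reg 117))

   becomes

     I3:  (set (mem (plus (reg 115) (const_int 4))) (reg 117))

   and I2 is deleted, provided the rewritten I3 is recognized by the
   machine description.  The register numbers here are hypothetical.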

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking to.

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
#include "coretypes.h"
#include "double-int.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "insn-config.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "statistics.h"
#include "fixed-value.h"
#include "emit-rtl.h"
#include "insn-attr.h"
#include "diagnostic-core.h"
#include "insn-codes.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "rtl-iter.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;
/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
typedef struct reg_stat_struct
{
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				other expression

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */
  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */
  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */
  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;
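
  /* An illustrative note (not in the original source): for a pseudo only
     ever set by (zero_extend:SI (mem:QI ...)), nonzero_bits would be 0xff
     and all of the high bits are known to be copies of the (zero) sign
     bit, which lets later masking or sign-extension operations be
     simplified away.  */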

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
} reg_stat_type;

static vec<reg_stat_type> reg_stat;
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link
{
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

#define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
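
/* A hedged usage sketch (not part of the original source): walking the
   links of an insn looks like

     struct insn_link *links;
     FOR_EACH_LOG_LINK (links, insn)
       if (links->insn == some_def_insn && links->regno == some_regno)
	 break;

   where some_def_insn and some_regno are hypothetical names.  */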

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
					 machine_mode,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode,
						const_rtx, machine_mode,
						unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, machine_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *,
			      rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);

/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int)*code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code)code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN can not be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = safe_as_a <rtx_insn *> (split_insns (pattern, insn));
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
569 /* This is used by find_single_use to locate an rtx in LOC that
570 contains exactly one use of DEST, which is typically either a REG
571 or CC0. It returns a pointer to the innermost rtx expression
572 containing DEST. Appearances of DEST that are being used to
573 totally replace it are not counted. */
576 find_single_use_1 (rtx dest
, rtx
*loc
)
579 enum rtx_code code
= GET_CODE (x
);
595 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
596 of a REG that occupies all of the REG, the insn uses DEST if
597 it is mentioned in the destination or the source. Otherwise, we
598 need just check the source. */
599 if (GET_CODE (SET_DEST (x
)) != CC0
600 && GET_CODE (SET_DEST (x
)) != PC
601 && !REG_P (SET_DEST (x
))
602 && ! (GET_CODE (SET_DEST (x
)) == SUBREG
603 && REG_P (SUBREG_REG (SET_DEST (x
)))
604 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x
))))
605 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)
606 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x
)))
607 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
))))
610 return find_single_use_1 (dest
, &SET_SRC (x
));
614 return find_single_use_1 (dest
, &XEXP (x
, 0));
620 /* If it wasn't one of the common cases above, check each expression and
621 vector of this code. Look for a unique usage of DEST. */
623 fmt
= GET_RTX_FORMAT (code
);
624 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
628 if (dest
== XEXP (x
, i
)
629 || (REG_P (dest
) && REG_P (XEXP (x
, i
))
630 && REGNO (dest
) == REGNO (XEXP (x
, i
))))
633 this_result
= find_single_use_1 (dest
, &XEXP (x
, i
));
636 result
= this_result
;
637 else if (this_result
)
638 /* Duplicate usage. */
641 else if (fmt
[i
] == 'E')
645 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
647 if (XVECEXP (x
, i
, j
) == dest
649 && REG_P (XVECEXP (x
, i
, j
))
650 && REGNO (XVECEXP (x
, i
, j
)) == REGNO (dest
)))
653 this_result
= find_single_use_1 (dest
, &XVECEXP (x
, i
, j
));
656 result
= this_result
;
657 else if (this_result
)
667 /* See if DEST, produced in INSN, is used only a single time in the
668 sequel. If so, return a pointer to the innermost rtx expression in which
671 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
673 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
674 care about REG_DEAD notes or LOG_LINKS.
676 Otherwise, we find the single use by finding an insn that has a
677 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
678 only referenced once in that insn, we know that it must be the first
679 and last insn referencing DEST. */
682 find_single_use (rtx dest
, rtx_insn
*insn
, rtx_insn
**ploc
)
687 struct insn_link
*link
;
691 next
= NEXT_INSN (insn
);
693 || (!NONJUMP_INSN_P (next
) && !JUMP_P (next
)))
696 result
= find_single_use_1 (dest
, &PATTERN (next
));
705 bb
= BLOCK_FOR_INSN (insn
);
706 for (next
= NEXT_INSN (insn
);
707 next
&& BLOCK_FOR_INSN (next
) == bb
;
708 next
= NEXT_INSN (next
))
709 if (INSN_P (next
) && dead_or_set_p (next
, dest
))
711 FOR_EACH_LOG_LINK (link
, next
)
712 if (link
->insn
== insn
&& link
->regno
== REGNO (dest
))
717 result
= find_single_use_1 (dest
, &PATTERN (next
));
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
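
/* A hedged usage sketch (not part of the original source): a typical
   substitution during simplification looks like

     SUBST (XEXP (x, 0), simplified_op0);

   which records the old operand in undobuf so that undo_all can restore
   it if the combination is later abandoned.  simplified_op0 is a
   hypothetical name.  */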

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link * oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->old_contents.l = oldval;
  buf->where.l = into;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
867 /* Subroutine of try_combine. Determine whether the replacement patterns
868 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
869 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
870 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
871 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
872 of all the instructions can be estimated and the replacements are more
873 expensive than the original sequence. */
876 combine_validate_cost (rtx_insn
*i0
, rtx_insn
*i1
, rtx_insn
*i2
, rtx_insn
*i3
,
877 rtx newpat
, rtx newi2pat
, rtx newotherpat
)
879 int i0_cost
, i1_cost
, i2_cost
, i3_cost
;
880 int new_i2_cost
, new_i3_cost
;
881 int old_cost
, new_cost
;
883 /* Lookup the original insn_rtx_costs. */
884 i2_cost
= INSN_COST (i2
);
885 i3_cost
= INSN_COST (i3
);
889 i1_cost
= INSN_COST (i1
);
892 i0_cost
= INSN_COST (i0
);
893 old_cost
= (i0_cost
> 0 && i1_cost
> 0 && i2_cost
> 0 && i3_cost
> 0
894 ? i0_cost
+ i1_cost
+ i2_cost
+ i3_cost
: 0);
898 old_cost
= (i1_cost
> 0 && i2_cost
> 0 && i3_cost
> 0
899 ? i1_cost
+ i2_cost
+ i3_cost
: 0);
905 old_cost
= (i2_cost
> 0 && i3_cost
> 0) ? i2_cost
+ i3_cost
: 0;
906 i1_cost
= i0_cost
= 0;
909 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
911 if (old_cost
&& i1
&& INSN_UID (i1
) == INSN_UID (i2
))
915 /* Calculate the replacement insn_rtx_costs. */
916 new_i3_cost
= insn_rtx_cost (newpat
, optimize_this_for_speed_p
);
919 new_i2_cost
= insn_rtx_cost (newi2pat
, optimize_this_for_speed_p
);
920 new_cost
= (new_i2_cost
> 0 && new_i3_cost
> 0)
921 ? new_i2_cost
+ new_i3_cost
: 0;
925 new_cost
= new_i3_cost
;
929 if (undobuf
.other_insn
)
931 int old_other_cost
, new_other_cost
;
933 old_other_cost
= INSN_COST (undobuf
.other_insn
);
934 new_other_cost
= insn_rtx_cost (newotherpat
, optimize_this_for_speed_p
);
935 if (old_other_cost
> 0 && new_other_cost
> 0)
937 old_cost
+= old_other_cost
;
938 new_cost
+= new_other_cost
;
944 /* Disallow this combination if both new_cost and old_cost are greater than
945 zero, and new_cost is greater than old cost. */
946 int reject
= old_cost
> 0 && new_cost
> old_cost
;
950 fprintf (dump_file
, "%s combination of insns ",
951 reject
? "rejecting" : "allowing");
953 fprintf (dump_file
, "%d, ", INSN_UID (i0
));
954 if (i1
&& INSN_UID (i1
) != INSN_UID (i2
))
955 fprintf (dump_file
, "%d, ", INSN_UID (i1
));
956 fprintf (dump_file
, "%d and %d\n", INSN_UID (i2
), INSN_UID (i3
));
958 fprintf (dump_file
, "original costs ");
960 fprintf (dump_file
, "%d + ", i0_cost
);
961 if (i1
&& INSN_UID (i1
) != INSN_UID (i2
))
962 fprintf (dump_file
, "%d + ", i1_cost
);
963 fprintf (dump_file
, "%d + %d = %d\n", i2_cost
, i3_cost
, old_cost
);
966 fprintf (dump_file
, "replacement costs %d + %d = %d\n",
967 new_i2_cost
, new_i3_cost
, new_cost
);
969 fprintf (dump_file
, "replacement cost %d\n", new_cost
);
975 /* Update the uid_insn_cost array with the replacement costs. */
976 INSN_COST (i2
) = new_i2_cost
;
977 INSN_COST (i3
) = new_i3_cost
;
989 /* Delete any insns that copy a register to itself. */
992 delete_noop_moves (void)
994 rtx_insn
*insn
, *next
;
997 FOR_EACH_BB_FN (bb
, cfun
)
999 for (insn
= BB_HEAD (bb
); insn
!= NEXT_INSN (BB_END (bb
)); insn
= next
)
1001 next
= NEXT_INSN (insn
);
1002 if (INSN_P (insn
) && noop_move_p (insn
))
1005 fprintf (dump_file
, "deleting noop move %d\n", INSN_UID (insn
));
1007 delete_insn_and_edges (insn
);
1014 /* Return false if we do not want to (or cannot) combine DEF. */
1016 can_combine_def_p (df_ref def
)
1018 /* Do not consider if it is pre/post modification in MEM. */
1019 if (DF_REF_FLAGS (def
) & DF_REF_PRE_POST_MODIFY
)
1022 unsigned int regno
= DF_REF_REGNO (def
);
1024 /* Do not combine frame pointer adjustments. */
1025 if ((regno
== FRAME_POINTER_REGNUM
1026 && (!reload_completed
|| frame_pointer_needed
))
1027 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1028 || (regno
== HARD_FRAME_POINTER_REGNUM
1029 && (!reload_completed
|| frame_pointer_needed
))
1031 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1032 || (regno
== ARG_POINTER_REGNUM
&& fixed_regs
[regno
])
1040 /* Return false if we do not want to (or cannot) combine USE. */
1042 can_combine_use_p (df_ref use
)
1044 /* Do not consider the usage of the stack pointer by function call. */
1045 if (DF_REF_FLAGS (use
) & DF_REF_CALL_STACK_USAGE
)
1051 /* Fill in log links field for all insns. */
1054 create_log_links (void)
1057 rtx_insn
**next_use
;
1061 next_use
= XCNEWVEC (rtx_insn
*, max_reg_num ());
1063 /* Pass through each block from the end, recording the uses of each
1064 register and establishing log links when def is encountered.
1065 Note that we do not clear next_use array in order to save time,
1066 so we have to test whether the use is in the same basic block as def.
     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know, and if it works, I don't want
     to know.  */
1072 FOR_EACH_BB_FN (bb
, cfun
)
1074 FOR_BB_INSNS_REVERSE (bb
, insn
)
1076 if (!NONDEBUG_INSN_P (insn
))
1079 /* Log links are created only once. */
1080 gcc_assert (!LOG_LINKS (insn
));
1082 FOR_EACH_INSN_DEF (def
, insn
)
1084 unsigned int regno
= DF_REF_REGNO (def
);
1087 if (!next_use
[regno
])
1090 if (!can_combine_def_p (def
))
1093 use_insn
= next_use
[regno
];
1094 next_use
[regno
] = NULL
;
1096 if (BLOCK_FOR_INSN (use_insn
) != bb
)
1101 We don't build a LOG_LINK for hard registers contained
1102 in ASM_OPERANDs. If these registers get replaced,
1103 we might wind up changing the semantics of the insn,
1104 even if reload can make what appear to be valid
1105 assignments later. */
1106 if (regno
< FIRST_PSEUDO_REGISTER
1107 && asm_noperands (PATTERN (use_insn
)) >= 0)
1110 /* Don't add duplicate links between instructions. */
1111 struct insn_link
*links
;
1112 FOR_EACH_LOG_LINK (links
, use_insn
)
1113 if (insn
== links
->insn
&& regno
== links
->regno
)
1117 LOG_LINKS (use_insn
)
1118 = alloc_insn_link (insn
, regno
, LOG_LINKS (use_insn
));
1121 FOR_EACH_INSN_USE (use
, insn
)
1122 if (can_combine_use_p (use
))
1123 next_use
[DF_REF_REGNO (use
)] = insn
;
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   combination.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
1149 /* Main entry point for combiner. F is the first insn of the function.
1150 NREGS is the first unused pseudo-reg number.
1152 Return nonzero if the combiner has turned an indirect jump
1153 instruction into a direct jump. */
1155 combine_instructions (rtx_insn
*f
, unsigned int nregs
)
1157 rtx_insn
*insn
, *next
;
1161 struct insn_link
*links
, *nextlinks
;
1163 basic_block last_bb
;
1165 int new_direct_jump_p
= 0;
1167 for (first
= f
; first
&& !INSN_P (first
); )
1168 first
= NEXT_INSN (first
);
1172 combine_attempts
= 0;
1175 combine_successes
= 0;
1177 rtl_hooks
= combine_rtl_hooks
;
1179 reg_stat
.safe_grow_cleared (nregs
);
1181 init_recog_no_volatile ();
1183 /* Allocate array for insn info. */
1184 max_uid_known
= get_max_uid ();
1185 uid_log_links
= XCNEWVEC (struct insn_link
*, max_uid_known
+ 1);
1186 uid_insn_cost
= XCNEWVEC (int, max_uid_known
+ 1);
1187 gcc_obstack_init (&insn_link_obstack
);
1189 nonzero_bits_mode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
1191 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1192 problems when, for example, we have j <<= 1 in a loop. */
1194 nonzero_sign_valid
= 0;
1195 label_tick
= label_tick_ebb_start
= 1;
1197 /* Scan all SETs and see if we can deduce anything about what
1198 bits are known to be zero for some registers and how many copies
1199 of the sign bit are known to exist for those registers.
1201 Also set any known values so that we can use it while searching
1202 for what bits are known to be set. */
1204 setup_incoming_promotions (first
);
1205 /* Allow the entry block and the first block to fall into the same EBB.
1206 Conceptually the incoming promotions are assigned to the entry block. */
1207 last_bb
= ENTRY_BLOCK_PTR_FOR_FN (cfun
);
1209 create_log_links ();
1210 FOR_EACH_BB_FN (this_basic_block
, cfun
)
1212 optimize_this_for_speed_p
= optimize_bb_for_speed_p (this_basic_block
);
1217 if (!single_pred_p (this_basic_block
)
1218 || single_pred (this_basic_block
) != last_bb
)
1219 label_tick_ebb_start
= label_tick
;
1220 last_bb
= this_basic_block
;
1222 FOR_BB_INSNS (this_basic_block
, insn
)
1223 if (INSN_P (insn
) && BLOCK_FOR_INSN (insn
))
1229 subst_low_luid
= DF_INSN_LUID (insn
);
1232 note_stores (PATTERN (insn
), set_nonzero_bits_and_sign_copies
,
1234 record_dead_and_set_regs (insn
);
1237 for (links
= REG_NOTES (insn
); links
; links
= XEXP (links
, 1))
1238 if (REG_NOTE_KIND (links
) == REG_INC
)
1239 set_nonzero_bits_and_sign_copies (XEXP (links
, 0), NULL_RTX
,
1243 /* Record the current insn_rtx_cost of this instruction. */
1244 if (NONJUMP_INSN_P (insn
))
1245 INSN_COST (insn
) = insn_rtx_cost (PATTERN (insn
),
1246 optimize_this_for_speed_p
);
1248 fprintf (dump_file
, "insn_cost %d: %d\n",
1249 INSN_UID (insn
), INSN_COST (insn
));
1253 nonzero_sign_valid
= 1;
1255 /* Now scan all the insns in forward order. */
1256 label_tick
= label_tick_ebb_start
= 1;
1258 setup_incoming_promotions (first
);
1259 last_bb
= ENTRY_BLOCK_PTR_FOR_FN (cfun
);
1260 int max_combine
= PARAM_VALUE (PARAM_MAX_COMBINE_INSNS
);
1262 FOR_EACH_BB_FN (this_basic_block
, cfun
)
1264 rtx_insn
*last_combined_insn
= NULL
;
1265 optimize_this_for_speed_p
= optimize_bb_for_speed_p (this_basic_block
);
1270 if (!single_pred_p (this_basic_block
)
1271 || single_pred (this_basic_block
) != last_bb
)
1272 label_tick_ebb_start
= label_tick
;
1273 last_bb
= this_basic_block
;
1275 rtl_profile_for_bb (this_basic_block
);
1276 for (insn
= BB_HEAD (this_basic_block
);
1277 insn
!= NEXT_INSN (BB_END (this_basic_block
));
1278 insn
= next
? next
: NEXT_INSN (insn
))
1281 if (!NONDEBUG_INSN_P (insn
))
1284 while (last_combined_insn
1285 && last_combined_insn
->deleted ())
1286 last_combined_insn
= PREV_INSN (last_combined_insn
);
1287 if (last_combined_insn
== NULL_RTX
1288 || BARRIER_P (last_combined_insn
)
1289 || BLOCK_FOR_INSN (last_combined_insn
) != this_basic_block
1290 || DF_INSN_LUID (last_combined_insn
) <= DF_INSN_LUID (insn
))
1291 last_combined_insn
= insn
;
1293 /* See if we know about function return values before this
1294 insn based upon SUBREG flags. */
1295 check_promoted_subreg (insn
, PATTERN (insn
));
1297 /* See if we can find hardregs and subreg of pseudos in
1298 narrower modes. This could help turning TRUNCATEs
1300 note_uses (&PATTERN (insn
), record_truncated_values
, NULL
);
1302 /* Try this insn with each insn it links back to. */
1304 FOR_EACH_LOG_LINK (links
, insn
)
1305 if ((next
= try_combine (insn
, links
->insn
, NULL
,
1306 NULL
, &new_direct_jump_p
,
1307 last_combined_insn
)) != 0)
1309 statistics_counter_event (cfun
, "two-insn combine", 1);
1313 /* Try each sequence of three linked insns ending with this one. */
1315 if (max_combine
>= 3)
1316 FOR_EACH_LOG_LINK (links
, insn
)
1318 rtx_insn
*link
= links
->insn
;
1320 /* If the linked insn has been replaced by a note, then there
1321 is no point in pursuing this chain any further. */
1325 FOR_EACH_LOG_LINK (nextlinks
, link
)
1326 if ((next
= try_combine (insn
, link
, nextlinks
->insn
,
1327 NULL
, &new_direct_jump_p
,
1328 last_combined_insn
)) != 0)
1330 statistics_counter_event (cfun
, "three-insn combine", 1);
1336 /* Try to combine a jump insn that uses CC0
1337 with a preceding insn that sets CC0, and maybe with its
1338 logical predecessor as well.
1339 This is how we make decrement-and-branch insns.
1340 We need this special code because data flow connections
1341 via CC0 do not get entered in LOG_LINKS. */
1344 && (prev
= prev_nonnote_insn (insn
)) != 0
1345 && NONJUMP_INSN_P (prev
)
1346 && sets_cc0_p (PATTERN (prev
)))
1348 if ((next
= try_combine (insn
, prev
, NULL
, NULL
,
1350 last_combined_insn
)) != 0)
1353 FOR_EACH_LOG_LINK (nextlinks
, prev
)
1354 if ((next
= try_combine (insn
, prev
, nextlinks
->insn
,
1355 NULL
, &new_direct_jump_p
,
1356 last_combined_insn
)) != 0)
1360 /* Do the same for an insn that explicitly references CC0. */
1361 if (NONJUMP_INSN_P (insn
)
1362 && (prev
= prev_nonnote_insn (insn
)) != 0
1363 && NONJUMP_INSN_P (prev
)
1364 && sets_cc0_p (PATTERN (prev
))
1365 && GET_CODE (PATTERN (insn
)) == SET
1366 && reg_mentioned_p (cc0_rtx
, SET_SRC (PATTERN (insn
))))
1368 if ((next
= try_combine (insn
, prev
, NULL
, NULL
,
1370 last_combined_insn
)) != 0)
1373 FOR_EACH_LOG_LINK (nextlinks
, prev
)
1374 if ((next
= try_combine (insn
, prev
, nextlinks
->insn
,
1375 NULL
, &new_direct_jump_p
,
1376 last_combined_insn
)) != 0)
1380 /* Finally, see if any of the insns that this insn links to
1381 explicitly references CC0. If so, try this insn, that insn,
1382 and its predecessor if it sets CC0. */
1383 FOR_EACH_LOG_LINK (links
, insn
)
1384 if (NONJUMP_INSN_P (links
->insn
)
1385 && GET_CODE (PATTERN (links
->insn
)) == SET
1386 && reg_mentioned_p (cc0_rtx
, SET_SRC (PATTERN (links
->insn
)))
1387 && (prev
= prev_nonnote_insn (links
->insn
)) != 0
1388 && NONJUMP_INSN_P (prev
)
1389 && sets_cc0_p (PATTERN (prev
))
1390 && (next
= try_combine (insn
, links
->insn
,
1391 prev
, NULL
, &new_direct_jump_p
,
1392 last_combined_insn
)) != 0)
1396 /* Try combining an insn with two different insns whose results it
1398 if (max_combine
>= 3)
1399 FOR_EACH_LOG_LINK (links
, insn
)
1400 for (nextlinks
= links
->next
; nextlinks
;
1401 nextlinks
= nextlinks
->next
)
1402 if ((next
= try_combine (insn
, links
->insn
,
1403 nextlinks
->insn
, NULL
,
1405 last_combined_insn
)) != 0)
1408 statistics_counter_event (cfun
, "three-insn combine", 1);
1412 /* Try four-instruction combinations. */
1413 if (max_combine
>= 4)
1414 FOR_EACH_LOG_LINK (links
, insn
)
1416 struct insn_link
*next1
;
1417 rtx_insn
*link
= links
->insn
;
1419 /* If the linked insn has been replaced by a note, then there
1420 is no point in pursuing this chain any further. */
1424 FOR_EACH_LOG_LINK (next1
, link
)
1426 rtx_insn
*link1
= next1
->insn
;
1429 /* I0 -> I1 -> I2 -> I3. */
1430 FOR_EACH_LOG_LINK (nextlinks
, link1
)
1431 if ((next
= try_combine (insn
, link
, link1
,
1434 last_combined_insn
)) != 0)
1436 statistics_counter_event (cfun
, "four-insn combine", 1);
1439 /* I0, I1 -> I2, I2 -> I3. */
1440 for (nextlinks
= next1
->next
; nextlinks
;
1441 nextlinks
= nextlinks
->next
)
1442 if ((next
= try_combine (insn
, link
, link1
,
1445 last_combined_insn
)) != 0)
1447 statistics_counter_event (cfun
, "four-insn combine", 1);
1452 for (next1
= links
->next
; next1
; next1
= next1
->next
)
1454 rtx_insn
*link1
= next1
->insn
;
1457 /* I0 -> I2; I1, I2 -> I3. */
1458 FOR_EACH_LOG_LINK (nextlinks
, link
)
1459 if ((next
= try_combine (insn
, link
, link1
,
1462 last_combined_insn
)) != 0)
1464 statistics_counter_event (cfun
, "four-insn combine", 1);
1467 /* I0 -> I1; I1, I2 -> I3. */
1468 FOR_EACH_LOG_LINK (nextlinks
, link1
)
1469 if ((next
= try_combine (insn
, link
, link1
,
1472 last_combined_insn
)) != 0)
1474 statistics_counter_event (cfun
, "four-insn combine", 1);
1480 /* Try this insn with each REG_EQUAL note it links back to. */
1481 FOR_EACH_LOG_LINK (links
, insn
)
1484 rtx_insn
*temp
= links
->insn
;
1485 if ((set
= single_set (temp
)) != 0
1486 && (note
= find_reg_equal_equiv_note (temp
)) != 0
1487 && (note
= XEXP (note
, 0), GET_CODE (note
)) != EXPR_LIST
	    /* Avoid using a register that may already have been marked
	       dead by an earlier instruction.  */
1490 && ! unmentioned_reg_p (note
, SET_SRC (set
))
1491 && (GET_MODE (note
) == VOIDmode
1492 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set
)))
1493 : GET_MODE (SET_DEST (set
)) == GET_MODE (note
)))
1495 /* Temporarily replace the set's source with the
1496 contents of the REG_EQUAL note. The insn will
1497 be deleted or recognized by try_combine. */
1498 rtx orig
= SET_SRC (set
);
1499 SET_SRC (set
) = note
;
1501 i2mod_old_rhs
= copy_rtx (orig
);
1502 i2mod_new_rhs
= copy_rtx (note
);
1503 next
= try_combine (insn
, i2mod
, NULL
, NULL
,
1505 last_combined_insn
);
1509 statistics_counter_event (cfun
, "insn-with-note combine", 1);
1512 SET_SRC (set
) = orig
;
1517 record_dead_and_set_regs (insn
);
1524 default_rtl_profile ();
1526 new_direct_jump_p
|= purge_all_dead_edges ();
1527 delete_noop_moves ();
1530 obstack_free (&insn_link_obstack
, NULL
);
1531 free (uid_log_links
);
1532 free (uid_insn_cost
);
1533 reg_stat
.release ();
1536 struct undo
*undo
, *next
;
1537 for (undo
= undobuf
.frees
; undo
; undo
= next
)
1545 total_attempts
+= combine_attempts
;
1546 total_merges
+= combine_merges
;
1547 total_extras
+= combine_extras
;
1548 total_successes
+= combine_successes
;
1550 nonzero_sign_valid
= 0;
1551 rtl_hooks
= general_rtl_hooks
;
1553 /* Make recognizer allow volatile MEMs again. */
1556 return new_direct_jump_p
;
1559 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1562 init_reg_last (void)
1567 FOR_EACH_VEC_ELT (reg_stat
, i
, p
)
1568 memset (p
, 0, offsetof (reg_stat_type
, sign_bit_copies
));
1571 /* Set up any promoted values for incoming argument registers. */
1574 setup_incoming_promotions (rtx_insn
*first
)
1577 bool strictly_local
= false;
1579 for (arg
= DECL_ARGUMENTS (current_function_decl
); arg
;
1580 arg
= DECL_CHAIN (arg
))
1582 rtx x
, reg
= DECL_INCOMING_RTL (arg
);
1584 machine_mode mode1
, mode2
, mode3
, mode4
;
1586 /* Only continue if the incoming argument is in a register. */
1590 /* Determine, if possible, whether all call sites of the current
1591 function lie within the current compilation unit. (This does
1592 take into account the exporting of a function via taking its
1593 address, and so forth.) */
1594 strictly_local
= cgraph_node::local_info (current_function_decl
)->local
;
1596 /* The mode and signedness of the argument before any promotions happen
1597 (equal to the mode of the pseudo holding it at that stage). */
1598 mode1
= TYPE_MODE (TREE_TYPE (arg
));
1599 uns1
= TYPE_UNSIGNED (TREE_TYPE (arg
));
1601 /* The mode and signedness of the argument after any source language and
1602 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1603 mode2
= TYPE_MODE (DECL_ARG_TYPE (arg
));
1604 uns3
= TYPE_UNSIGNED (DECL_ARG_TYPE (arg
));
1606 /* The mode and signedness of the argument as it is actually passed,
1607 see assign_parm_setup_reg in function.c. */
1608 mode3
= promote_function_mode (TREE_TYPE (arg
), mode1
, &uns3
,
1609 TREE_TYPE (cfun
->decl
), 0);
1611 /* The mode of the register in which the argument is being passed. */
1612 mode4
= GET_MODE (reg
);
1614 /* Eliminate sign extensions in the callee when:
1615 (a) A mode promotion has occurred; */
1618 /* (b) The mode of the register is the same as the mode of
1619 the argument as it is passed; */
1622 /* (c) There's no language level extension; */
1625 /* (c.1) All callers are from the current compilation unit. If that's
1626 the case we don't have to rely on an ABI, we only have to know
1627 what we're generating right now, and we know that we will do the
1628 mode1 to mode2 promotion with the given sign. */
1629 else if (!strictly_local
)
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1640 /* Record that the value was promoted from mode1 to mode3,
1641 so that any sign extension at the head of the current
1642 function may be eliminated. */
1643 x
= gen_rtx_CLOBBER (mode1
, const0_rtx
);
1644 x
= gen_rtx_fmt_e ((uns3
? ZERO_EXTEND
: SIGN_EXTEND
), mode3
, x
);
1645 record_value_for_reg (reg
, first
, x
);
1649 /* Called via note_stores. If X is a pseudo that is narrower than
1650 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1652 If we are setting only a portion of X and we can't figure out what
1653 portion, assume all bits will be used since we don't know what will
1656 Similarly, set how many bits of X are known to be copies of the sign bit
1657 at all locations in the function. This is the smallest number implied
1661 set_nonzero_bits_and_sign_copies (rtx x
, const_rtx set
, void *data
)
1663 rtx_insn
*insn
= (rtx_insn
*) data
;
1667 && REGNO (x
) >= FIRST_PSEUDO_REGISTER
1668 /* If this register is undefined at the start of the file, we can't
1669 say what its contents were. */
1670 && ! REGNO_REG_SET_P
1671 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun
)->next_bb
), REGNO (x
))
1672 && HWI_COMPUTABLE_MODE_P (GET_MODE (x
)))
1674 reg_stat_type
*rsp
= ®_stat
[REGNO (x
)];
1676 if (set
== 0 || GET_CODE (set
) == CLOBBER
)
1678 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1679 rsp
->sign_bit_copies
= 1;
1683 /* If this register is being initialized using itself, and the
1684 register is uninitialized in this basic block, and there are
1685 no LOG_LINKS which set the register, then part of the
1686 register is uninitialized. In that case we can't assume
1687 anything about the number of nonzero bits.
1689 ??? We could do better if we checked this in
1690 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1691 could avoid making assumptions about the insn which initially
1692 sets the register, while still using the information in other
1693 insns. We would have to be careful to check every insn
1694 involved in the combination. */
1697 && reg_referenced_p (x
, PATTERN (insn
))
1698 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn
)),
1701 struct insn_link
*link
;
1703 FOR_EACH_LOG_LINK (link
, insn
)
1704 if (dead_or_set_p (link
->insn
, x
))
1708 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1709 rsp
->sign_bit_copies
= 1;
1714 /* If this is a complex assignment, see if we can convert it into a
1715 simple assignment. */
1716 set
= expand_field_assignment (set
);
1718 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1719 set what we know about X. */
1721 if (SET_DEST (set
) == x
1722 || (paradoxical_subreg_p (SET_DEST (set
))
1723 && SUBREG_REG (SET_DEST (set
)) == x
))
1725 rtx src
= SET_SRC (set
);
1727 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1728 /* If X is narrower than a word and SRC is a non-negative
1729 constant that would appear negative in the mode of X,
1730 sign-extend it for use in reg_stat[].nonzero_bits because some
1731 machines (maybe most) will actually do the sign-extension
1732 and this is the conservative approach.
1734 ??? For 2.5, try to tighten up the MD files in this regard
1735 instead of this kludge. */
1737 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
1738 && CONST_INT_P (src
)
1740 && val_signbit_known_set_p (GET_MODE (x
), INTVAL (src
)))
1741 src
= GEN_INT (INTVAL (src
) | ~GET_MODE_MASK (GET_MODE (x
)));
1744 /* Don't call nonzero_bits if it cannot change anything. */
1745 if (rsp
->nonzero_bits
!= ~(unsigned HOST_WIDE_INT
) 0)
1746 rsp
->nonzero_bits
|= nonzero_bits (src
, nonzero_bits_mode
);
1747 num
= num_sign_bit_copies (SET_SRC (set
), GET_MODE (x
));
1748 if (rsp
->sign_bit_copies
== 0
1749 || rsp
->sign_bit_copies
> num
)
1750 rsp
->sign_bit_copies
= num
;
1754 rsp
->nonzero_bits
= GET_MODE_MASK (GET_MODE (x
));
1755 rsp
->sign_bit_copies
= 1;
1760 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1761 optionally insns that were previously combined into I3 or that will be
1762 combined into the merger of INSN and I3. The order is PRED, PRED2,
1763 INSN, SUCC, SUCC2, I3.
1765 Return 0 if the combination is not allowed for any reason.
1767 If the combination is allowed, *PDEST will be set to the single
1768 destination of INSN and *PSRC to the single source, and this function
1772 can_combine_p (rtx_insn
*insn
, rtx_insn
*i3
, rtx_insn
*pred ATTRIBUTE_UNUSED
,
1773 rtx_insn
*pred2 ATTRIBUTE_UNUSED
, rtx_insn
*succ
, rtx_insn
*succ2
,
1774 rtx
*pdest
, rtx
*psrc
)
1783 bool all_adjacent
= true;
1784 int (*is_volatile_p
) (const_rtx
);
1790 if (next_active_insn (succ2
) != i3
)
1791 all_adjacent
= false;
1792 if (next_active_insn (succ
) != succ2
)
1793 all_adjacent
= false;
1795 else if (next_active_insn (succ
) != i3
)
1796 all_adjacent
= false;
1797 if (next_active_insn (insn
) != succ
)
1798 all_adjacent
= false;
1800 else if (next_active_insn (insn
) != i3
)
1801 all_adjacent
= false;
1803 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1804 or a PARALLEL consisting of such a SET and CLOBBERs.
1806 If INSN has CLOBBER parallel parts, ignore them for our processing.
1807 By definition, these happen during the execution of the insn. When it
1808 is merged with another insn, all bets are off. If they are, in fact,
1809 needed and aren't also supplied in I3, they may be added by
1810 recog_for_combine. Otherwise, it won't match.
1812 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1815 Get the source and destination of INSN. If more than one, can't
1818 if (GET_CODE (PATTERN (insn
)) == SET
)
1819 set
= PATTERN (insn
);
1820 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
1821 && GET_CODE (XVECEXP (PATTERN (insn
), 0, 0)) == SET
)
1823 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
1825 rtx elt
= XVECEXP (PATTERN (insn
), 0, i
);
1827 switch (GET_CODE (elt
))
1829 /* This is important to combine floating point insns
1830 for the SH4 port. */
1832 /* Combining an isolated USE doesn't make sense.
1833 We depend here on combinable_i3pat to reject them. */
1834 /* The code below this loop only verifies that the inputs of
1835 the SET in INSN do not change. We call reg_set_between_p
1836 to verify that the REG in the USE does not change between
1838 If the USE in INSN was for a pseudo register, the matching
1839 insn pattern will likely match any register; combining this
1840 with any other USE would only be safe if we knew that the
1841 used registers have identical values, or if there was
1842 something to tell them apart, e.g. different modes. For
1843 now, we forgo such complicated tests and simply disallow
1844 combining of USES of pseudo registers with any other USE. */
1845 if (REG_P (XEXP (elt
, 0))
1846 && GET_CODE (PATTERN (i3
)) == PARALLEL
)
1848 rtx i3pat
= PATTERN (i3
);
1849 int i
= XVECLEN (i3pat
, 0) - 1;
1850 unsigned int regno
= REGNO (XEXP (elt
, 0));
1854 rtx i3elt
= XVECEXP (i3pat
, 0, i
);
1856 if (GET_CODE (i3elt
) == USE
1857 && REG_P (XEXP (i3elt
, 0))
1858 && (REGNO (XEXP (i3elt
, 0)) == regno
1859 ? reg_set_between_p (XEXP (elt
, 0),
1860 PREV_INSN (insn
), i3
)
1861 : regno
>= FIRST_PSEUDO_REGISTER
))
1868 /* We can ignore CLOBBERs. */
1873 /* Ignore SETs whose result isn't used but not those that
1874 have side-effects. */
1875 if (find_reg_note (insn
, REG_UNUSED
, SET_DEST (elt
))
1876 && insn_nothrow_p (insn
)
1877 && !side_effects_p (elt
))
1880 /* If we have already found a SET, this is a second one and
1881 so we cannot combine with this insn. */
1889 /* Anything else means we can't combine. */
1895 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1896 so don't do anything with it. */
1897 || GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1906 /* The simplification in expand_field_assignment may call back to
1907 get_last_value, so set safe guard here. */
1908 subst_low_luid
= DF_INSN_LUID (insn
);
1910 set
= expand_field_assignment (set
);
1911 src
= SET_SRC (set
), dest
= SET_DEST (set
);
  /* Do not eliminate a user-specified register if it is in an
     asm input, because we may break the register asm usage defined
     in the GCC manual if we allow doing so.
     Be aware that this may cover more cases than we expect, but this
     should be harmless.  */
1918 if (REG_P (dest
) && REG_USERVAR_P (dest
) && HARD_REGISTER_P (dest
)
1919 && extract_asm_operands (PATTERN (i3
)))
1922 /* Don't eliminate a store in the stack pointer. */
1923 if (dest
== stack_pointer_rtx
1924 /* Don't combine with an insn that sets a register to itself if it has
1925 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1926 || (rtx_equal_p (src
, dest
) && find_reg_note (insn
, REG_EQUAL
, NULL_RTX
))
1927 /* Can't merge an ASM_OPERANDS. */
1928 || GET_CODE (src
) == ASM_OPERANDS
1929 /* Can't merge a function call. */
1930 || GET_CODE (src
) == CALL
1931 /* Don't eliminate a function call argument. */
1933 && (find_reg_fusage (i3
, USE
, dest
)
1935 && REGNO (dest
) < FIRST_PSEUDO_REGISTER
1936 && global_regs
[REGNO (dest
)])))
1937 /* Don't substitute into an incremented register. */
1938 || FIND_REG_INC_NOTE (i3
, dest
)
1939 || (succ
&& FIND_REG_INC_NOTE (succ
, dest
))
1940 || (succ2
&& FIND_REG_INC_NOTE (succ2
, dest
))
1941 /* Don't substitute into a non-local goto, this confuses CFG. */
1942 || (JUMP_P (i3
) && find_reg_note (i3
, REG_NON_LOCAL_GOTO
, NULL_RTX
))
1943 /* Make sure that DEST is not used after SUCC but before I3. */
1946 && (reg_used_between_p (dest
, succ2
, i3
)
1947 || reg_used_between_p (dest
, succ
, succ2
)))
1948 || (!succ2
&& succ
&& reg_used_between_p (dest
, succ
, i3
))))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 if the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
	       || ! find_reg_note (insn, REG_EQUIV, src))
	  && use_crosses_set_p (src, DF_INSN_LUID (insn)))
      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
  /* DEST must either be a REG or CC0.  */

  /* If register alignment is being enforced for multi-word items in all
     cases except for parameters, it is possible to have a register copy
     insn referencing a hard register that is not allowed to contain the
     mode being copied and which would not be valid as an operand of most
     insns.  Eliminate this problem by not combining with such an insn.

     Also, on some machines we don't want to extend the life of a hard
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
  else if (GET_CODE (dest) != CC0)

  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)

	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3 */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
      /* We'll check insns between INSN and I3 below.  */

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC
	|| reg_used_between_p (XEXP (link, 0), insn, i3)
	|| (pred != NULL_RTX
	    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	|| (pred2 != NULL_RTX
	    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
	|| (succ != NULL_RTX
	    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	|| (succ2 != NULL_RTX
	    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
	|| reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */
/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

	(set (reg:DI 101) (reg:DI 100))
	(set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	(parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		   (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)

  if (GET_CODE (x) == SET)

      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);
      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
					GET_MODE (inner_dest))))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      if (GET_CODE (subdest) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (subdest))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
	subdest = SUBREG_REG (subdest);
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
	  && (REGNO (subdest) != ARG_POINTER_REGNUM
	      || ! fixed_regs[REGNO (subdest)])
	  && REGNO (subdest) != STACK_POINTER_REGNUM)

	  if (*pi3dest_killed)

	  *pi3dest_killed = subdest;

  else if (GET_CODE (x) == PARALLEL)
      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
				i1_not_in_src, i0_not_in_src, pi3dest_killed))
/* Return 1 if X is an arithmetic expression that contains a multiplication
   and division.  We don't count multiplications by powers of two here.  */

contains_muldiv (rtx x)

  switch (GET_CODE (x))

    case MOD:  case DIV:  case UMOD:  case UDIV:

      return ! (CONST_INT_P (XEXP (x, 1))
		&& exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);

      return contains_muldiv (XEXP (x, 0))
	     || contains_muldiv (XEXP (x, 1));

      return contains_muldiv (XEXP (x, 0));
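/* Illustrative sketch, not part of the original sources: contains_muldiv
   is meant to answer questions such as whether an expression like

	(plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102))

   contains a multiplication or division worth worrying about (it does),
   whereas

	(plus:SI (mult:SI (reg:SI 100) (const_int 8)) (reg:SI 102))

   does not count, since a multiplication by a power of two is as cheap as
   a shift.  The register numbers above are made up for the example.  */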
/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

cant_combine_insn_p (rtx_insn *insn)

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (! INSN_P (insn))

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);

  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
	  || (HARD_REGISTER_P (dest)
	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
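/* Illustrative sketch, not part of the original sources: the check above is
   meant to refuse combining a plain copy such as

	(set (reg:SI 0) (reg:SI 100))

   on a target where the class containing hard register 0 is reported as
   likely spilled; the register allocator can usually tie such a move, while
   substituting it elsewhere could leave reload without options.  The
   register numbers here are only an example and are target-dependent.  */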
struct likely_spilled_retval_info
{
  unsigned regno, nregs;

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */

likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;

  if (!REG_P (XEXP (set, 0)))

  if (regno >= info->regno + info->nregs)

  nregs = hard_regno_nregs[regno][GET_MODE (x)];
  if (regno + nregs <= info->regno)

  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider to combine into the
   second copy insn for a complex value.  */

likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
  regno = REGNO (reg);
  nregs = hard_regno_nregs[regno][GET_MODE (reg)];
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);

  /* Check if any of the (probably) live return value registers is
    if ((mask & 1 << nregs)
	&& targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
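/* Illustrative note, not part of the original sources: a typical case the
   function above guards against is a wide return value copied out of the
   return registers in two moves, e.g.

	(set (reg:SI 100) (reg:SI 0))
	(set (reg:SI 101) (reg:SI 1))
	(use (reg:DI 0))

   assuming hard registers 0 and 1 form the (likely spilled) return-value
   pair; while the second copy is a combination candidate, part of that
   pair is still live.  Register numbers here are made up for the example.  */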
/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */

can_change_dest_mode (rtx x, int added_sets, machine_mode mode)

  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
	    && (hard_regno_nregs[regno][GET_MODE (x)]
		>= hard_regno_nregs[regno][mode]));

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
	  && REG_N_SETS (regno) == 1
	  && !REG_USERVAR_P (x));
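/* Illustrative note, not part of the original sources: a typical caller asks
   whether a condition-code destination such as (reg:CC 100) can be reused in
   a different CC-class mode chosen by SELECT_CC_MODE.  For a pseudo this is
   allowed when it is set only once and is not a user variable; a hard
   register must also satisfy HARD_REGNO_MODE_OK in the new mode and not
   need more hard registers than before.  The mode and register number here
   are made up for the example.  */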
/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
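/* Illustrative note, not part of the original sources: destinations for
   which reg_subword_p would be expected to return true include

	(subreg:SI (reg:DI 100) 0)
	(strict_low_part (subreg:HI (reg:SI 100) 0))

   when REG is (reg:DI 100) or (reg:SI 100) respectively; the register
   numbers and modes are made up for the example.  */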
/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  if (EDGE_COUNT (bb->succs) == 1)

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
	if (BARRIER_P (insn))
	  {
	    if (PREV_INSN (insn))
	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	    else
	      BB_FOOTER (bb) = NEXT_INSN (insn);
	    if (NEXT_INSN (insn))
	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	  }
	else if (LABEL_P (insn))
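/* Illustrative note, not part of the original sources: after combine rewrites
   a conditional branch, the remaining jump pattern is either

	(set (pc) (pc))

   a no-op whose kept edge must be the fallthrough edge, or

	(set (pc) (label_ref FOO))

   an unconditional jump whose kept edge is the taken one; once only a single
   successor edge remains, any barrier left in the block footer has to be
   removed, which is what the loop above does.  */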
/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */

is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)

  int len = XVECLEN (pat, 0);

  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
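/* Illustrative sketch, not part of the original sources: with N == 2 the
   predicate above would accept a pattern such as

	(parallel [(set (reg:SI 100) (plus:SI (reg:SI 102) (const_int 1)))
		   (set (reg:SI 101) (reg:SI 102))
		   (clobber (scratch:SI))])

   but reject it if either destination were a MEM or if a SET followed the
   first CLOBBER.  The registers shown are made up for the example.  */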
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */

can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))

  rtx pat = PATTERN (insn);

  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
    }
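/* Illustrative sketch, not part of the original sources: splitting in order
   is not possible when an earlier SET's destination is used by a later one,
   e.g.

	(parallel [(set (reg:SI 100) (reg:SI 101))
		   (set (reg:SI 102) (plus:SI (reg:SI 100) (const_int 4)))])

   because inside the PARALLEL the second SET reads the value (reg:SI 100)
   had before the insn, while after splitting it would read the value just
   stored.  The register numbers are made up for the example.  */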
/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that has
   been I3 passed to an earlier try_combine within the same basic

try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int changed_i3_dest = 0;

  rtx_insn *temp_insn;
  struct insn_link *link;
  rtx new_other_notes;
  /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))

  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */

      if (!flag_expensive_optimizations)

      for (i = 0; i < 4; i++)

	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
	  rtx set = single_set (insn);

	  src = SET_SRC (set);
	  if (CONSTANT_P (src))
	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
		   || GET_CODE (src) == LSHIFTRT)
      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
	 are likely manipulating its value.  Ideally we'll be able to combine
	 all four insns into a bitfield insertion of some kind.

	 Note the source in I0 might be inside a sign/zero extension and the
	 memory modes in I0 and I3 might be different.  So extract the address
	 from the destination of I3 and search for it in the source of I0.

	 In the event that there's a match but the source/dest do not actually
	 refer to the same memory, the worst that happens is we try some
	 combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
	  /* Ensure the source of SET0 is a MEM, possibly buried inside
	  && (GET_CODE (SET_SRC (set0)) == MEM
	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
	  && (set3 = single_set (i3))
	  /* Ensure the destination of SET3 is a MEM.  */
	  && GET_CODE (SET_DEST (set3)) == MEM
	  /* Would it be better to extract the base address for the MEM
	     in SET3 and look for that?  I don't have cases where it matters
	     but I could envision such cases.  */
	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))

      if (ngood < 2 && nshift < 2)

  /* Exit early if one of the insns involved can't be used for
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));
  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    temp_insn = i2, i2 = i0, i0 = temp_insn;
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    temp_insn = i1, i1 = i0, i0 = temp_insn;
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    temp_insn = i1, i1 = i2, i2 = temp_insn;
= 0;
2737 /* First check for one important special case that the code below will
2738 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2739 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2740 we may be able to replace that destination with the destination of I3.
2741 This occurs in the common code where we compute both a quotient and
2742 remainder into a structure, in which case we want to do the computation
2743 directly into the structure to avoid register-register copies.
2745 Note that this case handles both multiple sets in I2 and also cases
2746 where I2 has a number of CLOBBERs inside the PARALLEL.
2748 We make very conservative checks below and only try to handle the
2749 most common cases of this. For example, we only handle the case
2750 where I2 and I3 are adjacent to avoid making difficult register
2753 if (i1
== 0 && NONJUMP_INSN_P (i3
) && GET_CODE (PATTERN (i3
)) == SET
2754 && REG_P (SET_SRC (PATTERN (i3
)))
2755 && REGNO (SET_SRC (PATTERN (i3
))) >= FIRST_PSEUDO_REGISTER
2756 && find_reg_note (i3
, REG_DEAD
, SET_SRC (PATTERN (i3
)))
2757 && GET_CODE (PATTERN (i2
)) == PARALLEL
2758 && ! side_effects_p (SET_DEST (PATTERN (i3
)))
2759 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2760 below would need to check what is inside (and reg_overlap_mentioned_p
2761 doesn't support those codes anyway). Don't allow those destinations;
2762 the resulting insn isn't likely to be recognized anyway. */
2763 && GET_CODE (SET_DEST (PATTERN (i3
))) != ZERO_EXTRACT
2764 && GET_CODE (SET_DEST (PATTERN (i3
))) != STRICT_LOW_PART
2765 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3
)),
2766 SET_DEST (PATTERN (i3
)))
2767 && next_active_insn (i2
) == i3
)
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	    (parallel [(set (mem (reg 69)) ...)
		       (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.  */
2781 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2782 if ((GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2783 || GET_CODE (XVECEXP (p2
, 0, i
)) == CLOBBER
)
2784 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3
)),
2785 SET_DEST (XVECEXP (p2
, 0, i
))))
2788 /* Make sure this PARALLEL is not an asm. We do not allow combining
2789 that usually (see can_combine_p), so do not here either. */
2790 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2791 if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2792 && GET_CODE (SET_SRC (XVECEXP (p2
, 0, i
))) == ASM_OPERANDS
)
2795 if (i
== XVECLEN (p2
, 0))
2796 for (i
= 0; i
< XVECLEN (p2
, 0); i
++)
2797 if (GET_CODE (XVECEXP (p2
, 0, i
)) == SET
2798 && SET_DEST (XVECEXP (p2
, 0, i
)) == SET_SRC (PATTERN (i3
)))
2803 subst_low_luid
= DF_INSN_LUID (i2
);
2805 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2806 i2src
= SET_SRC (XVECEXP (p2
, 0, i
));
2807 i2dest
= SET_DEST (XVECEXP (p2
, 0, i
));
2808 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2810 /* Replace the dest in I2 with our dest and make the resulting
2811 insn the new pattern for I3. Then skip to where we validate
2812 the pattern. Everything was set up above. */
2813 SUBST (SET_DEST (XVECEXP (p2
, 0, i
)), SET_DEST (PATTERN (i3
)));
2815 i3_subst_into_i2
= 1;
2816 goto validate_replacement
;
2820 /* If I2 is setting a pseudo to a constant and I3 is setting some
2821 sub-part of it to another constant, merge them by making a new
2824 && (temp_expr
= single_set (i2
)) != 0
2825 && CONST_SCALAR_INT_P (SET_SRC (temp_expr
))
2826 && GET_CODE (PATTERN (i3
)) == SET
2827 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3
)))
2828 && reg_subword_p (SET_DEST (PATTERN (i3
)), SET_DEST (temp_expr
)))
2830 rtx dest
= SET_DEST (PATTERN (i3
));
2834 if (GET_CODE (dest
) == ZERO_EXTRACT
)
2836 if (CONST_INT_P (XEXP (dest
, 1))
2837 && CONST_INT_P (XEXP (dest
, 2)))
2839 width
= INTVAL (XEXP (dest
, 1));
2840 offset
= INTVAL (XEXP (dest
, 2));
2841 dest
= XEXP (dest
, 0);
2842 if (BITS_BIG_ENDIAN
)
2843 offset
= GET_MODE_PRECISION (GET_MODE (dest
)) - width
- offset
;
2848 if (GET_CODE (dest
) == STRICT_LOW_PART
)
2849 dest
= XEXP (dest
, 0);
2850 width
= GET_MODE_PRECISION (GET_MODE (dest
));
2856 /* If this is the low part, we're done. */
2857 if (subreg_lowpart_p (dest
))
2859 /* Handle the case where inner is twice the size of outer. */
2860 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr
)))
2861 == 2 * GET_MODE_PRECISION (GET_MODE (dest
)))
2862 offset
+= GET_MODE_PRECISION (GET_MODE (dest
));
2863 /* Otherwise give up for now. */
2870 rtx inner
= SET_SRC (PATTERN (i3
));
2871 rtx outer
= SET_SRC (temp_expr
);
2874 = wi::insert (std::make_pair (outer
, GET_MODE (SET_DEST (temp_expr
))),
2875 std::make_pair (inner
, GET_MODE (dest
)),
2880 subst_low_luid
= DF_INSN_LUID (i2
);
2881 added_sets_2
= added_sets_1
= added_sets_0
= 0;
2882 i2dest
= SET_DEST (temp_expr
);
2883 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2885 /* Replace the source in I2 with the new constant and make the
2886 resulting insn the new pattern for I3. Then skip to where we
2887 validate the pattern. Everything was set up above. */
2888 SUBST (SET_SRC (temp_expr
),
2889 immed_wide_int_const (o
, GET_MODE (SET_DEST (temp_expr
))));
2891 newpat
= PATTERN (i2
);
2893 /* The dest of I3 has been replaced with the dest of I2. */
2894 changed_i3_dest
= 1;
2895 goto validate_replacement
;
2900 /* If we have no I1 and I2 looks like:
2901 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2903 make up a dummy I1 that is
2906 (set (reg:CC X) (compare:CC Y (const_int 0)))
2908 (We can ignore any trailing CLOBBERs.)
2910 This undoes a previous combination and allows us to match a branch-and-
2914 && is_parallel_of_n_reg_sets (PATTERN (i2
), 2)
2915 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0))))
2917 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0))) == COMPARE
2918 && XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 1) == const0_rtx
2919 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2
), 0, 0)), 0),
2920 SET_SRC (XVECEXP (PATTERN (i2
), 0, 1)))
2921 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
2922 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
))
2924 /* We make I1 with the same INSN_UID as I2. This gives it
2925 the same DF_INSN_LUID for value tracking. Our fake I1 will
2926 never appear in the insn stream so giving it the same INSN_UID
2927 as I2 will not cause a problem. */
2929 i1
= gen_rtx_INSN (VOIDmode
, NULL
, i2
, BLOCK_FOR_INSN (i2
),
2930 XVECEXP (PATTERN (i2
), 0, 1), INSN_LOCATION (i2
),
2932 INSN_UID (i1
) = INSN_UID (i2
);
2934 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 0));
2935 SUBST (XEXP (SET_SRC (PATTERN (i2
)), 0),
2936 SET_DEST (PATTERN (i1
)));
2937 unsigned int regno
= REGNO (SET_DEST (PATTERN (i1
)));
2938 SUBST_LINK (LOG_LINKS (i2
),
2939 alloc_insn_link (i1
, regno
, LOG_LINKS (i2
)));
2942 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2943 make those two SETs separate I1 and I2 insns, and make an I0 that is
2946 && is_parallel_of_n_reg_sets (PATTERN (i2
), 2)
2947 && can_split_parallel_of_n_reg_sets (i2
, 2)
2948 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 0)), i2
, i3
)
2949 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2
), 0, 1)), i2
, i3
))
2951 /* If there is no I1, there is no I0 either. */
2954 /* We make I1 with the same INSN_UID as I2. This gives it
2955 the same DF_INSN_LUID for value tracking. Our fake I1 will
2956 never appear in the insn stream so giving it the same INSN_UID
2957 as I2 will not cause a problem. */
2959 i1
= gen_rtx_INSN (VOIDmode
, NULL
, i2
, BLOCK_FOR_INSN (i2
),
2960 XVECEXP (PATTERN (i2
), 0, 0), INSN_LOCATION (i2
),
2962 INSN_UID (i1
) = INSN_UID (i2
);
2964 SUBST (PATTERN (i2
), XVECEXP (PATTERN (i2
), 0, 1));
2968 /* Verify that I2 and I1 are valid for combining. */
2969 if (! can_combine_p (i2
, i3
, i0
, i1
, NULL
, NULL
, &i2dest
, &i2src
)
2970 || (i1
&& ! can_combine_p (i1
, i3
, i0
, NULL
, i2
, NULL
,
2972 || (i0
&& ! can_combine_p (i0
, i3
, NULL
, NULL
, i1
, i2
,
2979 /* Record whether I2DEST is used in I2SRC and similarly for the other
2980 cases. Knowing this will help in register status updating below. */
2981 i2dest_in_i2src
= reg_overlap_mentioned_p (i2dest
, i2src
);
2982 i1dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i1dest
, i1src
);
2983 i2dest_in_i1src
= i1
&& reg_overlap_mentioned_p (i2dest
, i1src
);
2984 i0dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i0dest
, i0src
);
2985 i1dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i1dest
, i0src
);
2986 i2dest_in_i0src
= i0
&& reg_overlap_mentioned_p (i2dest
, i0src
);
2987 i2dest_killed
= dead_or_set_p (i2
, i2dest
);
2988 i1dest_killed
= i1
&& dead_or_set_p (i1
, i1dest
);
2989 i0dest_killed
= i0
&& dead_or_set_p (i0
, i0dest
);
2991 /* For the earlier insns, determine which of the subsequent ones they
2993 i1_feeds_i2_n
= i1
&& insn_a_feeds_b (i1
, i2
);
2994 i0_feeds_i1_n
= i0
&& insn_a_feeds_b (i0
, i1
);
2995 i0_feeds_i2_n
= (i0
&& (!i0_feeds_i1_n
? insn_a_feeds_b (i0
, i2
)
2996 : (!reg_overlap_mentioned_p (i1dest
, i0dest
)
2997 && reg_overlap_mentioned_p (i0dest
, i2src
))));
2999 /* Ensure that I3's pattern can be the destination of combines. */
3000 if (! combinable_i3pat (i3
, &PATTERN (i3
), i2dest
, i1dest
, i0dest
,
3001 i1
&& i2dest_in_i1src
&& !i1_feeds_i2_n
,
3002 i0
&& ((i2dest_in_i0src
&& !i0_feeds_i2_n
)
3003 || (i1dest_in_i0src
&& !i0_feeds_i1_n
)),
3010 /* See if any of the insns is a MULT operation. Unless one is, we will
3011 reject a combination that is, since it must be slower. Be conservative
3013 if (GET_CODE (i2src
) == MULT
3014 || (i1
!= 0 && GET_CODE (i1src
) == MULT
)
3015 || (i0
!= 0 && GET_CODE (i0src
) == MULT
)
3016 || (GET_CODE (PATTERN (i3
)) == SET
3017 && GET_CODE (SET_SRC (PATTERN (i3
))) == MULT
))
3020 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3021 We used to do this EXCEPT in one case: I3 has a post-inc in an
3022 output operand. However, that exception can give rise to insns like
3024 which is a famous insn on the PDP-11 where the value of r3 used as the
3025 source was model-dependent. Avoid this sort of thing. */
3028 if (!(GET_CODE (PATTERN (i3
)) == SET
3029 && REG_P (SET_SRC (PATTERN (i3
)))
3030 && MEM_P (SET_DEST (PATTERN (i3
)))
3031 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_INC
3032 || GET_CODE (XEXP (SET_DEST (PATTERN (i3
)), 0)) == POST_DEC
)))
3033 /* It's not the exception. */
3038 for (link
= REG_NOTES (i3
); link
; link
= XEXP (link
, 1))
3039 if (REG_NOTE_KIND (link
) == REG_INC
3040 && (reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i2
))
3042 && reg_overlap_mentioned_p (XEXP (link
, 0), PATTERN (i1
)))))
  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */
3060 added_sets_2
= !dead_or_set_p (i3
, i2dest
);
3063 added_sets_1
= !(dead_or_set_p (i3
, i1dest
)
3064 || (i1_feeds_i2_n
&& dead_or_set_p (i2
, i1dest
)));
3069 added_sets_0
= !(dead_or_set_p (i3
, i0dest
)
3070 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
))
3071 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3072 && dead_or_set_p (i2
, i0dest
)));
3076 /* We are about to copy insns for the case where they need to be kept
3077 around. Check that they can be copied in the merged instruction. */
3079 if (targetm
.cannot_copy_insn_p
3080 && ((added_sets_2
&& targetm
.cannot_copy_insn_p (i2
))
3081 || (i1
&& added_sets_1
&& targetm
.cannot_copy_insn_p (i1
))
3082 || (i0
&& added_sets_0
&& targetm
.cannot_copy_insn_p (i0
))))
3088 /* If the set in I2 needs to be kept around, we must make a copy of
3089 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3090 PATTERN (I2), we are only substituting for the original I1DEST, not into
3091 an already-substituted copy. This also prevents making self-referential
3092 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3097 if (GET_CODE (PATTERN (i2
)) == PARALLEL
)
3098 i2pat
= gen_rtx_SET (VOIDmode
, i2dest
, copy_rtx (i2src
));
3100 i2pat
= copy_rtx (PATTERN (i2
));
3105 if (GET_CODE (PATTERN (i1
)) == PARALLEL
)
3106 i1pat
= gen_rtx_SET (VOIDmode
, i1dest
, copy_rtx (i1src
));
3108 i1pat
= copy_rtx (PATTERN (i1
));
3113 if (GET_CODE (PATTERN (i0
)) == PARALLEL
)
3114 i0pat
= gen_rtx_SET (VOIDmode
, i0dest
, copy_rtx (i0src
));
3116 i0pat
= copy_rtx (PATTERN (i0
));
  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.   Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */
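  /* Illustrative sketch, not part of the original sources: the situation
     described above typically looks like

	I2: (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
	I3: (set (reg:CCZ flags) (compare:CCZ (reg:SI 100) (const_int 0)))

     where (reg:SI 100) is still needed after I3; the desired combined form
     is a PARALLEL that performs both the comparison and the addition, e.g.

	(parallel [(set (reg:CCZ flags)
			(compare:CCZ (plus:SI (reg:SI 101) (reg:SI 102))
				     (const_int 0)))
		   (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))])

     The register numbers and the CCZ mode are made up for the example.  */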
3138 if (!HAVE_cc0
&& i1
== 0 && added_sets_2
&& GET_CODE (PATTERN (i3
)) == SET
3139 && GET_CODE (SET_SRC (PATTERN (i3
))) == COMPARE
3140 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3
)), 1))
3141 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3
)), 0), i2dest
))
3144 rtx
*cc_use_loc
= NULL
;
3145 rtx_insn
*cc_use_insn
= NULL
;
3146 rtx op0
= i2src
, op1
= XEXP (SET_SRC (PATTERN (i3
)), 1);
3147 machine_mode compare_mode
, orig_compare_mode
;
3148 enum rtx_code compare_code
= UNKNOWN
, orig_compare_code
= UNKNOWN
;
3150 newpat
= PATTERN (i3
);
3151 newpat_dest
= SET_DEST (newpat
);
3152 compare_mode
= orig_compare_mode
= GET_MODE (newpat_dest
);
3154 if (undobuf
.other_insn
== 0
3155 && (cc_use_loc
= find_single_use (SET_DEST (newpat
), i3
,
3158 compare_code
= orig_compare_code
= GET_CODE (*cc_use_loc
);
3159 compare_code
= simplify_compare_const (compare_code
,
3160 GET_MODE (i2dest
), op0
, &op1
);
3161 target_canonicalize_comparison (&compare_code
, &op0
, &op1
, 1);
3164 /* Do the rest only if op1 is const0_rtx, which may be the
3165 result of simplification. */
3166 if (op1
== const0_rtx
)
3168 /* If a single use of the CC is found, prepare to modify it
3169 when SELECT_CC_MODE returns a new CC-class mode, or when
3170 the above simplify_compare_const() returned a new comparison
3171 operator. undobuf.other_insn is assigned the CC use insn
3172 when modifying it. */
3175 #ifdef SELECT_CC_MODE
3176 machine_mode new_mode
3177 = SELECT_CC_MODE (compare_code
, op0
, op1
);
3178 if (new_mode
!= orig_compare_mode
3179 && can_change_dest_mode (SET_DEST (newpat
),
3180 added_sets_2
, new_mode
))
3182 unsigned int regno
= REGNO (newpat_dest
);
3183 compare_mode
= new_mode
;
3184 if (regno
< FIRST_PSEUDO_REGISTER
)
3185 newpat_dest
= gen_rtx_REG (compare_mode
, regno
);
3188 SUBST_MODE (regno_reg_rtx
[regno
], compare_mode
);
3189 newpat_dest
= regno_reg_rtx
[regno
];
3193 /* Cases for modifying the CC-using comparison. */
3194 if (compare_code
!= orig_compare_code
3195 /* ??? Do we need to verify the zero rtx? */
3196 && XEXP (*cc_use_loc
, 1) == const0_rtx
)
3198 /* Replace cc_use_loc with entire new RTX. */
3200 gen_rtx_fmt_ee (compare_code
, compare_mode
,
3201 newpat_dest
, const0_rtx
));
3202 undobuf
.other_insn
= cc_use_insn
;
3204 else if (compare_mode
!= orig_compare_mode
)
3206 /* Just replace the CC reg with a new mode. */
3207 SUBST (XEXP (*cc_use_loc
, 0), newpat_dest
);
3208 undobuf
.other_insn
= cc_use_insn
;
3212 /* Now we modify the current newpat:
3213 First, SET_DEST(newpat) is updated if the CC mode has been
3214 altered. For targets without SELECT_CC_MODE, this should be
3216 if (compare_mode
!= orig_compare_mode
)
3217 SUBST (SET_DEST (newpat
), newpat_dest
);
3218 /* This is always done to propagate i2src into newpat. */
3219 SUBST (SET_SRC (newpat
),
3220 gen_rtx_COMPARE (compare_mode
, op0
, op1
));
3221 /* Create new version of i2pat if needed; the below PARALLEL
3222 creation needs this to work correctly. */
3223 if (! rtx_equal_p (i2src
, op0
))
3224 i2pat
= gen_rtx_SET (VOIDmode
, i2dest
, op0
);
3229 if (i2_is_used
== 0)
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the inner insns.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */
3250 if (flag_expensive_optimizations
)
3252 /* Pass pc_rtx so no substitutions are done, just
3256 subst_low_luid
= DF_INSN_LUID (i1
);
3257 i1src
= subst (i1src
, pc_rtx
, pc_rtx
, 0, 0, 0);
3260 subst_low_luid
= DF_INSN_LUID (i2
);
3261 i2src
= subst (i2src
, pc_rtx
, pc_rtx
, 0, 0, 0);
3264 n_occurrences
= 0; /* `subst' counts here */
3265 subst_low_luid
= DF_INSN_LUID (i2
);
3267 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3268 copy of I2SRC each time we substitute it, in order to avoid creating
3269 self-referential RTL when we will be substituting I1SRC for I1DEST
3270 later. Likewise if I0 feeds into I2, either directly or indirectly
3271 through I1, and I0DEST is in I0SRC. */
3272 newpat
= subst (PATTERN (i3
), i2dest
, i2src
, 0, 0,
3273 (i1_feeds_i2_n
&& i1dest_in_i1src
)
3274 || ((i0_feeds_i2_n
|| (i0_feeds_i1_n
&& i1_feeds_i2_n
))
3275 && i0dest_in_i0src
));
3278 /* Record whether I2's body now appears within I3's body. */
3279 i2_is_used
= n_occurrences
;
3282 /* If we already got a failure, don't try to do more. Otherwise, try to
3283 substitute I1 if we have it. */
3285 if (i1
&& GET_CODE (newpat
) != CLOBBER
)
3287 /* Check that an autoincrement side-effect on I1 has not been lost.
3288 This happens if I1DEST is mentioned in I2 and dies there, and
3289 has disappeared from the new pattern. */
3290 if ((FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3292 && dead_or_set_p (i2
, i1dest
)
3293 && !reg_overlap_mentioned_p (i1dest
, newpat
))
3294 /* Before we can do this substitution, we must redo the test done
3295 above (see detailed comments there) that ensures I1DEST isn't
3296 mentioned in any SETs in NEWPAT that are field assignments. */
3297 || !combinable_i3pat (NULL
, &newpat
, i1dest
, NULL_RTX
, NULL_RTX
,
3305 subst_low_luid
= DF_INSN_LUID (i1
);
3307 /* If the following substitution will modify I1SRC, make a copy of it
3308 for the case where it is substituted for I1DEST in I2PAT later. */
3309 if (added_sets_2
&& i1_feeds_i2_n
)
3310 i1src_copy
= copy_rtx (i1src
);
3312 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3313 copy of I1SRC each time we substitute it, in order to avoid creating
3314 self-referential RTL when we will be substituting I0SRC for I0DEST
3316 newpat
= subst (newpat
, i1dest
, i1src
, 0, 0,
3317 i0_feeds_i1_n
&& i0dest_in_i0src
);
3320 /* Record whether I1's body now appears within I3's body. */
3321 i1_is_used
= n_occurrences
;
3324 /* Likewise for I0 if we have it. */
3326 if (i0
&& GET_CODE (newpat
) != CLOBBER
)
3328 if ((FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3329 && ((i0_feeds_i2_n
&& dead_or_set_p (i2
, i0dest
))
3330 || (i0_feeds_i1_n
&& dead_or_set_p (i1
, i0dest
)))
3331 && !reg_overlap_mentioned_p (i0dest
, newpat
))
3332 || !combinable_i3pat (NULL
, &newpat
, i0dest
, NULL_RTX
, NULL_RTX
,
3339 /* If the following substitution will modify I0SRC, make a copy of it
3340 for the case where it is substituted for I0DEST in I1PAT later. */
3341 if (added_sets_1
&& i0_feeds_i1_n
)
3342 i0src_copy
= copy_rtx (i0src
);
3343 /* And a copy for I0DEST in I2PAT substitution. */
3344 if (added_sets_2
&& ((i0_feeds_i1_n
&& i1_feeds_i2_n
)
3345 || (i0_feeds_i2_n
)))
3346 i0src_copy2
= copy_rtx (i0src
);
3349 subst_low_luid
= DF_INSN_LUID (i0
);
3350 newpat
= subst (newpat
, i0dest
, i0src
, 0, 0, 0);
3354 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3355 to count all the ways that I2SRC and I1SRC can be used. */
3356 if ((FIND_REG_INC_NOTE (i2
, NULL_RTX
) != 0
3357 && i2_is_used
+ added_sets_2
> 1)
3358 || (i1
!= 0 && FIND_REG_INC_NOTE (i1
, NULL_RTX
) != 0
3359 && (i1_is_used
+ added_sets_1
+ (added_sets_2
&& i1_feeds_i2_n
)
3361 || (i0
!= 0 && FIND_REG_INC_NOTE (i0
, NULL_RTX
) != 0
3362 && (n_occurrences
+ added_sets_0
3363 + (added_sets_1
&& i0_feeds_i1_n
)
3364 + (added_sets_2
&& i0_feeds_i2_n
)
3366 /* Fail if we tried to make a new register. */
3367 || max_reg_num () != maxreg
3368 /* Fail if we couldn't do something and have a CLOBBER. */
3369 || GET_CODE (newpat
) == CLOBBER
3370 /* Fail if this new pattern is a MULT and we didn't have one before
3371 at the outer level. */
3372 || (GET_CODE (newpat
) == SET
&& GET_CODE (SET_SRC (newpat
)) == MULT
3379 /* If the actions of the earlier insns must be kept
3380 in addition to substituting them into the latest one,
3381 we must make a new PARALLEL for the latest insn
3382 to hold additional the SETs. */
3384 if (added_sets_0
|| added_sets_1
|| added_sets_2
)
3386 int extra_sets
= added_sets_0
+ added_sets_1
+ added_sets_2
;
3389 if (GET_CODE (newpat
) == PARALLEL
)
3391 rtvec old
= XVEC (newpat
, 0);
3392 total_sets
= XVECLEN (newpat
, 0) + extra_sets
;
3393 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3394 memcpy (XVEC (newpat
, 0)->elem
, &old
->elem
[0],
3395 sizeof (old
->elem
[0]) * old
->num_elem
);
3400 total_sets
= 1 + extra_sets
;
3401 newpat
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (total_sets
));
3402 XVECEXP (newpat
, 0, 0) = old
;
3406 XVECEXP (newpat
, 0, --total_sets
) = i0pat
;
3412 t
= subst (t
, i0dest
, i0src_copy
? i0src_copy
: i0src
, 0, 0, 0);
3414 XVECEXP (newpat
, 0, --total_sets
) = t
;
3420 t
= subst (t
, i1dest
, i1src_copy
? i1src_copy
: i1src
, 0, 0,
3421 i0_feeds_i1_n
&& i0dest_in_i0src
);
3422 if ((i0_feeds_i1_n
&& i1_feeds_i2_n
) || i0_feeds_i2_n
)
3423 t
= subst (t
, i0dest
, i0src_copy2
? i0src_copy2
: i0src
, 0, 0, 0);
3425 XVECEXP (newpat
, 0, --total_sets
) = t
;
3429 validate_replacement
:
3431 /* Note which hard regs this insn has as inputs. */
3432 mark_used_regs_combine (newpat
);
3434 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3435 consider splitting this pattern, we might need these clobbers. */
3436 if (i1
&& GET_CODE (newpat
) == PARALLEL
3437 && GET_CODE (XVECEXP (newpat
, 0, XVECLEN (newpat
, 0) - 1)) == CLOBBER
)
3439 int len
= XVECLEN (newpat
, 0);
3441 newpat_vec_with_clobbers
= rtvec_alloc (len
);
3442 for (i
= 0; i
< len
; i
++)
3443 RTVEC_ELT (newpat_vec_with_clobbers
, i
) = XVECEXP (newpat
, 0, i
);
  /* We have recognized nothing yet.  */
  insn_code_number = -1;

  /* See if this is a PARALLEL of two SETs where one SET's destination is
     a register that is unused and this isn't marked as an instruction that
     might trap in an EH region.  In that case, we just need the other SET.
     We prefer this over the PARALLEL.

     This can occur when simplifying a divmod insn.  We *must* test for this
     case here because the code below that splits two independent SETs doesn't
     handle this case correctly when it updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.  */
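  /* Illustrative sketch, not part of the original sources: after combining a
     divmod-style pattern the new I3 body might look like

	(parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
		   (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])

     where I3 carries a REG_UNUSED note for (reg:SI 101); since the unused
     SET has no side effects and I3 cannot throw, only the other SET needs
     to be kept.  The register numbers are made up for the example.  */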
3464 if (!(added_sets_2
&& i1
== 0)
3465 && is_parallel_of_n_reg_sets (newpat
, 2)
3466 && asm_noperands (newpat
) < 0)
3468 rtx set0
= XVECEXP (newpat
, 0, 0);
3469 rtx set1
= XVECEXP (newpat
, 0, 1);
3470 rtx oldpat
= newpat
;
3472 if (((REG_P (SET_DEST (set1
))
3473 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set1
)))
3474 || (GET_CODE (SET_DEST (set1
)) == SUBREG
3475 && find_reg_note (i3
, REG_UNUSED
, SUBREG_REG (SET_DEST (set1
)))))
3476 && insn_nothrow_p (i3
)
3477 && !side_effects_p (SET_SRC (set1
)))
3480 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3483 else if (((REG_P (SET_DEST (set0
))
3484 && find_reg_note (i3
, REG_UNUSED
, SET_DEST (set0
)))
3485 || (GET_CODE (SET_DEST (set0
)) == SUBREG
3486 && find_reg_note (i3
, REG_UNUSED
,
3487 SUBREG_REG (SET_DEST (set0
)))))
3488 && insn_nothrow_p (i3
)
3489 && !side_effects_p (SET_SRC (set0
)))
3492 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
3494 if (insn_code_number
>= 0)
3495 changed_i3_dest
= 1;
3498 if (insn_code_number
< 0)
3502 /* Is the result of combination a valid instruction? */
3503 if (insn_code_number
< 0)
3504 insn_code_number
= recog_for_combine (&newpat
, i3
, &new_i3_notes
);
  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */
3512 if (i1
&& insn_code_number
< 0 && GET_CODE (newpat
) == SET
3513 && asm_noperands (newpat
) < 0)
3515 rtx parallel
, *split
;
3516 rtx_insn
*m_split_insn
;
3518 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3519 use I2DEST as a scratch register will help. In the latter case,
3520 convert I2DEST to the mode of the source of NEWPAT if we can. */
3522 m_split_insn
= combine_split_insns (newpat
, i3
);
3524 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3525 inputs of NEWPAT. */
3527 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3528 possible to try that as a scratch reg. This would require adding
3529 more code to make it work though. */
3531 if (m_split_insn
== 0 && ! reg_overlap_mentioned_p (i2dest
, newpat
))
3533 machine_mode new_mode
= GET_MODE (SET_DEST (newpat
));
3535 /* First try to split using the original register as a
3536 scratch register. */
3537 parallel
= gen_rtx_PARALLEL (VOIDmode
,
3538 gen_rtvec (2, newpat
,
3539 gen_rtx_CLOBBER (VOIDmode
,
3541 m_split_insn
= combine_split_insns (parallel
, i3
);
3543 /* If that didn't work, try changing the mode of I2DEST if
3545 if (m_split_insn
== 0
3546 && new_mode
!= GET_MODE (i2dest
)
3547 && new_mode
!= VOIDmode
3548 && can_change_dest_mode (i2dest
, added_sets_2
, new_mode
))
3550 machine_mode old_mode
= GET_MODE (i2dest
);
3553 if (REGNO (i2dest
) < FIRST_PSEUDO_REGISTER
)
3554 ni2dest
= gen_rtx_REG (new_mode
, REGNO (i2dest
));
3557 SUBST_MODE (regno_reg_rtx
[REGNO (i2dest
)], new_mode
);
3558 ni2dest
= regno_reg_rtx
[REGNO (i2dest
)];
3561 parallel
= (gen_rtx_PARALLEL
3563 gen_rtvec (2, newpat
,
3564 gen_rtx_CLOBBER (VOIDmode
,
3566 m_split_insn
= combine_split_insns (parallel
, i3
);
3568 if (m_split_insn
== 0
3569 && REGNO (i2dest
) >= FIRST_PSEUDO_REGISTER
)
3573 adjust_reg_mode (regno_reg_rtx
[REGNO (i2dest
)], old_mode
);
3574 buf
= undobuf
.undos
;
3575 undobuf
.undos
= buf
->next
;
3576 buf
->next
= undobuf
.frees
;
3577 undobuf
.frees
= buf
;
3581 i2scratch
= m_split_insn
!= 0;
3584 /* If recog_for_combine has discarded clobbers, try to use them
3585 again for the split. */
3586 if (m_split_insn
== 0 && newpat_vec_with_clobbers
)
3588 parallel
= gen_rtx_PARALLEL (VOIDmode
, newpat_vec_with_clobbers
);
3589 m_split_insn
= combine_split_insns (parallel
, i3
);
3592 if (m_split_insn
&& NEXT_INSN (m_split_insn
) == NULL_RTX
)
3594 rtx m_split_pat
= PATTERN (m_split_insn
);
3595 insn_code_number
= recog_for_combine (&m_split_pat
, i3
, &new_i3_notes
);
3596 if (insn_code_number
>= 0)
3597 newpat
= m_split_pat
;
3599 else if (m_split_insn
&& NEXT_INSN (NEXT_INSN (m_split_insn
)) == NULL_RTX
3600 && (next_nonnote_nondebug_insn (i2
) == i3
3601 || ! use_crosses_set_p (PATTERN (m_split_insn
), DF_INSN_LUID (i2
))))
3604 rtx newi3pat
= PATTERN (NEXT_INSN (m_split_insn
));
3605 newi2pat
= PATTERN (m_split_insn
);
3607 i3set
= single_set (NEXT_INSN (m_split_insn
));
3608 i2set
= single_set (m_split_insn
);
3610 i2_code_number
= recog_for_combine (&newi2pat
, i2
, &new_i2_notes
);
3612 /* If I2 or I3 has multiple SETs, we won't know how to track
3613 register status, so don't use these insns. If I2's destination
3614 is used between I2 and I3, we also can't use these insns. */
3616 if (i2_code_number
>= 0 && i2set
&& i3set
3617 && (next_nonnote_nondebug_insn (i2
) == i3
3618 || ! reg_used_between_p (SET_DEST (i2set
), i2
, i3
)))
3619 insn_code_number
= recog_for_combine (&newi3pat
, i3
,
3621 if (insn_code_number
>= 0)
3624 /* It is possible that both insns now set the destination of I3.
3625 If so, we must show an extra use of it. */
3627 if (insn_code_number
>= 0)
3629 rtx new_i3_dest
= SET_DEST (i3set
);
3630 rtx new_i2_dest
= SET_DEST (i2set
);
3632 while (GET_CODE (new_i3_dest
) == ZERO_EXTRACT
3633 || GET_CODE (new_i3_dest
) == STRICT_LOW_PART
3634 || GET_CODE (new_i3_dest
) == SUBREG
)
3635 new_i3_dest
= XEXP (new_i3_dest
, 0);
3637 while (GET_CODE (new_i2_dest
) == ZERO_EXTRACT
3638 || GET_CODE (new_i2_dest
) == STRICT_LOW_PART
3639 || GET_CODE (new_i2_dest
) == SUBREG
)
3640 new_i2_dest
= XEXP (new_i2_dest
, 0);
3642 if (REG_P (new_i3_dest
)
3643 && REG_P (new_i2_dest
)
3644 && REGNO (new_i3_dest
) == REGNO (new_i2_dest
)
3645 && REGNO (new_i2_dest
) < reg_n_sets_max
)
3646 INC_REG_N_SETS (REGNO (new_i2_dest
), 1);
  /* If we can split it and use I2DEST, go ahead and see if that
     helps things be recognized.  Verify that none of the registers
     are set between I2 and I3.  */
  if (insn_code_number < 0
      && (split = find_split_point (&newpat, i3, false)) != 0
      && (!HAVE_cc0 || REG_P (i2dest))
      /* We need I2DEST in the proper mode.  If it is a hard register
	 or the only use of a pseudo, we can change its mode.
	 Make sure we don't change a hard register to have a mode that
	 isn't valid for it, or change the number of registers.  */
      && (GET_MODE (*split) == GET_MODE (i2dest)
	  || GET_MODE (*split) == VOIDmode
	  || can_change_dest_mode (i2dest, added_sets_2,
				   GET_MODE (*split)))
      && (next_nonnote_nondebug_insn (i2) == i3
	  || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
      /* We can't overwrite I2DEST if its value is still used by
	 NEWPAT.  */
      && ! reg_referenced_p (i2dest, newpat))
    {
      rtx newdest = i2dest;
      enum rtx_code split_code = GET_CODE (*split);
      machine_mode split_mode = GET_MODE (*split);
      bool subst_done = false;
      newi2pat = NULL_RTX;

      i2scratch = true;

      /* *SPLIT may be part of I2SRC, so make sure we have the
	 original expression around for later debug processing.
	 We should not need I2SRC any more in other cases.  */
      if (MAY_HAVE_DEBUG_INSNS)
	i2src = copy_rtx (i2src);
      else
	i2src = NULL;

      /* Get NEWDEST as a register in the proper mode.  We have already
	 validated that we can do this.  */
      if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	{
	  if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
	    newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	  else
	    {
	      SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
	      newdest = regno_reg_rtx[REGNO (i2dest)];
	    }
	}

      /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	 an ASHIFT.  This can occur if it was inside a PLUS and hence
	 appeared to be a memory address.  This is a kludge.  */
      if (split_code == MULT
	  && CONST_INT_P (XEXP (*split, 1))
	  && INTVAL (XEXP (*split, 1)) > 0
	  && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
	{
	  SUBST (*split, gen_rtx_ASHIFT (split_mode,
					 XEXP (*split, 0), GEN_INT (i)));
	  /* Update split_code because we may not have a multiply
	     anymore.  */
	  split_code = GET_CODE (*split);
	}

#ifdef INSN_SCHEDULING
      /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	 be written as a ZERO_EXTEND.  */
      if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	{
#ifdef LOAD_EXTEND_OP
	  /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
	     what it really is.  */
	  if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
	      == SIGN_EXTEND)
	    SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						SUBREG_REG (*split)));
	  else
#endif
	    SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						SUBREG_REG (*split)));
	}
#endif
      /* Attempt to split binary operators using arithmetic identities.  */
      if (BINARY_P (SET_SRC (newpat))
	  && split_mode == GET_MODE (SET_SRC (newpat))
	  && ! side_effects_p (SET_SRC (newpat)))
	{
	  rtx setsrc = SET_SRC (newpat);
	  machine_mode mode = GET_MODE (setsrc);
	  enum rtx_code code = GET_CODE (setsrc);
	  rtx src_op0 = XEXP (setsrc, 0);
	  rtx src_op1 = XEXP (setsrc, 1);

	  /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	  if (rtx_equal_p (src_op0, src_op1))
	    {
	      newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
	      SUBST (XEXP (setsrc, 0), newdest);
	      SUBST (XEXP (setsrc, 1), newdest);
	      subst_done = true;
	    }
	  /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	  else if ((code == PLUS || code == MULT)
		   && GET_CODE (src_op0) == code
		   && GET_CODE (XEXP (src_op0, 0)) == code
		   && (INTEGRAL_MODE_P (mode)
		       || (FLOAT_MODE_P (mode)
			   && flag_unsafe_math_optimizations)))
	    {
	      rtx p = XEXP (XEXP (src_op0, 0), 0);
	      rtx q = XEXP (XEXP (src_op0, 0), 1);
	      rtx r = XEXP (src_op0, 1);
	      rtx s = src_op1;

	      /* Split both "((X op Y) op X) op Y" and
		 "((X op Y) op Y) op X" as "T op T" where T is
		 "X op Y".  */
	      if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
		  || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
		{
		  newi2pat = gen_rtx_SET (VOIDmode, newdest,
					  XEXP (src_op0, 0));
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((X op X) op Y) op Y)" as "T op T" where
		 T is "X op Y".  */
	      else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
		{
		  rtx tmp = simplify_gen_binary (code, mode, p, r);
		  newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	    }
	}

      if (!subst_done)
	{
	  newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
	  SUBST (*split, newdest);
	}

      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      /* recog_for_combine might have added CLOBBERs to newi2pat.
	 Make sure NEWPAT does not depend on the clobbered regs.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	  if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
	    {
	      rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
	      if (reg_overlap_mentioned_p (reg, newpat))
		{
		  undo_all ();
		  return 0;
		}
	    }

      /* If the split point was a MULT and we didn't have one before,
	 don't use one now.  */
      if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
    }
  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a register-register
     copy.  This saves at least one insn, more if register allocation can
     eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				   DF_INSN_LUID (i2))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp_expr)
		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp_expr)
		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }
  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.

     Also do this if we started with two insns and (at least) one of the
     resulting sets is a noop; this noop will be deleted later.  */

  else if (insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
	       || set_noop_p (XVECEXP (newpat, 0, 1)))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);

      /* Normally, it doesn't matter which of the two is done first,
	 but the one that references cc0 can't be the second, and
	 one which uses any regs/memory set in between i2 and i3 can't
	 be first.  The PARALLEL might also have been pre-existing in i3,
	 so we need to make sure that we won't wrongly hoist a SET to i2
	 that would conflict with a death note present in there.  */
      if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
	  && !(REG_P (SET_DEST (set1))
	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i2, REG_DEAD,
				 SUBREG_REG (SET_DEST (set1))))
	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
	  /* If I3 is a jump, ensure that set0 is a jump so that
	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
	 )
	{
	  newi2pat = set1;
	  newpat = set0;
	}
      else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
	       && !(REG_P (SET_DEST (set0))
		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i2, REG_DEAD,
				      SUBREG_REG (SET_DEST (set0))))
	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
	       /* If I3 is a jump, ensure that set1 is a jump so that
		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
	      )
	{
	  newi2pat = set0;
	  newpat = set1;
	}
      else
	{
	  undo_all ();
	  return 0;
	}

      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	{
	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    {
	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		  {
		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		    if (reg_overlap_mentioned_p (reg, newpat))
		      {
			undo_all ();
			return 0;
		      }
		  }
	    }

	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}
    }
  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }

  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  if (HAVE_cc0)
    {
      rtx_insn *p = prev_nonnote_insn (i3);
      if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	  && sets_cc0_p (newi2pat))
	{
	  undo_all ();
	  return 0;
	}
    }

  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }
  if (MAY_HAVE_DEBUG_INSNS)
    {
      struct undo *undo;

      for (undo = undobuf.undos; undo; undo = undo->next)
	if (undo->kind == UNDO_MODE)
	  {
	    rtx reg = *undo->where.r;
	    machine_mode new_mode = GET_MODE (reg);
	    machine_mode old_mode = undo->old_contents.m;

	    /* Temporarily revert mode back.  */
	    adjust_reg_mode (reg, old_mode);

	    if (reg == i2dest && i2scratch)
	      {
		/* If we used i2dest as a scratch register with a
		   different mode, substitute it for the original
		   i2src while its original mode is temporarily
		   restored, and then clear i2scratch so that we don't
		   do it again later.  */
		propagate_for_debug (i2, last_combined_insn, reg, i2src,
				     this_basic_block);
		i2scratch = false;
		/* Put back the new mode.  */
		adjust_reg_mode (reg, new_mode);
	      }
	    else
	      {
		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
		rtx_insn *first, *last;

		if (reg == i2dest)
		  {
		    first = i2;
		    last = last_combined_insn;
		  }
		else
		  {
		    first = i3;
		    last = undobuf.other_insn;
		    if (!last)
		      last = last_combined_insn;
		    else if (DF_INSN_LUID (last)
			     < DF_INSN_LUID (last_combined_insn))
		      last = last_combined_insn;
		  }

		/* We're dealing with a reg that changed mode but not
		   meaning, so we want to turn it into a subreg for
		   the new mode.  However, because of REG sharing and
		   because its mode had already changed, we have to do
		   it in two steps.  First, replace any debug uses of
		   reg, with its original mode temporarily restored,
		   with this copy we have created; then, replace the
		   copy with the SUBREG of the original shared reg,
		   once again changed to the new mode.  */
		propagate_for_debug (first, last, reg, tempreg,
				     this_basic_block);
		adjust_reg_mode (reg, new_mode);
		propagate_for_debug (first, last, tempreg,
				     lowpart_subreg (old_mode, reg, new_mode),
				     this_basic_block);
	      }
	  }
    }
  /* If we will be able to accept this, we have made a
     change to the destination of I3.  This requires us to
     do a few adjustments.  */

  if (changed_i3_dest)
    {
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);
    }

  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
	 ensure that they are still valid.  Then add any non-duplicate
	 notes added by recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if ((REG_NOTE_KIND (note) == REG_DEAD
	       && !reg_referenced_p (XEXP (note, 0),
				     PATTERN (undobuf.other_insn)))
	      ||(REG_NOTE_KIND (note) == REG_UNUSED
		 && !reg_set_p (XEXP (note, 0),
				PATTERN (undobuf.other_insn))))
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);
    }
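
  /* Handle the case where the SETs produced for I2 and I3 were swapped
     above, so that I3's old destination is now set by I2.  */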
  if (swap_i2i3)
    {
      rtx_insn *insn;
      struct insn_link *link;
      rtx ni2dest;

      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);

      /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	 so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We must remove this link.
	 The simplest way to remove the link is to point it at I1,
	 which we know will be a NOTE.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
      else
	ni2dest = SET_DEST (newi2pat);

      for (insn = NEXT_INSN (i3);
	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		    || insn != BB_HEAD (this_basic_block->next_bb));
	   insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
	    {
	      FOR_EACH_LOG_LINK (link, insn)
		if (link->insn == i3)
		  link->insn = i1;

	      break;
	    }
	}
    }

  {
    rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
    struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
    rtx midnotes = 0;
    int from_luid;
    /* Compute which registers we expect to eliminate.  newi2pat may be setting
       either i3dest or i2dest, so we must check it.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
		   || !i2dest_killed
		   ? 0 : i2dest);
    /* For i1, we need to compute both local elimination and global
       elimination information with respect to newi2pat because i1dest
       may be the same as i3dest, in which case newi2pat may be setting
       i1dest.  Global information is used when distributing REG_DEAD
       note for i2 and i3, in which case it does matter if newi2pat sets
       i1dest or not.

       Local information is used when distributing REG_DEAD note for i1,
       in which case it doesn't matter if newi2pat sets i1dest or not.
       See PR62151, if we have four insns combination:
	   i0: r0 <- i0src
	   i1: r1 <- i1src (using r0)
		     REG_DEAD (r0)
	   i2: r0 <- i2src (using r1)
	   i3: r3 <- i3src (using r0)
		     REG_DEAD (r0)
       From i1's point of view, r0 is eliminated, no matter if it is set
       by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
       should be discarded.

       Note local information only affects cases in forms like "I1->I2->I3",
       "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
       "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
       i0dest anyway.  */
    rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
			 || !i1dest_killed
			 ? 0 : i1dest);
    rtx elim_i1 = (local_elim_i1 == 0
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   ? 0 : i1dest);
    /* Same case as i1.  */
    rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
			 ? 0 : i0dest);
    rtx elim_i0 = (local_elim_i0 == 0
		   || (newi2pat && reg_set_p (i0dest, newi2pat))
		   ? 0 : i0dest);
    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
    if (i0)
      i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything that is
       shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (i0notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    i0notes = copy_rtx_if_shared (i0notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);

	reset_used_flags (call_usage);
	call_usage = copy_rtx (call_usage);

	if (substed_i2)
	  {
	    /* I2SRC must still be meaningful at this point.  Some splitting
	       operations can invalidate I2SRC, but those operations do not
	       apply to calls.  */
	    gcc_assert (i2src);
	    replace_rtx (call_usage, i2dest, i2src);
	  }

	if (substed_i1)
	  replace_rtx (call_usage, i1dest, i1src);
	if (substed_i0)
	  replace_rtx (call_usage, i0dest, i0src);

	CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;
    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp_insn = NEXT_INSN (i2);
		 temp_insn
		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block) != temp_insn);
		 temp_insn = NEXT_INSN (temp_insn))
	      if (temp_insn != i3 && INSN_P (temp_insn))
		FOR_EACH_LOG_LINK (link, temp_insn)
		  if (link->insn == i2)
		    link->insn = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = NULL;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = NULL;
    REG_NOTES (i2) = 0;
    if (newi2pat)
      {
	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      {
	if (MAY_HAVE_DEBUG_INSNS && i2src)
	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
			       this_basic_block);
	SET_INSN_DELETED (i2);
      }

    if (i1)
      {
	LOG_LINKS (i1) = NULL;
	REG_NOTES (i1) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
			       this_basic_block);
	SET_INSN_DELETED (i1);
      }

    if (i0)
      {
	LOG_LINKS (i0) = NULL;
	REG_NOTES (i0) = 0;
	if (MAY_HAVE_DEBUG_INSNS)
	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
			       this_basic_block);
	SET_INSN_DELETED (i0);
      }
    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (i0)
      from_luid = DF_INSN_LUID (i0);
    else if (i1)
      from_luid = DF_INSN_LUID (i1);
    else
      from_luid = DF_INSN_LUID (i2);
    if (newi2pat)
      move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
    move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
			elim_i2, local_elim_i1, local_elim_i0);
    if (i0notes)
      distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, local_elim_i0);
    if (midnotes)
      distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
			elim_i2, elim_i1, elim_i0);
    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
			NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
			    elim_i1, elim_i0);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    elim_i2, elim_i1, elim_i0);
      }

    if (i2dest_in_i2src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    if (i0dest_in_i0src)
      {
	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
	if (newi2pat && reg_set_p (i0dest, newi2pat))
	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
			    NULL_RTX, NULL_RTX);
	else
	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      }

    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);
    distribute_links (i0links);
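
    /* Refresh the recorded last-known values of the destination registers
       of the insns we removed or rewrote, so that later combination
       attempts in this block see up-to-date information.  */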
    if (REG_P (i2dest))
      {
	struct insn_link *link;
	rtx_insn *i2_insn = 0;
	rtx i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST.  If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, things
	   will be set correctly in combine_instructions.  */
	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = link->insn, i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src
	    && REGNO (i2dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i2dest), -1);
      }

    if (i1 && REG_P (i1dest))
      {
	struct insn_link *link;
	rtx_insn *i1_insn = 0;
	rtx i1_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = link->insn, i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	if (! added_sets_1
	    && ! i1dest_in_i1src
	    && REGNO (i1dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i1dest), -1);
      }

    if (i0 && REG_P (i0dest))
      {
	struct insn_link *link;
	rtx_insn *i0_insn = 0;
	rtx i0_val = 0, set;

	FOR_EACH_LOG_LINK (link, i3)
	  if ((set = single_set (link->insn)) != 0
	      && rtx_equal_p (i0dest, SET_DEST (set)))
	    i0_insn = link->insn, i0_val = SET_SRC (set);

	record_value_for_reg (i0dest, i0_insn, i0_val);

	if (! added_sets_0
	    && ! i0dest_in_i0src
	    && REGNO (i0dest) < reg_n_sets_max)
	  INC_REG_N_SETS (REGNO (i0dest), -1);
      }

    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order is important, because newi2pat
       can affect nonzero_bits of newpat.  */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
  }
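
  /* Show the modified or deleted insns in the dump file and let the
     dataflow machinery rescan each of them.  */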
  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i0 ");
	  dump_insn_slim (dump_file, i0);
	}
      df_insn_rescan (i0);
    }

  if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  /* Set new_direct_jump_p if a new return or simple jump instruction
     has been created.  Adjust the CFG accordingly.  */
  if (returnjump_p (i3) || any_uncondjump_p (i3))
    {
      *new_direct_jump_p = 1;
      mark_jump_label (PATTERN (i3), i3, 0);
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && (returnjump_p (undobuf.other_insn)
	  || any_uncondjump_p (undobuf.other_insn)))
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  /* A noop might also need cleaning up of CFG, if it comes from the
     simplification of a jump.  */
  if (JUMP_P (i3)
      && GET_CODE (newpat) == SET
      && SET_SRC (newpat) == pc_rtx
      && SET_DEST (newpat) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (i3);
    }

  if (undobuf.other_insn != NULL_RTX
      && JUMP_P (undobuf.other_insn)
      && GET_CODE (PATTERN (undobuf.other_insn)) == SET
      && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
      && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
    {
      *new_direct_jump_p = 1;
      update_cfg_for_uncondjump (undobuf.other_insn);
    }

  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}
/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	case UNDO_LINKS:
	  *undo->where.l = undo->old_contents.l;
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = 0;
}
/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}
/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn, false);

    case MEM:
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (GET_CODE (XEXP (x, 0)) == CONST
	  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  machine_mode address_mode = get_address_mode (x);

	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (address_mode,
				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps we can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
							    XEXP (x, 0)),
					       subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_addr_space_p
		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
		    MEM_ADDR_SPACE (x)))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
	 separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
					    MEM_ADDR_SPACE (x))
	  && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
	  && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
		&& OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	return &XEXP (XEXP (x, 0), 0);
      break;

    case SET:
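      /* For a SET, try a series of heuristics in turn, looking for a good
	 split point in either the source or the destination.  */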
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn, true);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn, false);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
	  && CONST_INT_P (SET_SRC (x))
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  machine_mode mode = GET_MODE (dest);
	  unsigned HOST_WIDE_INT mask
	    = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_PRECISION (mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, mode,
					  simplify_gen_binary (AND, mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn, true);
	  if (split && split != &SET_SRC (x))
	    return split;
	}

      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn, false);
		}
	    }
	  break;

	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;

	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_PRECISION (GET_MODE (inner));
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0
	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
	{
	  machine_mode mode = GET_MODE (SET_SRC (x));

	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      unsigned HOST_WIDE_INT mask
		= ((unsigned HOST_WIDE_INT) 1 << len) - 1;
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  gen_int_mode (mask, mode)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_PRECISION (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_PRECISION (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn, true);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}
      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;

    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn, set_src);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;

    case PLUS:
    case MINUS:
      /* Canonicalization can produce (minus A (mult B C)), where C is a
	 constant.  It may be better to try splitting (plus (mult B -C) A)
	 instead if this isn't a multiply by a power of two.  */
      if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
	{
	  machine_mode mode = GET_MODE (x);
	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
	  SUBST (*loc, gen_rtx_PLUS (mode,
				     gen_rtx_MULT (mode,
						   XEXP (XEXP (x, 1), 0),
						   gen_int_mode (other_int,
								 mode)),
				     XEXP (x, 0)));
	  return find_split_point (loc, insn, set_src);
	}

      /* Split at a multiply-accumulate instruction.  However if this is
	 the SET_SRC, we likely do not have such an instruction and it's
	 worthless to try this split.  */
      if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
	return loc;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:	/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn, false);
      if (split)
	return split;
      /* ... fall through ...  */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn, false);
      if (split)
	return split;
      /* ... fall through ...  */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn, false);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}
/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   IN_COND is nonzero if we are at the top level of a condition.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new_rtx;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)				\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  /* Do not substitute into clobbers of regs -- this will never result in
     valid RTL.  */
  if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
    return x;

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;
5202 Suppose that FROM is a register that appears within TO.
5203 Then, after that subexpression has been scanned once by `subst',
5204 the second time it is scanned, TO may be found. If we were
5205 to scan TO here, we would find FROM within it and create a
5206 self-referent rtl structure which is completely wrong. */
5207 if (COMBINE_RTX_EQUAL_P (x
, to
))
5210 /* Parallel asm_operands need special attention because all of the
5211 inputs are shared across the arms. Furthermore, unsharing the
5212 rtl results in recognition failures. Failure to handle this case
5213 specially can result in circular rtl.
5215 Solve this by doing a normal pass across the first entry of the
5216 parallel, and only processing the SET_DESTs of the subsequent
5219 if (code
== PARALLEL
5220 && GET_CODE (XVECEXP (x
, 0, 0)) == SET
5221 && GET_CODE (SET_SRC (XVECEXP (x
, 0, 0))) == ASM_OPERANDS
)
5223 new_rtx
= subst (XVECEXP (x
, 0, 0), from
, to
, 0, 0, unique_copy
);
5225 /* If this substitution failed, this whole thing fails. */
5226 if (GET_CODE (new_rtx
) == CLOBBER
5227 && XEXP (new_rtx
, 0) == const0_rtx
)
5230 SUBST (XVECEXP (x
, 0, 0), new_rtx
);
5232 for (i
= XVECLEN (x
, 0) - 1; i
>= 1; i
--)
5234 rtx dest
= SET_DEST (XVECEXP (x
, 0, i
));
5237 && GET_CODE (dest
) != CC0
5238 && GET_CODE (dest
) != PC
)
5240 new_rtx
= subst (dest
, from
, to
, 0, 0, unique_copy
);
5242 /* If this substitution failed, this whole thing fails. */
5243 if (GET_CODE (new_rtx
) == CLOBBER
5244 && XEXP (new_rtx
, 0) == const0_rtx
)
5247 SUBST (SET_DEST (XVECEXP (x
, 0, i
)), new_rtx
);
5253 len
= GET_RTX_LENGTH (code
);
5254 fmt
= GET_RTX_FORMAT (code
);
5256 /* We don't need to process a SET_DEST that is a register, CC0,
5257 or PC, so set up to skip this common case. All other cases
5258 where we want to suppress replacing something inside a
5259 SET_SRC are handled via the IN_DEST operand. */
5261 && (REG_P (SET_DEST (x
))
5262 || GET_CODE (SET_DEST (x
)) == CC0
5263 || GET_CODE (SET_DEST (x
)) == PC
))
5266 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5269 op0_mode
= GET_MODE (XEXP (x
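
      /* Recursively substitute into each operand of X, as directed by its
	 rtx format string.  */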
, 0));
5271 for (i
= 0; i
< len
; i
++)
5276 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
5278 if (COMBINE_RTX_EQUAL_P (XVECEXP (x
, i
, j
), from
))
5280 new_rtx
= (unique_copy
&& n_occurrences
5281 ? copy_rtx (to
) : to
);
5286 new_rtx
= subst (XVECEXP (x
, i
, j
), from
, to
, 0, 0,
5289 /* If this substitution failed, this whole thing
5291 if (GET_CODE (new_rtx
) == CLOBBER
5292 && XEXP (new_rtx
, 0) == const0_rtx
)
5296 SUBST (XVECEXP (x
, i
, j
), new_rtx
);
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new_rtx = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new_rtx))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this:  It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
		      && (!HAVE_cc0
			  || ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
						SUBREG_BYTE (x),
						GET_MODE (x)) < 0)
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new_rtx = subst (XEXP (x, i), from, to,
				 (((in_dest
				    && (code == SUBREG || code == STRICT_LOW_PART
					|| code == ZERO_EXTRACT))
				   || code == SET)
				  && i == 0),
				 code == IF_THEN_ELSE && i == 0,
				 unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
		return new_rtx;

	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
		{
		  machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new_rtx,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (CONST_SCALAR_INT_P (new_rtx)
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new_rtx, GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new_rtx);
	    }
	}
    }
  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
	return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X.  */
      op0_mode = VOIDmode;
    }

  return x;
}
/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
   of a condition.  */

static rtx
combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
		      int in_cond)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx temp;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }

  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	      && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		    && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (CONST_INT_P (true_rtx)
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (CONST_INT_P (false_rtx)
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }
  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }
5605 /* First see if we can apply the inverse distributive law. */
5606 if (code
== PLUS
|| code
== MINUS
5607 || code
== AND
|| code
== IOR
|| code
== XOR
)
5609 x
= apply_distributive_law (x
);
5610 code
= GET_CODE (x
);
5611 op0_mode
= VOIDmode
;
  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
      if (GET_CODE (XEXP (x, 0)) == code)
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	      rtx tem = inner_op0;
	      inner_op0 = inner_op1;
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
	    return simplify_gen_binary (code, mode, other, inner);
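  /* Worked example (added, illustrative): for
     x = (plus (plus REG (const_int 4)) (const_int 8)), the inner pair
     4 and 8 folds to 12 and the whole expression becomes
     (plus REG (const_int 12)).  Likewise (and (and A B) A) re-associates
     so the repeated operand appears only once.  */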
  /* A little bit of algebraic simplification here.  */

      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	  /* Don't call gen_lowpart if the inner mode
	     is VOIDmode and we cannot simplify it, as SUBREG without
	     inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)

	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,

      /* If op is known to have all lower bits zero, the result is zero.  */
	  && SCALAR_INT_MODE_P (mode)
	  && SCALAR_INT_MODE_P (op0_mode)
	  && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	  && HWI_COMPUTABLE_MODE_P (op0_mode)
	  && (nonzero_bits (SUBREG_REG (x), op0_mode)
	      & GET_MODE_MASK (mode)) == 0)
	return CONST0_RTX (mode);

      /* Don't change the mode of the MEM if that would change the meaning
	 of the MEM.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
      if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));
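      /* Sanity-check example (added, illustrative): in SImode with C == 31,
	 (ashiftrt X 31) is 0 or -1 depending on the sign bit of X, so its
	 negation is 0 or 1, which is exactly (lshiftrt X 31).  */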
      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
				   GET_MODE_PRECISION (mode) - 1 - i),
	     GET_MODE_PRECISION (mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)

      if (HWI_COMPUTABLE_MODE_P (mode))
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* We can truncate a constant value and return it.  */
      if (CONST_INT_P (XEXP (x, 0)))
	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));

      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
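      /* Worked instance (added; the values are illustrative only): for an
	 8-bit field, pow2 = 0x100, so the AND mask is 0xff and c = 0x80.
	 If the low byte of <foo> is 0xff, then (0xff ^ 0x80) - 0x80
	 == 0x7f - 0x80 == -1, i.e. the byte has been sign-extended,
	 which matches the ashift/ashiftrt pair generated below.  */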
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && CONST_INT_P (XEXP (x, 1))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_PRECISION (mode) - (i + 1)),
	   GET_MODE_PRECISION (mode) - (i + 1));
      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && nonzero_bits (XEXP (x, 0), mode) == 1)
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
				 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
				 GET_MODE_PRECISION (mode) - 1),
	   GET_MODE_PRECISION (mode) - 1);
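      /* Quick check of the identity (added, illustrative, SImode): when x
	 is 0 or 1, x - 1 is -1 or 0; (x ^ 1) is 1 or 0, and shifting that
	 bit up to the sign position and arithmetically back down gives the
	 same -1 or 0, so the rewrite preserves the value.  */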
      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a&3.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER
	      && (GET_CODE (temp) != IOR
		  || ((XEXP (temp, 0) != XEXP (x, 0)
		       || XEXP (temp, 1) != XEXP (x, 1))
		      && (XEXP (temp, 0) != XEXP (x, 1)
			  || XEXP (temp, 1) != XEXP (x, 0)))))
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (GET_CODE (XEXP (x, 1)) == AND
	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
	  && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
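      /* Quick numeric check (added, illustrative): with pow2 = 8,
	 x - (x & -8) clears everything but the low three bits, i.e. it
	 equals x & 7, which is what the rewrite above produces.  */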
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS)
	  rtx result = distribute_and_simplify_rtx (x, 0);

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math
	  && GET_CODE (XEXP (x, 0)) == DIV)
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
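      /* Example (added, illustrative): (udiv (ashift X 3) (const_int 8))
	 becomes (lshiftrt (ashift X 3) 3), which the shift simplifier can
	 then reduce further, here to an AND of X with a mask that clears
	 its top three bits.  */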
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.

	     Don't apply these optimizations if the caller would
	     prefer a comparison rather than a value.
	     E.g., for the condition in an IF_THEN_ELSE most targets need
	     an explicit comparison.  */
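	  /* Summary of the cases handled below (added commentary, not from
	     the original source):
	     with STORE_FLAG_VALUE == 1 and nonzero_bits (X) == 1,
	       (ne X 0) -> X          and  (eq X 0) -> (xor X 1);
	     with X known to be 0 or -1,
	       (ne X 0) -> (neg X)    and  (eq X 0) -> (plus X 1).
	     The STORE_FLAG_VALUE == -1 cases are the mirror images.  */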
5962 else if (STORE_FLAG_VALUE
== 1
5963 && new_code
== NE
&& GET_MODE_CLASS (mode
) == MODE_INT
5964 && op1
== const0_rtx
5965 && mode
== GET_MODE (op0
)
5966 && nonzero_bits (op0
, mode
) == 1)
5967 return gen_lowpart (mode
,
5968 expand_compound_operation (op0
));
5970 else if (STORE_FLAG_VALUE
== 1
5971 && new_code
== NE
&& GET_MODE_CLASS (mode
) == MODE_INT
5972 && op1
== const0_rtx
5973 && mode
== GET_MODE (op0
)
5974 && (num_sign_bit_copies (op0
, mode
)
5975 == GET_MODE_PRECISION (mode
)))
5977 op0
= expand_compound_operation (op0
);
5978 return simplify_gen_unary (NEG
, mode
,
5979 gen_lowpart (mode
, op0
),
5983 else if (STORE_FLAG_VALUE
== 1
5984 && new_code
== EQ
&& GET_MODE_CLASS (mode
) == MODE_INT
5985 && op1
== const0_rtx
5986 && mode
== GET_MODE (op0
)
5987 && nonzero_bits (op0
, mode
) == 1)
5989 op0
= expand_compound_operation (op0
);
5990 return simplify_gen_binary (XOR
, mode
,
5991 gen_lowpart (mode
, op0
),
5995 else if (STORE_FLAG_VALUE
== 1
5996 && new_code
== EQ
&& GET_MODE_CLASS (mode
) == MODE_INT
5997 && op1
== const0_rtx
5998 && mode
== GET_MODE (op0
)
5999 && (num_sign_bit_copies (op0
, mode
)
6000 == GET_MODE_PRECISION (mode
)))
6002 op0
= expand_compound_operation (op0
);
6003 return plus_constant (mode
, gen_lowpart (mode
, op0
), 1);
6006 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6011 else if (STORE_FLAG_VALUE
== -1
6012 && new_code
== NE
&& GET_MODE_CLASS (mode
) == MODE_INT
6013 && op1
== const0_rtx
6014 && mode
== GET_MODE (op0
)
6015 && (num_sign_bit_copies (op0
, mode
)
6016 == GET_MODE_PRECISION (mode
)))
6017 return gen_lowpart (mode
,
6018 expand_compound_operation (op0
));
6020 else if (STORE_FLAG_VALUE
== -1
6021 && new_code
== NE
&& GET_MODE_CLASS (mode
) == MODE_INT
6022 && op1
== const0_rtx
6023 && mode
== GET_MODE (op0
)
6024 && nonzero_bits (op0
, mode
) == 1)
6026 op0
= expand_compound_operation (op0
);
6027 return simplify_gen_unary (NEG
, mode
,
6028 gen_lowpart (mode
, op0
),
6032 else if (STORE_FLAG_VALUE
== -1
6033 && new_code
== EQ
&& GET_MODE_CLASS (mode
) == MODE_INT
6034 && op1
== const0_rtx
6035 && mode
== GET_MODE (op0
)
6036 && (num_sign_bit_copies (op0
, mode
)
6037 == GET_MODE_PRECISION (mode
)))
6039 op0
= expand_compound_operation (op0
);
6040 return simplify_gen_unary (NOT
, mode
,
6041 gen_lowpart (mode
, op0
),
6045 /* If X is 0/1, (eq X 0) is X-1. */
6046 else if (STORE_FLAG_VALUE
== -1
6047 && new_code
== EQ
&& GET_MODE_CLASS (mode
) == MODE_INT
6048 && op1
== const0_rtx
6049 && mode
== GET_MODE (op0
)
6050 && nonzero_bits (op0
, mode
) == 1)
6052 op0
= expand_compound_operation (op0
);
6053 return plus_constant (mode
, gen_lowpart (mode
, op0
), -1);
	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && HWI_COMPUTABLE_MODE_P (mode)
	      && val_signbit_p (mode, STORE_FLAG_VALUE)
	      && op1 == const0_rtx
	      && mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
	      x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
					expand_compound_operation (op0),
					GET_MODE_PRECISION (mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
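	  /* Example (added, illustrative): in SImode with
	     STORE_FLAG_VALUE == 0x80000000, if only bit 3 of X can be
	     nonzero, (ne X 0) becomes (ashift X 28), moving that bit into
	     the sign position the target will test.  */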
	  /* If the code changed, return a whole new comparison.
	     We also need to avoid using SUBST in cases where
	     simplify_comparison has widened a comparison with a CONST_INT,
	     since in that case the wider CONST_INT may fail the sanity
	     checks in do_SUBST.  */
	  if (new_code != code
	      || (CONST_INT_P (op1)
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
      return simplify_if_then_else (x);

      /* If we are processing SET_DEST, we are done.  */

      return expand_compound_operation (x);

      return simplify_set (x);

      return simplify_logical (x);

      /* If this is a shift by a constant amount, simplify it.  */
      if (CONST_INT_P (XEXP (x, 1)))
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      ((unsigned HOST_WIDE_INT) 1
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

simplify_if_then_else (rtx x)
  machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  enum rtx_code false_code;
  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
6168 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6169 in it is being compared against certain values. Get the true and false
6170 comparisons and see if that says anything about the value of each arm. */
6173 && ((false_code
= reversed_comparison_code (cond
, NULL
))
6175 && REG_P (XEXP (cond
, 0)))
6178 rtx from
= XEXP (cond
, 0);
6179 rtx true_val
= XEXP (cond
, 1);
6180 rtx false_val
= true_val
;
6183 /* If FALSE_CODE is EQ, swap the codes and arms. */
6185 if (false_code
== EQ
)
6187 swapped
= 1, true_code
= EQ
, false_code
= NE
;
6188 temp
= true_rtx
, true_rtx
= false_rtx
, false_rtx
= temp
;
6191 /* If we are comparing against zero and the expression being tested has
6192 only a single bit that might be nonzero, that is its value when it is
6193 not equal to zero. Similarly if it is known to be -1 or 0. */
6195 if (true_code
== EQ
&& true_val
== const0_rtx
6196 && exact_log2 (nzb
= nonzero_bits (from
, GET_MODE (from
))) >= 0)
6199 false_val
= gen_int_mode (nzb
, GET_MODE (from
));
6201 else if (true_code
== EQ
&& true_val
== const0_rtx
6202 && (num_sign_bit_copies (from
, GET_MODE (from
))
6203 == GET_MODE_PRECISION (GET_MODE (from
))))
6206 false_val
= constm1_rtx
;
6209 /* Now simplify an arm if we know the value of the register in the
6210 branch and it is used in the arm. Be careful due to the potential
6211 of locally-shared RTL. */
6213 if (reg_mentioned_p (from
, true_rtx
))
6214 true_rtx
= subst (known_cond (copy_rtx (true_rtx
), true_code
,
6216 pc_rtx
, pc_rtx
, 0, 0, 0);
6217 if (reg_mentioned_p (from
, false_rtx
))
6218 false_rtx
= subst (known_cond (copy_rtx (false_rtx
), false_code
,
6220 pc_rtx
, pc_rtx
, 0, 0, 0);
6222 SUBST (XEXP (x
, 1), swapped
? false_rtx
: true_rtx
);
6223 SUBST (XEXP (x
, 2), swapped
? true_rtx
: false_rtx
);
6225 true_rtx
= XEXP (x
, 1);
6226 false_rtx
= XEXP (x
, 2);
6227 true_code
= GET_CODE (cond
);
6230 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6231 reversed, do so to avoid needing two sets of patterns for
6232 subtract-and-branch insns. Similarly if we have a constant in the true
6233 arm, the false arm is the same as the first operand of the comparison, or
6234 the false arm is more complicated than the true arm. */
6237 && reversed_comparison_code (cond
, NULL
) != UNKNOWN
6238 && (true_rtx
== pc_rtx
6239 || (CONSTANT_P (true_rtx
)
6240 && !CONST_INT_P (false_rtx
) && false_rtx
!= pc_rtx
)
6241 || true_rtx
== const0_rtx
6242 || (OBJECT_P (true_rtx
) && !OBJECT_P (false_rtx
))
6243 || (GET_CODE (true_rtx
) == SUBREG
&& OBJECT_P (SUBREG_REG (true_rtx
))
6244 && !OBJECT_P (false_rtx
))
6245 || reg_mentioned_p (true_rtx
, false_rtx
)
6246 || rtx_equal_p (false_rtx
, XEXP (cond
, 0))))
6248 true_code
= reversed_comparison_code (cond
, NULL
);
6249 SUBST (XEXP (x
, 0), reversed_comparison (cond
, GET_MODE (cond
)));
6250 SUBST (XEXP (x
, 1), false_rtx
);
6251 SUBST (XEXP (x
, 2), true_rtx
);
6253 temp
= true_rtx
, true_rtx
= false_rtx
, false_rtx
= temp
;
6256 /* It is possible that the conditional has been simplified out. */
6257 true_code
= GET_CODE (cond
);
6258 comparison_p
= COMPARISON_P (cond
);
6261 /* If the two arms are identical, we don't need the comparison. */
6263 if (rtx_equal_p (true_rtx
, false_rtx
) && ! side_effects_p (cond
))
6266 /* Convert a == b ? b : a to "a". */
6267 if (true_code
== EQ
&& ! side_effects_p (cond
)
6268 && !HONOR_NANS (mode
)
6269 && rtx_equal_p (XEXP (cond
, 0), false_rtx
)
6270 && rtx_equal_p (XEXP (cond
, 1), true_rtx
))
6272 else if (true_code
== NE
&& ! side_effects_p (cond
)
6273 && !HONOR_NANS (mode
)
6274 && rtx_equal_p (XEXP (cond
, 0), true_rtx
)
6275 && rtx_equal_p (XEXP (cond
, 1), false_rtx
))
6278 /* Look for cases where we have (abs x) or (neg (abs X)). */
6280 if (GET_MODE_CLASS (mode
) == MODE_INT
6282 && XEXP (cond
, 1) == const0_rtx
6283 && GET_CODE (false_rtx
) == NEG
6284 && rtx_equal_p (true_rtx
, XEXP (false_rtx
, 0))
6285 && rtx_equal_p (true_rtx
, XEXP (cond
, 0))
6286 && ! side_effects_p (true_rtx
))
6291 return simplify_gen_unary (ABS
, mode
, true_rtx
, mode
);
6295 simplify_gen_unary (NEG
, mode
,
6296 simplify_gen_unary (ABS
, mode
, true_rtx
, mode
),
6302 /* Look for MIN or MAX. */
6304 if ((! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
6306 && rtx_equal_p (XEXP (cond
, 0), true_rtx
)
6307 && rtx_equal_p (XEXP (cond
, 1), false_rtx
)
6308 && ! side_effects_p (cond
))
6313 return simplify_gen_binary (SMAX
, mode
, true_rtx
, false_rtx
);
6316 return simplify_gen_binary (SMIN
, mode
, true_rtx
, false_rtx
);
6319 return simplify_gen_binary (UMAX
, mode
, true_rtx
, false_rtx
);
6322 return simplify_gen_binary (UMIN
, mode
, true_rtx
, false_rtx
);
  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE.  Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 nor -1, but it isn't worth checking for.  */
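  /* Worked example (added, illustrative): with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A 0) (plus Z (const_int 4)) Z) can be rewritten as
     (plus Z (mult (ne A 0) (const_int 4))): when the condition is false the
     multiply is 0 and PLUS is an identity, when true it adds 4 as before.  */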
6334 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
6336 && GET_MODE_CLASS (mode
) == MODE_INT
6337 && ! side_effects_p (x
))
6339 rtx t
= make_compound_operation (true_rtx
, SET
);
6340 rtx f
= make_compound_operation (false_rtx
, SET
);
6341 rtx cond_op0
= XEXP (cond
, 0);
6342 rtx cond_op1
= XEXP (cond
, 1);
6343 enum rtx_code op
= UNKNOWN
, extend_op
= UNKNOWN
;
6344 machine_mode m
= mode
;
6345 rtx z
= 0, c1
= NULL_RTX
;
6347 if ((GET_CODE (t
) == PLUS
|| GET_CODE (t
) == MINUS
6348 || GET_CODE (t
) == IOR
|| GET_CODE (t
) == XOR
6349 || GET_CODE (t
) == ASHIFT
6350 || GET_CODE (t
) == LSHIFTRT
|| GET_CODE (t
) == ASHIFTRT
)
6351 && rtx_equal_p (XEXP (t
, 0), f
))
6352 c1
= XEXP (t
, 1), op
= GET_CODE (t
), z
= f
;
6354 /* If an identity-zero op is commutative, check whether there
6355 would be a match if we swapped the operands. */
6356 else if ((GET_CODE (t
) == PLUS
|| GET_CODE (t
) == IOR
6357 || GET_CODE (t
) == XOR
)
6358 && rtx_equal_p (XEXP (t
, 1), f
))
6359 c1
= XEXP (t
, 0), op
= GET_CODE (t
), z
= f
;
6360 else if (GET_CODE (t
) == SIGN_EXTEND
6361 && (GET_CODE (XEXP (t
, 0)) == PLUS
6362 || GET_CODE (XEXP (t
, 0)) == MINUS
6363 || GET_CODE (XEXP (t
, 0)) == IOR
6364 || GET_CODE (XEXP (t
, 0)) == XOR
6365 || GET_CODE (XEXP (t
, 0)) == ASHIFT
6366 || GET_CODE (XEXP (t
, 0)) == LSHIFTRT
6367 || GET_CODE (XEXP (t
, 0)) == ASHIFTRT
)
6368 && GET_CODE (XEXP (XEXP (t
, 0), 0)) == SUBREG
6369 && subreg_lowpart_p (XEXP (XEXP (t
, 0), 0))
6370 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t
, 0), 0)), f
)
6371 && (num_sign_bit_copies (f
, GET_MODE (f
))
6373 (GET_MODE_PRECISION (mode
)
6374 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t
, 0), 0))))))
6376 c1
= XEXP (XEXP (t
, 0), 1); z
= f
; op
= GET_CODE (XEXP (t
, 0));
6377 extend_op
= SIGN_EXTEND
;
6378 m
= GET_MODE (XEXP (t
, 0));
6380 else if (GET_CODE (t
) == SIGN_EXTEND
6381 && (GET_CODE (XEXP (t
, 0)) == PLUS
6382 || GET_CODE (XEXP (t
, 0)) == IOR
6383 || GET_CODE (XEXP (t
, 0)) == XOR
)
6384 && GET_CODE (XEXP (XEXP (t
, 0), 1)) == SUBREG
6385 && subreg_lowpart_p (XEXP (XEXP (t
, 0), 1))
6386 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t
, 0), 1)), f
)
6387 && (num_sign_bit_copies (f
, GET_MODE (f
))
6389 (GET_MODE_PRECISION (mode
)
6390 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t
, 0), 1))))))
6392 c1
= XEXP (XEXP (t
, 0), 0); z
= f
; op
= GET_CODE (XEXP (t
, 0));
6393 extend_op
= SIGN_EXTEND
;
6394 m
= GET_MODE (XEXP (t
, 0));
6396 else if (GET_CODE (t
) == ZERO_EXTEND
6397 && (GET_CODE (XEXP (t
, 0)) == PLUS
6398 || GET_CODE (XEXP (t
, 0)) == MINUS
6399 || GET_CODE (XEXP (t
, 0)) == IOR
6400 || GET_CODE (XEXP (t
, 0)) == XOR
6401 || GET_CODE (XEXP (t
, 0)) == ASHIFT
6402 || GET_CODE (XEXP (t
, 0)) == LSHIFTRT
6403 || GET_CODE (XEXP (t
, 0)) == ASHIFTRT
)
6404 && GET_CODE (XEXP (XEXP (t
, 0), 0)) == SUBREG
6405 && HWI_COMPUTABLE_MODE_P (mode
)
6406 && subreg_lowpart_p (XEXP (XEXP (t
, 0), 0))
6407 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t
, 0), 0)), f
)
6408 && ((nonzero_bits (f
, GET_MODE (f
))
6409 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t
, 0), 0))))
6412 c1
= XEXP (XEXP (t
, 0), 1); z
= f
; op
= GET_CODE (XEXP (t
, 0));
6413 extend_op
= ZERO_EXTEND
;
6414 m
= GET_MODE (XEXP (t
, 0));
6416 else if (GET_CODE (t
) == ZERO_EXTEND
6417 && (GET_CODE (XEXP (t
, 0)) == PLUS
6418 || GET_CODE (XEXP (t
, 0)) == IOR
6419 || GET_CODE (XEXP (t
, 0)) == XOR
)
6420 && GET_CODE (XEXP (XEXP (t
, 0), 1)) == SUBREG
6421 && HWI_COMPUTABLE_MODE_P (mode
)
6422 && subreg_lowpart_p (XEXP (XEXP (t
, 0), 1))
6423 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t
, 0), 1)), f
)
6424 && ((nonzero_bits (f
, GET_MODE (f
))
6425 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t
, 0), 1))))
6428 c1
= XEXP (XEXP (t
, 0), 0); z
= f
; op
= GET_CODE (XEXP (t
, 0));
6429 extend_op
= ZERO_EXTEND
;
6430 m
= GET_MODE (XEXP (t
, 0));
6435 temp
= subst (simplify_gen_relational (true_code
, m
, VOIDmode
,
6436 cond_op0
, cond_op1
),
6437 pc_rtx
, pc_rtx
, 0, 0, 0);
6438 temp
= simplify_gen_binary (MULT
, m
, temp
,
6439 simplify_gen_binary (MULT
, m
, c1
,
6441 temp
= subst (temp
, pc_rtx
, pc_rtx
, 0, 0, 0);
6442 temp
= simplify_gen_binary (op
, m
, gen_lowpart (m
, z
), temp
);
6444 if (extend_op
!= UNKNOWN
)
6445 temp
= simplify_gen_unary (extend_op
, mode
, temp
, m
);
  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */
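  /* Example (added, illustrative): if A is known to be 0 or 1,
     (if_then_else (ne A 0) (const_int 8) (const_int 0)) is just A shifted
     left by 3, i.e. (ashift A 3), which is what the code below builds.  */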
6456 if (true_code
== NE
&& XEXP (cond
, 1) == const0_rtx
6457 && false_rtx
== const0_rtx
&& CONST_INT_P (true_rtx
)
6458 && ((1 == nonzero_bits (XEXP (cond
, 0), mode
)
6459 && (i
= exact_log2 (UINTVAL (true_rtx
))) >= 0)
6460 || ((num_sign_bit_copies (XEXP (cond
, 0), mode
)
6461 == GET_MODE_PRECISION (mode
))
6462 && (i
= exact_log2 (-UINTVAL (true_rtx
))) >= 0)))
6464 simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
6465 gen_lowpart (mode
, XEXP (cond
, 0)), i
);
  /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG when nonzero_bits (REG) == 8.  */
6468 if (true_code
== NE
&& XEXP (cond
, 1) == const0_rtx
6469 && false_rtx
== const0_rtx
&& CONST_INT_P (true_rtx
)
6470 && GET_MODE (XEXP (cond
, 0)) == mode
6471 && (UINTVAL (true_rtx
) & GET_MODE_MASK (mode
))
6472 == nonzero_bits (XEXP (cond
, 0), mode
)
6473 && (i
= exact_log2 (UINTVAL (true_rtx
) & GET_MODE_MASK (mode
))) >= 0)
6474 return XEXP (cond
, 0);
6479 /* Simplify X, a SET expression. Return the new expression. */
6482 simplify_set (rtx x
)
6484 rtx src
= SET_SRC (x
);
6485 rtx dest
= SET_DEST (x
);
6487 = GET_MODE (src
) != VOIDmode
? GET_MODE (src
) : GET_MODE (dest
);
6488 rtx_insn
*other_insn
;
6491 /* (set (pc) (return)) gets written as (return). */
6492 if (GET_CODE (dest
) == PC
&& ANY_RETURN_P (src
))
6495 /* Now that we know for sure which bits of SRC we are using, see if we can
6496 simplify the expression for the object knowing that we only need the
6499 if (GET_MODE_CLASS (mode
) == MODE_INT
&& HWI_COMPUTABLE_MODE_P (mode
))
6501 src
= force_to_mode (src
, mode
, ~(unsigned HOST_WIDE_INT
) 0, 0);
6502 SUBST (SET_SRC (x
), src
);
6505 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6506 the comparison result and try to simplify it unless we already have used
6507 undobuf.other_insn. */
6508 if ((GET_MODE_CLASS (mode
) == MODE_CC
6509 || GET_CODE (src
) == COMPARE
6511 && (cc_use
= find_single_use (dest
, subst_insn
, &other_insn
)) != 0
6512 && (undobuf
.other_insn
== 0 || other_insn
== undobuf
.other_insn
)
6513 && COMPARISON_P (*cc_use
)
6514 && rtx_equal_p (XEXP (*cc_use
, 0), dest
))
6516 enum rtx_code old_code
= GET_CODE (*cc_use
);
6517 enum rtx_code new_code
;
6519 int other_changed
= 0;
6520 rtx inner_compare
= NULL_RTX
;
6521 machine_mode compare_mode
= GET_MODE (dest
);
6523 if (GET_CODE (src
) == COMPARE
)
6525 op0
= XEXP (src
, 0), op1
= XEXP (src
, 1);
6526 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
6528 inner_compare
= op0
;
6529 op0
= XEXP (inner_compare
, 0), op1
= XEXP (inner_compare
, 1);
6533 op0
= src
, op1
= CONST0_RTX (GET_MODE (src
));
6535 tmp
= simplify_relational_operation (old_code
, compare_mode
, VOIDmode
,
6538 new_code
= old_code
;
6539 else if (!CONSTANT_P (tmp
))
6541 new_code
= GET_CODE (tmp
);
6542 op0
= XEXP (tmp
, 0);
6543 op1
= XEXP (tmp
, 1);
6547 rtx pat
= PATTERN (other_insn
);
6548 undobuf
.other_insn
= other_insn
;
6549 SUBST (*cc_use
, tmp
);
6551 /* Attempt to simplify CC user. */
6552 if (GET_CODE (pat
) == SET
)
6554 rtx new_rtx
= simplify_rtx (SET_SRC (pat
));
6555 if (new_rtx
!= NULL_RTX
)
6556 SUBST (SET_SRC (pat
), new_rtx
);
6559 /* Convert X into a no-op move. */
6560 SUBST (SET_DEST (x
), pc_rtx
);
6561 SUBST (SET_SRC (x
), pc_rtx
);
6565 /* Simplify our comparison, if possible. */
6566 new_code
= simplify_comparison (new_code
, &op0
, &op1
);
6568 #ifdef SELECT_CC_MODE
6569 /* If this machine has CC modes other than CCmode, check to see if we
6570 need to use a different CC mode here. */
6571 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
)
6572 compare_mode
= GET_MODE (op0
);
6573 else if (inner_compare
6574 && GET_MODE_CLASS (GET_MODE (inner_compare
)) == MODE_CC
6575 && new_code
== old_code
6576 && op0
== XEXP (inner_compare
, 0)
6577 && op1
== XEXP (inner_compare
, 1))
6578 compare_mode
= GET_MODE (inner_compare
);
6580 compare_mode
= SELECT_CC_MODE (new_code
, op0
, op1
);
6582 /* If the mode changed, we have to change SET_DEST, the mode in the
6583 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6584 a hard register, just build new versions with the proper mode. If it
6585 is a pseudo, we lose unless it is only time we set the pseudo, in
6586 which case we can safely change its mode. */
6587 if (!HAVE_cc0
&& compare_mode
!= GET_MODE (dest
))
6589 if (can_change_dest_mode (dest
, 0, compare_mode
))
6591 unsigned int regno
= REGNO (dest
);
6594 if (regno
< FIRST_PSEUDO_REGISTER
)
6595 new_dest
= gen_rtx_REG (compare_mode
, regno
);
6598 SUBST_MODE (regno_reg_rtx
[regno
], compare_mode
);
6599 new_dest
= regno_reg_rtx
[regno
];
6602 SUBST (SET_DEST (x
), new_dest
);
6603 SUBST (XEXP (*cc_use
, 0), new_dest
);
6609 #endif /* SELECT_CC_MODE */
6611 /* If the code changed, we have to build a new comparison in
6612 undobuf.other_insn. */
6613 if (new_code
!= old_code
)
6615 int other_changed_previously
= other_changed
;
6616 unsigned HOST_WIDE_INT mask
;
6617 rtx old_cc_use
= *cc_use
;
6619 SUBST (*cc_use
, gen_rtx_fmt_ee (new_code
, GET_MODE (*cc_use
),
6623 /* If the only change we made was to change an EQ into an NE or
6624 vice versa, OP0 has only one bit that might be nonzero, and OP1
6625 is zero, check if changing the user of the condition code will
6626 produce a valid insn. If it won't, we can keep the original code
6627 in that insn by surrounding our operation with an XOR. */
6629 if (((old_code
== NE
&& new_code
== EQ
)
6630 || (old_code
== EQ
&& new_code
== NE
))
6631 && ! other_changed_previously
&& op1
== const0_rtx
6632 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0
))
6633 && exact_log2 (mask
= nonzero_bits (op0
, GET_MODE (op0
))) >= 0)
6635 rtx pat
= PATTERN (other_insn
), note
= 0;
6637 if ((recog_for_combine (&pat
, other_insn
, ¬e
) < 0
6638 && ! check_asm_operands (pat
)))
6640 *cc_use
= old_cc_use
;
6643 op0
= simplify_gen_binary (XOR
, GET_MODE (op0
), op0
,
6651 undobuf
.other_insn
= other_insn
;
6653 /* Otherwise, if we didn't previously have a COMPARE in the
6654 correct mode, we need one. */
6655 if (GET_CODE (src
) != COMPARE
|| GET_MODE (src
) != compare_mode
)
6657 SUBST (SET_SRC (x
), gen_rtx_COMPARE (compare_mode
, op0
, op1
));
6660 else if (GET_MODE (op0
) == compare_mode
&& op1
== const0_rtx
)
6662 SUBST (SET_SRC (x
), op0
);
6665 /* Otherwise, update the COMPARE if needed. */
6666 else if (XEXP (src
, 0) != op0
|| XEXP (src
, 1) != op1
)
6668 SUBST (SET_SRC (x
), gen_rtx_COMPARE (compare_mode
, op0
, op1
));
6674 /* Get SET_SRC in a form where we have placed back any
6675 compound expressions. Then do the checks below. */
6676 src
= make_compound_operation (src
, SET
);
6677 SUBST (SET_SRC (x
), src
);
6680 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6681 and X being a REG or (subreg (reg)), we may be able to convert this to
6682 (set (subreg:m2 x) (op)).
6684 We can always do this if M1 is narrower than M2 because that means that
6685 we only care about the low bits of the result.
6687 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6688 perform a narrower operation than requested since the high-order bits will
6689 be undefined. On machine where it is defined, this transformation is safe
6690 as long as M1 and M2 have the same number of words. */
6692 if (GET_CODE (src
) == SUBREG
&& subreg_lowpart_p (src
)
6693 && !OBJECT_P (SUBREG_REG (src
))
6694 && (((GET_MODE_SIZE (GET_MODE (src
)) + (UNITS_PER_WORD
- 1))
6696 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src
)))
6697 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
))
6698 #ifndef WORD_REGISTER_OPERATIONS
6699 && (GET_MODE_SIZE (GET_MODE (src
))
6700 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src
))))
6702 #ifdef CANNOT_CHANGE_MODE_CLASS
6703 && ! (REG_P (dest
) && REGNO (dest
) < FIRST_PSEUDO_REGISTER
6704 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest
),
6705 GET_MODE (SUBREG_REG (src
)),
6709 || (GET_CODE (dest
) == SUBREG
6710 && REG_P (SUBREG_REG (dest
)))))
6712 SUBST (SET_DEST (x
),
6713 gen_lowpart (GET_MODE (SUBREG_REG (src
)),
6715 SUBST (SET_SRC (x
), SUBREG_REG (src
));
6717 src
= SET_SRC (x
), dest
= SET_DEST (x
);
6720 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6723 && GET_CODE (src
) == SUBREG
6724 && subreg_lowpart_p (src
)
6725 && (GET_MODE_PRECISION (GET_MODE (src
))
6726 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src
)))))
6728 rtx inner
= SUBREG_REG (src
);
6729 machine_mode inner_mode
= GET_MODE (inner
);
6731 /* Here we make sure that we don't have a sign bit on. */
6732 if (val_signbit_known_clear_p (GET_MODE (src
),
6733 nonzero_bits (inner
, inner_mode
)))
6735 SUBST (SET_SRC (x
), inner
);
6740 #ifdef LOAD_EXTEND_OP
6741 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6742 would require a paradoxical subreg. Replace the subreg with a
6743 zero_extend to avoid the reload that would otherwise be required. */
6745 if (GET_CODE (src
) == SUBREG
&& subreg_lowpart_p (src
)
6746 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src
)))
6747 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src
))) != UNKNOWN
6748 && SUBREG_BYTE (src
) == 0
6749 && paradoxical_subreg_p (src
)
6750 && MEM_P (SUBREG_REG (src
)))
6753 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src
))),
6754 GET_MODE (src
), SUBREG_REG (src
)));
6760 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6761 are comparing an item known to be 0 or -1 against 0, use a logical
6762 operation instead. Check for one of the arms being an IOR of the other
6763 arm with some value. We compute three terms to be IOR'ed together. In
6764 practice, at most two will be nonzero. Then we do the IOR's. */
6766 if (GET_CODE (dest
) != PC
6767 && GET_CODE (src
) == IF_THEN_ELSE
6768 && GET_MODE_CLASS (GET_MODE (src
)) == MODE_INT
6769 && (GET_CODE (XEXP (src
, 0)) == EQ
|| GET_CODE (XEXP (src
, 0)) == NE
)
6770 && XEXP (XEXP (src
, 0), 1) == const0_rtx
6771 && GET_MODE (src
) == GET_MODE (XEXP (XEXP (src
, 0), 0))
6772 #ifdef HAVE_conditional_move
6773 && ! can_conditionally_move_p (GET_MODE (src
))
6775 && (num_sign_bit_copies (XEXP (XEXP (src
, 0), 0),
6776 GET_MODE (XEXP (XEXP (src
, 0), 0)))
6777 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src
, 0), 0))))
6778 && ! side_effects_p (src
))
6780 rtx true_rtx
= (GET_CODE (XEXP (src
, 0)) == NE
6781 ? XEXP (src
, 1) : XEXP (src
, 2));
6782 rtx false_rtx
= (GET_CODE (XEXP (src
, 0)) == NE
6783 ? XEXP (src
, 2) : XEXP (src
, 1));
6784 rtx term1
= const0_rtx
, term2
, term3
;
6786 if (GET_CODE (true_rtx
) == IOR
6787 && rtx_equal_p (XEXP (true_rtx
, 0), false_rtx
))
6788 term1
= false_rtx
, true_rtx
= XEXP (true_rtx
, 1), false_rtx
= const0_rtx
;
6789 else if (GET_CODE (true_rtx
) == IOR
6790 && rtx_equal_p (XEXP (true_rtx
, 1), false_rtx
))
6791 term1
= false_rtx
, true_rtx
= XEXP (true_rtx
, 0), false_rtx
= const0_rtx
;
6792 else if (GET_CODE (false_rtx
) == IOR
6793 && rtx_equal_p (XEXP (false_rtx
, 0), true_rtx
))
6794 term1
= true_rtx
, false_rtx
= XEXP (false_rtx
, 1), true_rtx
= const0_rtx
;
6795 else if (GET_CODE (false_rtx
) == IOR
6796 && rtx_equal_p (XEXP (false_rtx
, 1), true_rtx
))
6797 term1
= true_rtx
, false_rtx
= XEXP (false_rtx
, 0), true_rtx
= const0_rtx
;
6799 term2
= simplify_gen_binary (AND
, GET_MODE (src
),
6800 XEXP (XEXP (src
, 0), 0), true_rtx
);
6801 term3
= simplify_gen_binary (AND
, GET_MODE (src
),
6802 simplify_gen_unary (NOT
, GET_MODE (src
),
6803 XEXP (XEXP (src
, 0), 0),
6808 simplify_gen_binary (IOR
, GET_MODE (src
),
6809 simplify_gen_binary (IOR
, GET_MODE (src
),
6816 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6817 whole thing fail. */
6818 if (GET_CODE (src
) == CLOBBER
&& XEXP (src
, 0) == const0_rtx
)
6820 else if (GET_CODE (dest
) == CLOBBER
&& XEXP (dest
, 0) == const0_rtx
)
6823 /* Convert this into a field assignment operation, if possible. */
6824 return make_field_assignment (x
);
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
   result.  */

simplify_logical (rtx x)
6833 machine_mode mode
= GET_MODE (x
);
6834 rtx op0
= XEXP (x
, 0);
6835 rtx op1
= XEXP (x
, 1);
6837 switch (GET_CODE (x
))
6840 /* We can call simplify_and_const_int only if we don't lose
6841 any (sign) bits when converting INTVAL (op1) to
6842 "unsigned HOST_WIDE_INT". */
6843 if (CONST_INT_P (op1
)
6844 && (HWI_COMPUTABLE_MODE_P (mode
)
6845 || INTVAL (op1
) > 0))
6847 x
= simplify_and_const_int (x
, mode
, op0
, INTVAL (op1
));
6848 if (GET_CODE (x
) != AND
)
6855 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6856 apply the distributive law and then the inverse distributive
6857 law to see if things simplify. */
6858 if (GET_CODE (op0
) == IOR
|| GET_CODE (op0
) == XOR
)
6860 rtx result
= distribute_and_simplify_rtx (x
, 0);
6864 if (GET_CODE (op1
) == IOR
|| GET_CODE (op1
) == XOR
)
6866 rtx result
= distribute_and_simplify_rtx (x
, 1);
6873 /* If we have (ior (and A B) C), apply the distributive law and then
6874 the inverse distributive law to see if things simplify. */
6876 if (GET_CODE (op0
) == AND
)
6878 rtx result
= distribute_and_simplify_rtx (x
, 0);
6883 if (GET_CODE (op1
) == AND
)
6885 rtx result
= distribute_and_simplify_rtx (x
, 1);
/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */
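/* Example of the expansion direction (added, illustrative): in SImode,
   (zero_extract:SI X (const_int 8) (const_int 4)) - an 8-bit unsigned field
   at bit 4 - is expanded into a left shift by 20 followed by a logical right
   shift by 24, which the shift simplifier then turns into
   (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)); a sign_extract
   keeps the arithmetic right shift instead.  */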
6912 expand_compound_operation (rtx x
)
6914 unsigned HOST_WIDE_INT pos
= 0, len
;
6916 unsigned int modewidth
;
6919 switch (GET_CODE (x
))
6924 /* We can't necessarily use a const_int for a multiword mode;
6925 it depends on implicitly extending the value.
6926 Since we don't know the right way to extend it,
6927 we can't tell whether the implicit way is right.
6929 Even for a mode that is no wider than a const_int,
6930 we can't win, because we need to sign extend one of its bits through
6931 the rest of it, and we don't know which bit. */
6932 if (CONST_INT_P (XEXP (x
, 0)))
6935 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6936 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6937 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6938 reloaded. If not for that, MEM's would very rarely be safe.
6940 Reject MODEs bigger than a word, because we might not be able
6941 to reference a two-register group starting with an arbitrary register
6942 (and currently gen_lowpart might crash for a SUBREG). */
6944 if (GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))) > UNITS_PER_WORD
)
6947 /* Reject MODEs that aren't scalar integers because turning vector
6948 or complex modes into shifts causes problems. */
6950 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x
, 0))))
6953 len
= GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)));
6954 /* If the inner object has VOIDmode (the only way this can happen
6955 is if it is an ASM_OPERANDS), we can't do anything since we don't
6956 know how much masking to do. */
6965 /* ... fall through ... */
6968 /* If the operand is a CLOBBER, just return it. */
6969 if (GET_CODE (XEXP (x
, 0)) == CLOBBER
)
6972 if (!CONST_INT_P (XEXP (x
, 1))
6973 || !CONST_INT_P (XEXP (x
, 2))
6974 || GET_MODE (XEXP (x
, 0)) == VOIDmode
)
6977 /* Reject MODEs that aren't scalar integers because turning vector
6978 or complex modes into shifts causes problems. */
6980 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x
, 0))))
6983 len
= INTVAL (XEXP (x
, 1));
6984 pos
= INTVAL (XEXP (x
, 2));
6986 /* This should stay within the object being extracted, fail otherwise. */
6987 if (len
+ pos
> GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0))))
6990 if (BITS_BIG_ENDIAN
)
6991 pos
= GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0))) - len
- pos
;
6998 /* Convert sign extension to zero extension, if we know that the high
6999 bit is not set, as this is easier to optimize. It will be converted
7000 back to cheaper alternative in make_extraction. */
7001 if (GET_CODE (x
) == SIGN_EXTEND
7002 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x
))
7003 && ((nonzero_bits (XEXP (x
, 0), GET_MODE (XEXP (x
, 0)))
7004 & ~(((unsigned HOST_WIDE_INT
)
7005 GET_MODE_MASK (GET_MODE (XEXP (x
, 0))))
7009 rtx temp
= gen_rtx_ZERO_EXTEND (GET_MODE (x
), XEXP (x
, 0));
7010 rtx temp2
= expand_compound_operation (temp
);
7012 /* Make sure this is a profitable operation. */
7013 if (set_src_cost (x
, optimize_this_for_speed_p
)
7014 > set_src_cost (temp2
, optimize_this_for_speed_p
))
7016 else if (set_src_cost (x
, optimize_this_for_speed_p
)
7017 > set_src_cost (temp
, optimize_this_for_speed_p
))
7023 /* We can optimize some special cases of ZERO_EXTEND. */
7024 if (GET_CODE (x
) == ZERO_EXTEND
)
7026 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7027 know that the last value didn't have any inappropriate bits
7029 if (GET_CODE (XEXP (x
, 0)) == TRUNCATE
7030 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == GET_MODE (x
)
7031 && HWI_COMPUTABLE_MODE_P (GET_MODE (x
))
7032 && (nonzero_bits (XEXP (XEXP (x
, 0), 0), GET_MODE (x
))
7033 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
7034 return XEXP (XEXP (x
, 0), 0);
7036 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7037 if (GET_CODE (XEXP (x
, 0)) == SUBREG
7038 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == GET_MODE (x
)
7039 && subreg_lowpart_p (XEXP (x
, 0))
7040 && HWI_COMPUTABLE_MODE_P (GET_MODE (x
))
7041 && (nonzero_bits (SUBREG_REG (XEXP (x
, 0)), GET_MODE (x
))
7042 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
7043 return SUBREG_REG (XEXP (x
, 0));
7045 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7046 is a comparison and STORE_FLAG_VALUE permits. This is like
7047 the first case, but it works even when GET_MODE (x) is larger
7048 than HOST_WIDE_INT. */
7049 if (GET_CODE (XEXP (x
, 0)) == TRUNCATE
7050 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == GET_MODE (x
)
7051 && COMPARISON_P (XEXP (XEXP (x
, 0), 0))
7052 && (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
7053 <= HOST_BITS_PER_WIDE_INT
)
7054 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
7055 return XEXP (XEXP (x
, 0), 0);
7057 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7058 if (GET_CODE (XEXP (x
, 0)) == SUBREG
7059 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == GET_MODE (x
)
7060 && subreg_lowpart_p (XEXP (x
, 0))
7061 && COMPARISON_P (SUBREG_REG (XEXP (x
, 0)))
7062 && (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
7063 <= HOST_BITS_PER_WIDE_INT
)
7064 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0)))) == 0)
7065 return SUBREG_REG (XEXP (x
, 0));
7069 /* If we reach here, we want to return a pair of shifts. The inner
7070 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7071 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7072 logical depending on the value of UNSIGNEDP.
7074 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7075 converted into an AND of a shift.
7077 We must check for the case where the left shift would have a negative
7078 count. This can happen in a case like (x >> 31) & 255 on machines
7079 that can't shift by a constant. On those machines, we would first
7080 combine the shift with the AND to produce a variable-position
7081 extraction. Then the constant of 31 would be substituted in
7082 to produce such a position. */
7084 modewidth
= GET_MODE_PRECISION (GET_MODE (x
));
7085 if (modewidth
>= pos
+ len
)
7087 machine_mode mode
= GET_MODE (x
);
7088 tem
= gen_lowpart (mode
, XEXP (x
, 0));
7089 if (!tem
|| GET_CODE (tem
) == CLOBBER
)
7091 tem
= simplify_shift_const (NULL_RTX
, ASHIFT
, mode
,
7092 tem
, modewidth
- pos
- len
);
7093 tem
= simplify_shift_const (NULL_RTX
, unsignedp
? LSHIFTRT
: ASHIFTRT
,
7094 mode
, tem
, modewidth
- len
);
7096 else if (unsignedp
&& len
< HOST_BITS_PER_WIDE_INT
)
7097 tem
= simplify_and_const_int (NULL_RTX
, GET_MODE (x
),
7098 simplify_shift_const (NULL_RTX
, LSHIFTRT
,
7101 ((unsigned HOST_WIDE_INT
) 1 << len
) - 1);
7103 /* Any other cases we can't handle. */
7106 /* If we couldn't do this for some reason, return the original
7108 if (GET_CODE (tem
) == CLOBBER
)
/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS).  If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */
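/* Expansion sketch (added, illustrative): for
   (set (zero_extract:SI R (const_int 8) (const_int 4)) SRC)
   the code below builds, in effect,
   R = (R & ~(0xff << 4)) | ((SRC & 0xff) << 4),
   i.e. clear the field in the destination, mask and shift the source into
   place, and IOR the two together.  */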
7123 expand_field_assignment (const_rtx x
)
7126 rtx pos
; /* Always counts from low bit. */
7128 rtx mask
, cleared
, masked
;
7129 machine_mode compute_mode
;
7131 /* Loop until we find something we can't simplify. */
7134 if (GET_CODE (SET_DEST (x
)) == STRICT_LOW_PART
7135 && GET_CODE (XEXP (SET_DEST (x
), 0)) == SUBREG
)
7137 inner
= SUBREG_REG (XEXP (SET_DEST (x
), 0));
7138 len
= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x
), 0)));
7139 pos
= GEN_INT (subreg_lsb (XEXP (SET_DEST (x
), 0)));
7141 else if (GET_CODE (SET_DEST (x
)) == ZERO_EXTRACT
7142 && CONST_INT_P (XEXP (SET_DEST (x
), 1)))
7144 inner
= XEXP (SET_DEST (x
), 0);
7145 len
= INTVAL (XEXP (SET_DEST (x
), 1));
7146 pos
= XEXP (SET_DEST (x
), 2);
7148 /* A constant position should stay within the width of INNER. */
7149 if (CONST_INT_P (pos
)
7150 && INTVAL (pos
) + len
> GET_MODE_PRECISION (GET_MODE (inner
)))
7153 if (BITS_BIG_ENDIAN
)
7155 if (CONST_INT_P (pos
))
7156 pos
= GEN_INT (GET_MODE_PRECISION (GET_MODE (inner
)) - len
7158 else if (GET_CODE (pos
) == MINUS
7159 && CONST_INT_P (XEXP (pos
, 1))
7160 && (INTVAL (XEXP (pos
, 1))
7161 == GET_MODE_PRECISION (GET_MODE (inner
)) - len
))
7162 /* If position is ADJUST - X, new position is X. */
7163 pos
= XEXP (pos
, 0);
7166 HOST_WIDE_INT prec
= GET_MODE_PRECISION (GET_MODE (inner
));
7167 pos
= simplify_gen_binary (MINUS
, GET_MODE (pos
),
7168 gen_int_mode (prec
- len
,
7175 /* A SUBREG between two modes that occupy the same numbers of words
7176 can be done by moving the SUBREG to the source. */
7177 else if (GET_CODE (SET_DEST (x
)) == SUBREG
7178 /* We need SUBREGs to compute nonzero_bits properly. */
7179 && nonzero_sign_valid
7180 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x
)))
7181 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)
7182 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x
))))
7183 + (UNITS_PER_WORD
- 1)) / UNITS_PER_WORD
)))
7185 x
= gen_rtx_SET (VOIDmode
, SUBREG_REG (SET_DEST (x
)),
7187 (GET_MODE (SUBREG_REG (SET_DEST (x
))),
7194 while (GET_CODE (inner
) == SUBREG
&& subreg_lowpart_p (inner
))
7195 inner
= SUBREG_REG (inner
);
7197 compute_mode
= GET_MODE (inner
);
7199 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7200 if (! SCALAR_INT_MODE_P (compute_mode
))
7204 /* Don't do anything for vector or complex integral types. */
7205 if (! FLOAT_MODE_P (compute_mode
))
7208 /* Try to find an integral mode to pun with. */
7209 imode
= mode_for_size (GET_MODE_BITSIZE (compute_mode
), MODE_INT
, 0);
7210 if (imode
== BLKmode
)
7213 compute_mode
= imode
;
7214 inner
= gen_lowpart (imode
, inner
);
7217 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7218 if (len
>= HOST_BITS_PER_WIDE_INT
)
7221 /* Now compute the equivalent expression. Make a copy of INNER
7222 for the SET_DEST in case it is a MEM into which we will substitute;
7223 we don't want shared RTL in that case. */
7224 mask
= gen_int_mode (((unsigned HOST_WIDE_INT
) 1 << len
) - 1,
7226 cleared
= simplify_gen_binary (AND
, compute_mode
,
7227 simplify_gen_unary (NOT
, compute_mode
,
7228 simplify_gen_binary (ASHIFT
,
7233 masked
= simplify_gen_binary (ASHIFT
, compute_mode
,
7234 simplify_gen_binary (
7236 gen_lowpart (compute_mode
, SET_SRC (x
)),
7240 x
= gen_rtx_SET (VOIDmode
, copy_rtx (inner
),
7241 simplify_gen_binary (IOR
, compute_mode
,
/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents the (variable) starting position; otherwise,
   POS is the (constant) starting bit position.  Both are counted from the LSB.

   UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.

   IN_DEST is nonzero if this is a reference in the destination of a SET.
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */
, rtx inner
, HOST_WIDE_INT pos
,
7269 rtx pos_rtx
, unsigned HOST_WIDE_INT len
, int unsignedp
,
7270 int in_dest
, int in_compare
)
7272 /* This mode describes the size of the storage area
7273 to fetch the overall value from. Within that, we
7274 ignore the POS lowest bits, etc. */
7275 machine_mode is_mode
= GET_MODE (inner
);
7276 machine_mode inner_mode
;
7277 machine_mode wanted_inner_mode
;
7278 machine_mode wanted_inner_reg_mode
= word_mode
;
7279 machine_mode pos_mode
= word_mode
;
7280 machine_mode extraction_mode
= word_mode
;
7281 machine_mode tmode
= mode_for_size (len
, MODE_INT
, 1);
7283 rtx orig_pos_rtx
= pos_rtx
;
7284 HOST_WIDE_INT orig_pos
;
7286 if (pos_rtx
&& CONST_INT_P (pos_rtx
))
7287 pos
= INTVAL (pos_rtx
), pos_rtx
= 0;
7289 if (GET_CODE (inner
) == SUBREG
&& subreg_lowpart_p (inner
))
7291 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7292 consider just the QI as the memory to extract from.
7293 The subreg adds or removes high bits; its mode is
7294 irrelevant to the meaning of this extraction,
7295 since POS and LEN count from the lsb. */
7296 if (MEM_P (SUBREG_REG (inner
)))
7297 is_mode
= GET_MODE (SUBREG_REG (inner
));
7298 inner
= SUBREG_REG (inner
);
7300 else if (GET_CODE (inner
) == ASHIFT
7301 && CONST_INT_P (XEXP (inner
, 1))
7302 && pos_rtx
== 0 && pos
== 0
7303 && len
> UINTVAL (XEXP (inner
, 1)))
7305 /* We're extracting the least significant bits of an rtx
7306 (ashift X (const_int C)), where LEN > C. Extract the
7307 least significant (LEN - C) bits of X, giving an rtx
7308 whose mode is MODE, then shift it left C times. */
7309 new_rtx
= make_extraction (mode
, XEXP (inner
, 0),
7310 0, 0, len
- INTVAL (XEXP (inner
, 1)),
7311 unsignedp
, in_dest
, in_compare
);
7313 return gen_rtx_ASHIFT (mode
, new_rtx
, XEXP (inner
, 1));
7315 else if (GET_CODE (inner
) == TRUNCATE
)
7316 inner
= XEXP (inner
, 0);
7318 inner_mode
= GET_MODE (inner
);
7320 /* See if this can be done without an extraction. We never can if the
7321 width of the field is not the same as that of some integer mode. For
7322 registers, we can only avoid the extraction if the position is at the
7323 low-order bit and this is either not in the destination or we have the
7324 appropriate STRICT_LOW_PART operation available.
7326 For MEM, we can avoid an extract if the field starts on an appropriate
7327 boundary and we can change the mode of the memory reference. */
7329 if (tmode
!= BLKmode
7330 && ((pos_rtx
== 0 && (pos
% BITS_PER_WORD
) == 0
7332 && (inner_mode
== tmode
7334 || TRULY_NOOP_TRUNCATION_MODES_P (tmode
, inner_mode
)
7335 || reg_truncated_to_mode (tmode
, inner
))
7338 && have_insn_for (STRICT_LOW_PART
, tmode
))))
7339 || (MEM_P (inner
) && pos_rtx
== 0
7341 % (STRICT_ALIGNMENT
? GET_MODE_ALIGNMENT (tmode
)
7342 : BITS_PER_UNIT
)) == 0
7343 /* We can't do this if we are widening INNER_MODE (it
7344 may not be aligned, for one thing). */
7345 && GET_MODE_PRECISION (inner_mode
) >= GET_MODE_PRECISION (tmode
)
7346 && (inner_mode
== tmode
7347 || (! mode_dependent_address_p (XEXP (inner
, 0),
7348 MEM_ADDR_SPACE (inner
))
7349 && ! MEM_VOLATILE_P (inner
))))))
7351 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7352 field. If the original and current mode are the same, we need not
7353 adjust the offset. Otherwise, we do if bytes big endian.
7355 If INNER is not a MEM, get a piece consisting of just the field
7356 of interest (in this case POS % BITS_PER_WORD must be 0). */
7360 HOST_WIDE_INT offset
;
7362 /* POS counts from lsb, but make OFFSET count in memory order. */
7363 if (BYTES_BIG_ENDIAN
)
7364 offset
= (GET_MODE_PRECISION (is_mode
) - len
- pos
) / BITS_PER_UNIT
;
7366 offset
= pos
/ BITS_PER_UNIT
;
7368 new_rtx
= adjust_address_nv (inner
, tmode
, offset
);
7370 else if (REG_P (inner
))
7372 if (tmode
!= inner_mode
)
7374 /* We can't call gen_lowpart in a DEST since we
7375 always want a SUBREG (see below) and it would sometimes
7376 return a new hard register. */
7379 HOST_WIDE_INT final_word
= pos
/ BITS_PER_WORD
;
7381 if (WORDS_BIG_ENDIAN
7382 && GET_MODE_SIZE (inner_mode
) > UNITS_PER_WORD
)
7383 final_word
= ((GET_MODE_SIZE (inner_mode
)
7384 - GET_MODE_SIZE (tmode
))
7385 / UNITS_PER_WORD
) - final_word
;
7387 final_word
*= UNITS_PER_WORD
;
7388 if (BYTES_BIG_ENDIAN
&&
7389 GET_MODE_SIZE (inner_mode
) > GET_MODE_SIZE (tmode
))
7390 final_word
+= (GET_MODE_SIZE (inner_mode
)
7391 - GET_MODE_SIZE (tmode
)) % UNITS_PER_WORD
;
7393 /* Avoid creating invalid subregs, for example when
7394 simplifying (x>>32)&255. */
7395 if (!validate_subreg (tmode
, inner_mode
, inner
, final_word
))
7398 new_rtx
= gen_rtx_SUBREG (tmode
, inner
, final_word
);
7401 new_rtx
= gen_lowpart (tmode
, inner
);
        new_rtx = force_to_mode (inner, tmode,
                                 len >= HOST_BITS_PER_WIDE_INT
                                 ? ~(unsigned HOST_WIDE_INT) 0
                                 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
                                 0);

      /* If this extraction is going into the destination of a SET,
         make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
        return (MEM_P (new_rtx) ? new_rtx
                : (GET_CODE (new_rtx) != SUBREG
                   ? gen_rtx_CLOBBER (tmode, const0_rtx)
                   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));

      if (mode == tmode)
        return new_rtx;
      if (CONST_SCALAR_INT_P (new_rtx))
        return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                                         mode, new_rtx, tmode);

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert the extraction to the cheaper of
         sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (tmode)
              && ((nonzero_bits (new_rtx, tmode)
                   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
                  == 0)))
        {
          rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
          rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);

          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
          if (set_src_cost (temp, optimize_this_for_speed_p)
              <= set_src_cost (temp1, optimize_this_for_speed_p))
            return temp;
          return temp1;
        }

      /* Otherwise, sign- or zero-extend unless we already are in the
         proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
                             mode, new_rtx));
    }
  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
          || (pos_rtx != 0 && len != 1)))
    return 0;
  enum extraction_pattern pattern = (in_dest ? EP_insv
                                     : unsignedp ? EP_extzv : EP_extv);

  /* If INNER is not from memory, we want it to have the mode of a register
     extraction pattern's structure operand, or word_mode if there is no
     such pattern.  The same applies to extraction_mode and pos_mode
     and their respective operands.

     For memory, assume that the desired extraction_mode and pos_mode
     are the same as for a register operation, since at present we don't
     have named patterns for aligned memory structures.  */
  struct extraction_insn insn;
  if (get_best_reg_extraction_insn (&insn, pattern,
                                    GET_MODE_BITSIZE (inner_mode), mode))
    {
      wanted_inner_reg_mode = insn.struct_mode;
      pos_mode = insn.pos_mode;
      extraction_mode = insn.field_mode;
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
    extraction_mode = mode;
  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
         natural alignment of the memory.  */
      wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
             > GET_MODE_BITSIZE (wanted_inner_mode))
        {
          wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
          gcc_assert (wanted_inner_mode != VOIDmode);
        }
    }
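  /* For example, for a 13-bit field starting at bit 11 of a MEM, the
     smallest integer mode is HImode, but 11 % 16 + 13 exceeds 16 bits,
     so the loop above widens WANTED_INNER_MODE to SImode.  */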
  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
         BITS_BIG_ENDIAN style.  If position is constant, compute new
         position.  Otherwise, build subtraction.
         Note that POS is relative to the mode of the original argument.
         If it's a MEM we need to recompute POS relative to that.
         However, if we're extracting from (or inserting into) a register,
         we want to recompute POS relative to wanted_inner_mode.  */
      int width = (MEM_P (inner)
                   ? GET_MODE_BITSIZE (is_mode)
                   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
        pos = width - len - pos;
      else
        pos_rtx
          = gen_rtx_MINUS (GET_MODE (pos_rtx),
                           gen_int_mode (width - len, GET_MODE (pos_rtx)),
                           pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
         Note that it can only be less than 0 if !MEM_P (inner).  */
    }
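  /* For example, with a 32-bit IS_MODE, an 8-bit field at little-endian bit
     position 4 becomes position 32 - 8 - 4 = 20 under BITS_BIG_ENDIAN
     numbering.  */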
  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
         endian in both bits and bytes or little endian in bits and bytes.
         If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
         adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
          && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
        offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
                * GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
          && is_mode != wanted_inner_mode)
        offset = (GET_MODE_SIZE (is_mode)
                  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }
  /* If INNER is not memory, get it into the proper mode.  If we are changing
     its mode, POS must be a constant and smaller than the size of the new
     mode.  */
  else if (!MEM_P (inner))
    {
      /* On the LHS, don't create paradoxical subregs implicitly truncating
         the register unless TRULY_NOOP_TRUNCATION.  */
      if (in_dest
          && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
                                             wanted_inner_mode))
        return NULL_RTX;

      if (GET_MODE (inner) != wanted_inner_mode
          && (pos_rtx != 0
              || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
        return NULL_RTX;

      inner = force_to_mode (inner, wanted_inner_mode,
                             pos_rtx
                             || len + orig_pos >= HOST_BITS_PER_WIDE_INT
                             ? ~(unsigned HOST_WIDE_INT) 0
                             : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
                                << orig_pos),
                             0);
    }
  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.  */
  if (pos_rtx != 0
      && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
    {
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
                                     GET_MODE (pos_rtx));

      /* If we know that no extraneous bits are set, and that the high
         bit is not set, convert extraction to cheaper one - either
         SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
         cases.  */
      if (flag_expensive_optimizations
          && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
              && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
                   & ~(((unsigned HOST_WIDE_INT)
                        GET_MODE_MASK (GET_MODE (pos_rtx)))
                       >> 1))
                  == 0)))
        {
          rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
                                          GET_MODE (pos_rtx));

          /* Prefer ZERO_EXTENSION, since it gives more information to
             backends.  */
          if (set_src_cost (temp1, optimize_this_for_speed_p)
              < set_src_cost (temp, optimize_this_for_speed_p))
            temp = temp1;
        }
      pos_rtx = temp;
    }
  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
                             extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new_rtx = gen_lowpart (mode, new_rtx);

  return new_rtx;
}
/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */

static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
         either the value being shifted if the shift count is equal to
         COUNT or a shift for the difference.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= count)
        return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
                                     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
        return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
         make a new operation.  */
      if (CONST_INT_P (XEXP (x, 1))
          && (UINTVAL (XEXP (x, 1))
              & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
          && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
        {
          HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
          return simplify_gen_binary (code, mode, tem,
                                      gen_int_mode (val, mode));
        }
      break;

    default:
      break;
    }

  return 0;
}
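/* For example, extract_left_shift on (plus (ashift X (const_int 2))
   (const_int 12)) with COUNT == 2 returns (plus X (const_int 3)): the
   constant has its two low bits clear, so both operands can absorb the
   right shift by COUNT.  */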
/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address (inside a MEM, PLUS or minus, the latter two
   being kludges), it is MEM.  When processing the arguments of a comparison
   or a COMPARE against zero, it is COMPARE.  */

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  int mode_width = GET_MODE_PRECISION (mode);
  rtx rhs, lhs;
  enum rtx_code next_code;
  int i, j;
  rtx new_rtx = 0;
  rtx tem;
  const char *fmt;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM ? MEM
               : ((code == PLUS || code == MINUS)
                  && SCALAR_INT_MODE_P (mode)) ? MEM
               : ((code == COMPARE || COMPARISON_P (x))
                  && XEXP (x, 1) == const0_rtx) ? COMPARE
               : in_code == COMPARE ? SET : in_code);
  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
         an address.  */
      if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) >= 0
          && SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
          HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;

          new_rtx = make_compound_operation (XEXP (x, 0), next_code);
          if (GET_CODE (new_rtx) == NEG)
            {
              new_rtx = XEXP (new_rtx, 0);
              multval = -multval;
            }
          multval = trunc_int_for_mode (multval, mode);
          new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
        }
      break;
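      /* For example, inside an address (ashift R (const_int 2)) becomes
         (mult R (const_int 4)) above, the canonical form for a scaled
         index.  */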
    case PLUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
          && SCALAR_INT_MODE_P (mode))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
                                     XEXP (lhs, 1));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
        }
      else if (GET_CODE (lhs) == MULT
               && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (lhs, 1),
                                                         mode));
          new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
          goto maybe_swap;
        }
      x = gen_lowpart (mode, new_rtx);
      goto maybe_swap;
    case MINUS:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);
      lhs = make_compound_operation (lhs, next_code);
      rhs = make_compound_operation (rhs, next_code);
      if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
          && SCALAR_INT_MODE_P (mode))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
                                     XEXP (rhs, 1));
          new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else if (GET_CODE (rhs) == MULT
               && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
        {
          tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
                                     simplify_gen_unary (NEG, mode,
                                                         XEXP (rhs, 1),
                                                         mode));
          new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
        }
      else
        {
          SUBST (XEXP (x, 0), lhs);
          SUBST (XEXP (x, 1), rhs);
          return x;
        }
      return gen_lowpart (mode, new_rtx);
    case AND:
      /* If the second operand is not a constant, we can't do anything
         with it.  */
      if (!CONST_INT_P (XEXP (x, 1)))
        break;

      /* If the constant is a power of two minus one and the first operand
         is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
          && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
                                     i, 1, 0, in_code == COMPARE);
        }

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
               && subreg_lowpart_p (XEXP (x, 0))
               && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
                                             next_code);
          new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))),
                                     new_rtx, 0,
                                     XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
                                     0, in_code == COMPARE);
        }
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
                || GET_CODE (XEXP (x, 0)) == IOR)
               && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
               && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        {
          /* Apply the distributive law, and then try to make extractions.  */
          new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
                                                 XEXP (x, 1)),
                                    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
                                                 XEXP (x, 1)));
          new_rtx = make_compound_operation (new_rtx, in_code);
        }

      /* If we have (and (rotate X C) M) and C is larger than the number
         of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
               && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
        {
          new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     (GET_MODE_PRECISION (mode)
                                      - INTVAL (XEXP (XEXP (x, 0), 1))),
                                     NULL_RTX, i, 1, 0, in_code == COMPARE);
        }

      /* On machines without logical shifts, if the operand of the AND is
         a logical shift and our mask turns off all the propagated sign
         bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
               && !have_insn_for (LSHIFTRT, mode)
               && have_insn_for (ASHIFTRT, mode)
               && CONST_INT_P (XEXP (XEXP (x, 0), 1))
               && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
               && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
               && mode_width <= HOST_BITS_PER_WIDE_INT)
        {
          unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

          mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
          if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
            SUBST (XEXP (x, 0),
                   gen_rtx_ASHIFTRT (mode,
                                     make_compound_operation
                                     (XEXP (XEXP (x, 0), 0), next_code),
                                     XEXP (XEXP (x, 0), 1)));
        }

      /* If the constant is one less than a power of two, this might be
         representable by an extraction even if no shift is present.
         If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
         we are in a COMPARE.  */
      else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
         convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
               && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (XEXP (x, 0),
                                                            next_code),
                                   i, NULL_RTX, 1, 1, 0, 1);

      break;
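      /* For example, in a SET, (and (lshiftrt X (const_int 8)) (const_int 255))
         is handled by the first transformation above: 255 + 1 is a power of
         two, so exact_log2 gives 8 and the result is an 8-bit unsigned field
         extraction of X starting at bit 8.  */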
    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
         arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
          && ! have_insn_for (LSHIFTRT, mode)
          && mode_width <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
        {
          new_rtx = gen_rtx_ASHIFTRT (mode,
                                      make_compound_operation (XEXP (x, 0),
                                                               next_code),
                                      XEXP (x, 1));
          break;
        }

      /* ... fall through ...  */
    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
         this is a SIGN_EXTRACT.  */
      if (CONST_INT_P (rhs)
          && GET_CODE (lhs) == ASHIFT
          && CONST_INT_P (XEXP (lhs, 1))
          && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
          && INTVAL (XEXP (lhs, 1)) >= 0
          && INTVAL (rhs) < mode_width)
        {
          new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
          new_rtx = make_extraction (mode, new_rtx,
                                     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
                                     NULL_RTX, mode_width - INTVAL (rhs),
                                     code == LSHIFTRT, 0, in_code == COMPARE);
          break;
        }

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
         If so, try to merge the shifts into a SIGN_EXTEND.  We could
         also do this for some cases of SIGN_EXTRACT, but it doesn't
         seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
          && ! (GET_CODE (lhs) == SUBREG
                && (OBJECT_P (SUBREG_REG (lhs))))
          && CONST_INT_P (rhs)
          && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
          && INTVAL (rhs) < mode_width
          && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
        new_rtx = make_extraction (mode,
                                   make_compound_operation (new_rtx, next_code),
                                   0, NULL_RTX, mode_width - INTVAL (rhs),
                                   code == LSHIFTRT, 0, in_code == COMPARE);

      break;
    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
         narrowing the object and it has a different RTL code from
         what it originally did, do this SUBREG as a force_to_mode.  */
      {
        rtx inner = SUBREG_REG (x), simplified;
        enum rtx_code subreg_code = in_code;

        /* If in_code is COMPARE, it isn't always safe to pass it through
           to the recursive make_compound_operation call.  */
        if (subreg_code == COMPARE
            && (!subreg_lowpart_p (x)
                || GET_CODE (inner) == SUBREG
                /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
                   is (const_int 0), rather than
                   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).  */
                || (GET_CODE (inner) == AND
                    && CONST_INT_P (XEXP (inner, 1))
                    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
                    && exact_log2 (UINTVAL (XEXP (inner, 1)))
                       >= GET_MODE_BITSIZE (mode))))
          subreg_code = SET;

        tem = make_compound_operation (inner, subreg_code);

        simplified
          = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
        if (simplified)
          tem = simplified;

        if (GET_CODE (tem) != GET_CODE (inner)
            && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
            && subreg_lowpart_p (x))
          {
            rtx newer
              = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);

            /* If we have something other than a SUBREG, we might have
               done an expansion, so rerun ourselves.  */
            if (GET_CODE (newer) != SUBREG)
              newer = make_compound_operation (newer, in_code);

            /* force_to_mode can expand compounds.  If it just re-expanded the
               compound, use gen_lowpart to convert to the desired mode.  */
            if (rtx_equal_p (newer, x)
                /* Likewise if it re-expanded the compound only partially.
                   This happens for SUBREG of ZERO_EXTRACT if they extract
                   the same number of bits.  */
                || (GET_CODE (newer) == SUBREG
                    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
                        || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
                    && GET_CODE (inner) == AND
                    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
              return gen_lowpart (GET_MODE (x), tem);

            return newer;
          }

        if (simplified)
          return tem;
      }
      break;

    default:
      break;
    }

  if (new_rtx)
    {
      x = gen_lowpart (mode, new_rtx);
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  We need to
     handle ZERO_EXTEND specially so that we don't lose track of the
     inner mode.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                            new_rtx, GET_MODE (XEXP (x, 0)));
      if (tem)
        return tem;
      SUBST (XEXP (x, 0), new_rtx);
      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
        new_rtx = make_compound_operation (XEXP (x, i), next_code);
        SUBST (XEXP (x, i), new_rtx);
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        {
          new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
          SUBST (XVECEXP (x, i, j), new_rtx);
        }

 maybe_swap:
  /* If this is a commutative operation, the changes to the operands
     may have made it noncanonical.  */
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      tem = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), tem);
    }

  return x;
}
/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = m ? ctz_hwi (m) : -1;
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}
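/* For example, for M == 0x0ff0 the function above returns position 4 with
   *PLEN == 8, since the lowest set bit is bit 4 and (M >> 4) + 1 == 0x100
   is a power of two.  A mask like 0x0f0f is rejected because shifting off
   its low zero bits does not leave a power of two minus 1.  */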
/* If X refers to a register that equals REG in value, replace these
   references with REG.  */

static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
        return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
                                   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
        return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
                                        GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
        return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
                                     GET_MODE (op0), op0, op1, op2);
      break;

    case RTX_OBJ:
      if (REG_P (x))
        {
          if (rtx_equal_p (get_last_value (reg), x)
              || rtx_equal_p (reg, get_last_value (x)))
            return reg;
          else
            break;
        }

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (fmt[i] == 'e')
          {
            rtx op = canon_reg_for_combine (XEXP (x, i), reg);
            if (op != XEXP (x, i))
              {
                if (!copied)
                  {
                    copied = true;
                    x = copy_rtx (x);
                  }
                XEXP (x, i) = op;
              }
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              {
                rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
                if (op != XVECEXP (x, i, j))
                  {
                    if (!copied)
                      {
                        copied = true;
                        x = copy_rtx (x);
                      }
                    XVECEXP (x, i, j) = op;
                  }
              }
          }

      break;
    }

  return x;
}
/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (machine_mode mode, rtx x)
{
  if (!CONST_INT_P (x)
      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
      && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
      && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
    {
      /* Bit-cast X into an integer mode.  */
      if (!SCALAR_INT_MODE_P (GET_MODE (x)))
        x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
                              x, GET_MODE (x));
    }

  return gen_lowpart (mode, x);
}
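/* For example, when narrowing a DImode value to SImode on a target where
   that truncation is not a no-op and the value is not already known to be
   truncated, the function above emits an explicit TRUNCATE; otherwise it
   simply returns the low-part SUBREG from gen_lowpart.  */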
/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
               int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  machine_mode op_mode;
  unsigned HOST_WIDE_INT fuller_mask, nonzero;
  rtx op0, op1, temp;
8275 code below will do the wrong thing since the mode of such an
8276 expression is VOIDmode.
8278 Also do nothing if X is a CLOBBER; this can happen if X was
8279 the return value from a call to gen_lowpart. */
8280 if (code
== CALL
|| code
== ASM_OPERANDS
|| code
== CLOBBER
)
8283 /* We want to perform the operation in its present mode unless we know
8284 that the operation is valid in MODE, in which case we do the operation
8286 op_mode
= ((GET_MODE_CLASS (mode
) == GET_MODE_CLASS (GET_MODE (x
))
8287 && have_insn_for (code
, mode
))
8288 ? mode
: GET_MODE (x
));
8290 /* It is not valid to do a right-shift in a narrower mode
8291 than the one it came in with. */
8292 if ((code
== LSHIFTRT
|| code
== ASHIFTRT
)
8293 && GET_MODE_PRECISION (mode
) < GET_MODE_PRECISION (GET_MODE (x
)))
8294 op_mode
= GET_MODE (x
);
8296 /* Truncate MASK to fit OP_MODE. */
8298 mask
&= GET_MODE_MASK (op_mode
);
8300 /* When we have an arithmetic operation, or a shift whose count we
8301 do not know, we need to assume that all bits up to the highest-order
8302 bit in MASK will be needed. This is how we form such a mask. */
8303 if (mask
& ((unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)))
8304 fuller_mask
= ~(unsigned HOST_WIDE_INT
) 0;
8306 fuller_mask
= (((unsigned HOST_WIDE_INT
) 1 << (floor_log2 (mask
) + 1))
8309 /* Determine what bits of X are guaranteed to be (non)zero. */
8310 nonzero
= nonzero_bits (x
, mode
);
8312 /* If none of the bits in X are needed, return a zero. */
8313 if (!just_select
&& (nonzero
& mask
) == 0 && !side_effects_p (x
))
8316 /* If X is a CONST_INT, return a new one. Do this here since the
8317 test below will fail. */
8318 if (CONST_INT_P (x
))
8320 if (SCALAR_INT_MODE_P (mode
))
8321 return gen_int_mode (INTVAL (x
) & mask
, mode
);
8324 x
= GEN_INT (INTVAL (x
) & mask
);
8325 return gen_lowpart_common (mode
, x
);
8329 /* If X is narrower than MODE and we want all the bits in X's mode, just
8330 get X in the proper mode. */
8331 if (GET_MODE_SIZE (GET_MODE (x
)) < GET_MODE_SIZE (mode
)
8332 && (GET_MODE_MASK (GET_MODE (x
)) & ~mask
) == 0)
8333 return gen_lowpart (mode
, x
);
8335 /* We can ignore the effect of a SUBREG if it narrows the mode or
8336 if the constant masks to zero all the bits the mode doesn't have. */
8337 if (GET_CODE (x
) == SUBREG
8338 && subreg_lowpart_p (x
)
8339 && ((GET_MODE_SIZE (GET_MODE (x
))
8340 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
))))
8342 & GET_MODE_MASK (GET_MODE (x
))
8343 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x
)))))))
8344 return force_to_mode (SUBREG_REG (x
), mode
, mask
, next_select
);
8346 /* The arithmetic simplifications here only work for scalar integer modes. */
8347 if (!SCALAR_INT_MODE_P (mode
) || !SCALAR_INT_MODE_P (GET_MODE (x
)))
8348 return gen_lowpart_or_truncate (mode
, x
);
8353 /* If X is a (clobber (const_int)), return it since we know we are
8354 generating something that won't match. */
8361 x
= expand_compound_operation (x
);
8362 if (GET_CODE (x
) != code
)
8363 return force_to_mode (x
, mode
, mask
, next_select
);
8367 /* Similarly for a truncate. */
8368 return force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8371 /* If this is an AND with a constant, convert it into an AND
8372 whose constant is the AND of that constant with MASK. If it
8373 remains an AND of MASK, delete it since it is redundant. */
8375 if (CONST_INT_P (XEXP (x
, 1)))
8377 x
= simplify_and_const_int (x
, op_mode
, XEXP (x
, 0),
8378 mask
& INTVAL (XEXP (x
, 1)));
8380 /* If X is still an AND, see if it is an AND with a mask that
8381 is just some low-order bits. If so, and it is MASK, we don't
8384 if (GET_CODE (x
) == AND
&& CONST_INT_P (XEXP (x
, 1))
8385 && ((INTVAL (XEXP (x
, 1)) & GET_MODE_MASK (GET_MODE (x
)))
8389 /* If it remains an AND, try making another AND with the bits
8390 in the mode mask that aren't in MASK turned on. If the
8391 constant in the AND is wide enough, this might make a
8392 cheaper constant. */
8394 if (GET_CODE (x
) == AND
&& CONST_INT_P (XEXP (x
, 1))
8395 && GET_MODE_MASK (GET_MODE (x
)) != mask
8396 && HWI_COMPUTABLE_MODE_P (GET_MODE (x
)))
8398 unsigned HOST_WIDE_INT cval
8399 = UINTVAL (XEXP (x
, 1))
8400 | (GET_MODE_MASK (GET_MODE (x
)) & ~mask
);
8403 y
= simplify_gen_binary (AND
, GET_MODE (x
), XEXP (x
, 0),
8404 gen_int_mode (cval
, GET_MODE (x
)));
8405 if (set_src_cost (y
, optimize_this_for_speed_p
)
8406 < set_src_cost (x
, optimize_this_for_speed_p
))
8416 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8417 low-order bits (as in an alignment operation) and FOO is already
8418 aligned to that boundary, mask C1 to that boundary as well.
8419 This may eliminate that PLUS and, later, the AND. */
8422 unsigned int width
= GET_MODE_PRECISION (mode
);
8423 unsigned HOST_WIDE_INT smask
= mask
;
8425 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8426 number, sign extend it. */
8428 if (width
< HOST_BITS_PER_WIDE_INT
8429 && (smask
& (HOST_WIDE_INT_1U
<< (width
- 1))) != 0)
8430 smask
|= HOST_WIDE_INT_M1U
<< width
;
8432 if (CONST_INT_P (XEXP (x
, 1))
8433 && exact_log2 (- smask
) >= 0
8434 && (nonzero_bits (XEXP (x
, 0), mode
) & ~smask
) == 0
8435 && (INTVAL (XEXP (x
, 1)) & ~smask
) != 0)
8436 return force_to_mode (plus_constant (GET_MODE (x
), XEXP (x
, 0),
8437 (INTVAL (XEXP (x
, 1)) & smask
)),
8438 mode
, smask
, next_select
);
8441 /* ... fall through ... */
8444 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8445 most significant bit in MASK since carries from those bits will
8446 affect the bits we are interested in. */
8451 /* If X is (minus C Y) where C's least set bit is larger than any bit
8452 in the mask, then we may replace with (neg Y). */
8453 if (CONST_INT_P (XEXP (x
, 0))
8454 && ((UINTVAL (XEXP (x
, 0)) & -UINTVAL (XEXP (x
, 0))) > mask
))
8456 x
= simplify_gen_unary (NEG
, GET_MODE (x
), XEXP (x
, 1),
8458 return force_to_mode (x
, mode
, mask
, next_select
);
8461 /* Similarly, if C contains every bit in the fuller_mask, then we may
8462 replace with (not Y). */
8463 if (CONST_INT_P (XEXP (x
, 0))
8464 && ((UINTVAL (XEXP (x
, 0)) | fuller_mask
) == UINTVAL (XEXP (x
, 0))))
8466 x
= simplify_gen_unary (NOT
, GET_MODE (x
),
8467 XEXP (x
, 1), GET_MODE (x
));
8468 return force_to_mode (x
, mode
, mask
, next_select
);
8476 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8477 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8478 operation which may be a bitfield extraction. Ensure that the
8479 constant we form is not wider than the mode of X. */
8481 if (GET_CODE (XEXP (x
, 0)) == LSHIFTRT
8482 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
8483 && INTVAL (XEXP (XEXP (x
, 0), 1)) >= 0
8484 && INTVAL (XEXP (XEXP (x
, 0), 1)) < HOST_BITS_PER_WIDE_INT
8485 && CONST_INT_P (XEXP (x
, 1))
8486 && ((INTVAL (XEXP (XEXP (x
, 0), 1))
8487 + floor_log2 (INTVAL (XEXP (x
, 1))))
8488 < GET_MODE_PRECISION (GET_MODE (x
)))
8489 && (UINTVAL (XEXP (x
, 1))
8490 & ~nonzero_bits (XEXP (x
, 0), GET_MODE (x
))) == 0)
8492 temp
= gen_int_mode ((INTVAL (XEXP (x
, 1)) & mask
)
8493 << INTVAL (XEXP (XEXP (x
, 0), 1)),
8495 temp
= simplify_gen_binary (GET_CODE (x
), GET_MODE (x
),
8496 XEXP (XEXP (x
, 0), 0), temp
);
8497 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
), temp
,
8498 XEXP (XEXP (x
, 0), 1));
8499 return force_to_mode (x
, mode
, mask
, next_select
);
8503 /* For most binary operations, just propagate into the operation and
8504 change the mode if we have an operation of that mode. */
8506 op0
= force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8507 op1
= force_to_mode (XEXP (x
, 1), mode
, mask
, next_select
);
8509 /* If we ended up truncating both operands, truncate the result of the
8510 operation instead. */
8511 if (GET_CODE (op0
) == TRUNCATE
8512 && GET_CODE (op1
) == TRUNCATE
)
8514 op0
= XEXP (op0
, 0);
8515 op1
= XEXP (op1
, 0);
8518 op0
= gen_lowpart_or_truncate (op_mode
, op0
);
8519 op1
= gen_lowpart_or_truncate (op_mode
, op1
);
8521 if (op_mode
!= GET_MODE (x
) || op0
!= XEXP (x
, 0) || op1
!= XEXP (x
, 1))
8522 x
= simplify_gen_binary (code
, op_mode
, op0
, op1
);
8526 /* For left shifts, do the same, but just for the first operand.
8527 However, we cannot do anything with shifts where we cannot
8528 guarantee that the counts are smaller than the size of the mode
8529 because such a count will have a different meaning in a
8532 if (! (CONST_INT_P (XEXP (x
, 1))
8533 && INTVAL (XEXP (x
, 1)) >= 0
8534 && INTVAL (XEXP (x
, 1)) < GET_MODE_PRECISION (mode
))
8535 && ! (GET_MODE (XEXP (x
, 1)) != VOIDmode
8536 && (nonzero_bits (XEXP (x
, 1), GET_MODE (XEXP (x
, 1)))
8537 < (unsigned HOST_WIDE_INT
) GET_MODE_PRECISION (mode
))))
8540 /* If the shift count is a constant and we can do arithmetic in
8541 the mode of the shift, refine which bits we need. Otherwise, use the
8542 conservative form of the mask. */
8543 if (CONST_INT_P (XEXP (x
, 1))
8544 && INTVAL (XEXP (x
, 1)) >= 0
8545 && INTVAL (XEXP (x
, 1)) < GET_MODE_PRECISION (op_mode
)
8546 && HWI_COMPUTABLE_MODE_P (op_mode
))
8547 mask
>>= INTVAL (XEXP (x
, 1));
8551 op0
= gen_lowpart_or_truncate (op_mode
,
8552 force_to_mode (XEXP (x
, 0), op_mode
,
8553 mask
, next_select
));
8555 if (op_mode
!= GET_MODE (x
) || op0
!= XEXP (x
, 0))
8556 x
= simplify_gen_binary (code
, op_mode
, op0
, XEXP (x
, 1));
8560 /* Here we can only do something if the shift count is a constant,
8561 this shift constant is valid for the host, and we can do arithmetic
8564 if (CONST_INT_P (XEXP (x
, 1))
8565 && INTVAL (XEXP (x
, 1)) >= 0
8566 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
8567 && HWI_COMPUTABLE_MODE_P (op_mode
))
8569 rtx inner
= XEXP (x
, 0);
8570 unsigned HOST_WIDE_INT inner_mask
;
8572 /* Select the mask of the bits we need for the shift operand. */
8573 inner_mask
= mask
<< INTVAL (XEXP (x
, 1));
8575 /* We can only change the mode of the shift if we can do arithmetic
8576 in the mode of the shift and INNER_MASK is no wider than the
8577 width of X's mode. */
8578 if ((inner_mask
& ~GET_MODE_MASK (GET_MODE (x
))) != 0)
8579 op_mode
= GET_MODE (x
);
8581 inner
= force_to_mode (inner
, op_mode
, inner_mask
, next_select
);
8583 if (GET_MODE (x
) != op_mode
|| inner
!= XEXP (x
, 0))
8584 x
= simplify_gen_binary (LSHIFTRT
, op_mode
, inner
, XEXP (x
, 1));
8587 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8588 shift and AND produces only copies of the sign bit (C2 is one less
8589 than a power of two), we can do this with just a shift. */
8591 if (GET_CODE (x
) == LSHIFTRT
8592 && CONST_INT_P (XEXP (x
, 1))
8593 /* The shift puts one of the sign bit copies in the least significant
8595 && ((INTVAL (XEXP (x
, 1))
8596 + num_sign_bit_copies (XEXP (x
, 0), GET_MODE (XEXP (x
, 0))))
8597 >= GET_MODE_PRECISION (GET_MODE (x
)))
8598 && exact_log2 (mask
+ 1) >= 0
8599 /* Number of bits left after the shift must be more than the mask
8601 && ((INTVAL (XEXP (x
, 1)) + exact_log2 (mask
+ 1))
8602 <= GET_MODE_PRECISION (GET_MODE (x
)))
8603 /* Must be more sign bit copies than the mask needs. */
8604 && ((int) num_sign_bit_copies (XEXP (x
, 0), GET_MODE (XEXP (x
, 0)))
8605 >= exact_log2 (mask
+ 1)))
8606 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
), XEXP (x
, 0),
8607 GEN_INT (GET_MODE_PRECISION (GET_MODE (x
))
8608 - exact_log2 (mask
+ 1)));
8613 /* If we are just looking for the sign bit, we don't need this shift at
8614 all, even if it has a variable count. */
8615 if (val_signbit_p (GET_MODE (x
), mask
))
8616 return force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8618 /* If this is a shift by a constant, get a mask that contains those bits
8619 that are not copies of the sign bit. We then have two cases: If
8620 MASK only includes those bits, this can be a logical shift, which may
8621 allow simplifications. If MASK is a single-bit field not within
8622 those bits, we are requesting a copy of the sign bit and hence can
8623 shift the sign bit to the appropriate location. */
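      /* For instance, if MASK requests only a copy of the sign bit in bit 0
         (MASK == 1 and that bit is not among the non-sign-copy bits), the
         ASHIFTRT is replaced below by (lshiftrt X (PRECISION - 1)), which
         moves the sign bit into bit 0.  */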
8625 if (CONST_INT_P (XEXP (x
, 1)) && INTVAL (XEXP (x
, 1)) >= 0
8626 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
8630 /* If the considered data is wider than HOST_WIDE_INT, we can't
8631 represent a mask for all its bits in a single scalar.
8632 But we only care about the lower bits, so calculate these. */
8634 if (GET_MODE_PRECISION (GET_MODE (x
)) > HOST_BITS_PER_WIDE_INT
)
8636 nonzero
= ~(unsigned HOST_WIDE_INT
) 0;
8638 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8639 is the number of bits a full-width mask would have set.
8640 We need only shift if these are fewer than nonzero can
8641 hold. If not, we must keep all bits set in nonzero. */
8643 if (GET_MODE_PRECISION (GET_MODE (x
)) - INTVAL (XEXP (x
, 1))
8644 < HOST_BITS_PER_WIDE_INT
)
8645 nonzero
>>= INTVAL (XEXP (x
, 1))
8646 + HOST_BITS_PER_WIDE_INT
8647 - GET_MODE_PRECISION (GET_MODE (x
)) ;
8651 nonzero
= GET_MODE_MASK (GET_MODE (x
));
8652 nonzero
>>= INTVAL (XEXP (x
, 1));
8655 if ((mask
& ~nonzero
) == 0)
8657 x
= simplify_shift_const (NULL_RTX
, LSHIFTRT
, GET_MODE (x
),
8658 XEXP (x
, 0), INTVAL (XEXP (x
, 1)));
8659 if (GET_CODE (x
) != ASHIFTRT
)
8660 return force_to_mode (x
, mode
, mask
, next_select
);
8663 else if ((i
= exact_log2 (mask
)) >= 0)
8665 x
= simplify_shift_const
8666 (NULL_RTX
, LSHIFTRT
, GET_MODE (x
), XEXP (x
, 0),
8667 GET_MODE_PRECISION (GET_MODE (x
)) - 1 - i
);
8669 if (GET_CODE (x
) != ASHIFTRT
)
8670 return force_to_mode (x
, mode
, mask
, next_select
);
8674 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8675 even if the shift count isn't a constant. */
8677 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
),
8678 XEXP (x
, 0), XEXP (x
, 1));
8682 /* If this is a zero- or sign-extension operation that just affects bits
8683 we don't care about, remove it. Be sure the call above returned
8684 something that is still a shift. */
8686 if ((GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ASHIFTRT
)
8687 && CONST_INT_P (XEXP (x
, 1))
8688 && INTVAL (XEXP (x
, 1)) >= 0
8689 && (INTVAL (XEXP (x
, 1))
8690 <= GET_MODE_PRECISION (GET_MODE (x
)) - (floor_log2 (mask
) + 1))
8691 && GET_CODE (XEXP (x
, 0)) == ASHIFT
8692 && XEXP (XEXP (x
, 0), 1) == XEXP (x
, 1))
8693 return force_to_mode (XEXP (XEXP (x
, 0), 0), mode
, mask
,
8700 /* If the shift count is constant and we can do computations
8701 in the mode of X, compute where the bits we care about are.
8702 Otherwise, we can't do anything. Don't change the mode of
8703 the shift or propagate MODE into the shift, though. */
8704 if (CONST_INT_P (XEXP (x
, 1))
8705 && INTVAL (XEXP (x
, 1)) >= 0)
8707 temp
= simplify_binary_operation (code
== ROTATE
? ROTATERT
: ROTATE
,
8709 gen_int_mode (mask
, GET_MODE (x
)),
8711 if (temp
&& CONST_INT_P (temp
))
8712 x
= simplify_gen_binary (code
, GET_MODE (x
),
8713 force_to_mode (XEXP (x
, 0), GET_MODE (x
),
8714 INTVAL (temp
), next_select
),
8720 /* If we just want the low-order bit, the NEG isn't needed since it
8721 won't change the low-order bit. */
8723 return force_to_mode (XEXP (x
, 0), mode
, mask
, just_select
);
8725 /* We need any bits less significant than the most significant bit in
8726 MASK since carries from those bits will affect the bits we are
8732 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8733 same as the XOR case above. Ensure that the constant we form is not
8734 wider than the mode of X. */
8736 if (GET_CODE (XEXP (x
, 0)) == LSHIFTRT
8737 && CONST_INT_P (XEXP (XEXP (x
, 0), 1))
8738 && INTVAL (XEXP (XEXP (x
, 0), 1)) >= 0
8739 && (INTVAL (XEXP (XEXP (x
, 0), 1)) + floor_log2 (mask
)
8740 < GET_MODE_PRECISION (GET_MODE (x
)))
8741 && INTVAL (XEXP (XEXP (x
, 0), 1)) < HOST_BITS_PER_WIDE_INT
)
8743 temp
= gen_int_mode (mask
<< INTVAL (XEXP (XEXP (x
, 0), 1)),
8745 temp
= simplify_gen_binary (XOR
, GET_MODE (x
),
8746 XEXP (XEXP (x
, 0), 0), temp
);
8747 x
= simplify_gen_binary (LSHIFTRT
, GET_MODE (x
),
8748 temp
, XEXP (XEXP (x
, 0), 1));
8750 return force_to_mode (x
, mode
, mask
, next_select
);
8753 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8754 use the full mask inside the NOT. */
8758 op0
= gen_lowpart_or_truncate (op_mode
,
8759 force_to_mode (XEXP (x
, 0), mode
, mask
,
8761 if (op_mode
!= GET_MODE (x
) || op0
!= XEXP (x
, 0))
8762 x
= simplify_gen_unary (code
, op_mode
, op0
, op_mode
);
8766 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8767 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8768 which is equal to STORE_FLAG_VALUE. */
8769 if ((mask
& ~STORE_FLAG_VALUE
) == 0
8770 && XEXP (x
, 1) == const0_rtx
8771 && GET_MODE (XEXP (x
, 0)) == mode
8772 && exact_log2 (nonzero_bits (XEXP (x
, 0), mode
)) >= 0
8773 && (nonzero_bits (XEXP (x
, 0), mode
)
8774 == (unsigned HOST_WIDE_INT
) STORE_FLAG_VALUE
))
8775 return force_to_mode (XEXP (x
, 0), mode
, mask
, next_select
);
8780 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8781 written in a narrower mode. We play it safe and do not do so. */
8783 op0
= gen_lowpart_or_truncate (GET_MODE (x
),
8784 force_to_mode (XEXP (x
, 1), mode
,
8785 mask
, next_select
));
8786 op1
= gen_lowpart_or_truncate (GET_MODE (x
),
8787 force_to_mode (XEXP (x
, 2), mode
,
8788 mask
, next_select
));
8789 if (op0
!= XEXP (x
, 1) || op1
!= XEXP (x
, 2))
8790 x
= simplify_gen_ternary (IF_THEN_ELSE
, GET_MODE (x
),
8791 GET_MODE (XEXP (x
, 0)), XEXP (x
, 0),
8799 /* Ensure we return a value of the proper mode. */
8800 return gen_lowpart_or_truncate (mode
, x
);
8803 /* Return nonzero if X is an expression that has one of two values depending on
8804 whether some other value is zero or nonzero. In that case, we return the
8805 value that is being tested, *PTRUE is set to the value if the rtx being
8806 returned has a nonzero value, and *PFALSE is set to the other alternative.
8808 If we return zero, we set *PTRUE and *PFALSE to X. */
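/* For example, if X is (if_then_else (ne A (const_int 0)) B C), the code
   below returns A and sets *PTRUE to B and *PFALSE to C.  */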
8811 if_then_else_cond (rtx x
, rtx
*ptrue
, rtx
*pfalse
)
8813 machine_mode mode
= GET_MODE (x
);
8814 enum rtx_code code
= GET_CODE (x
);
8815 rtx cond0
, cond1
, true0
, true1
, false0
, false1
;
8816 unsigned HOST_WIDE_INT nz
;
8818 /* If we are comparing a value against zero, we are done. */
8819 if ((code
== NE
|| code
== EQ
)
8820 && XEXP (x
, 1) == const0_rtx
)
8822 *ptrue
= (code
== NE
) ? const_true_rtx
: const0_rtx
;
8823 *pfalse
= (code
== NE
) ? const0_rtx
: const_true_rtx
;
8827 /* If this is a unary operation whose operand has one of two values, apply
8828 our opcode to compute those values. */
8829 else if (UNARY_P (x
)
8830 && (cond0
= if_then_else_cond (XEXP (x
, 0), &true0
, &false0
)) != 0)
8832 *ptrue
= simplify_gen_unary (code
, mode
, true0
, GET_MODE (XEXP (x
, 0)));
8833 *pfalse
= simplify_gen_unary (code
, mode
, false0
,
8834 GET_MODE (XEXP (x
, 0)));
8838 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8839 make can't possibly match and would suppress other optimizations. */
8840 else if (code
== COMPARE
)
8843 /* If this is a binary operation, see if either side has only one of two
8844 values. If either one does or if both do and they are conditional on
8845 the same value, compute the new true and false values. */
8846 else if (BINARY_P (x
))
8848 cond0
= if_then_else_cond (XEXP (x
, 0), &true0
, &false0
);
8849 cond1
= if_then_else_cond (XEXP (x
, 1), &true1
, &false1
);
8851 if ((cond0
!= 0 || cond1
!= 0)
8852 && ! (cond0
!= 0 && cond1
!= 0 && ! rtx_equal_p (cond0
, cond1
)))
8854 /* If if_then_else_cond returned zero, then true/false are the
8855 same rtl. We must copy one of them to prevent invalid rtl
8858 true0
= copy_rtx (true0
);
8859 else if (cond1
== 0)
8860 true1
= copy_rtx (true1
);
8862 if (COMPARISON_P (x
))
8864 *ptrue
= simplify_gen_relational (code
, mode
, VOIDmode
,
8866 *pfalse
= simplify_gen_relational (code
, mode
, VOIDmode
,
8871 *ptrue
= simplify_gen_binary (code
, mode
, true0
, true1
);
8872 *pfalse
= simplify_gen_binary (code
, mode
, false0
, false1
);
8875 return cond0
? cond0
: cond1
;
8878 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8879 operands is zero when the other is nonzero, and vice-versa,
8880 and STORE_FLAG_VALUE is 1 or -1. */
8882 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
8883 && (code
== PLUS
|| code
== IOR
|| code
== XOR
|| code
== MINUS
8885 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
8887 rtx op0
= XEXP (XEXP (x
, 0), 1);
8888 rtx op1
= XEXP (XEXP (x
, 1), 1);
8890 cond0
= XEXP (XEXP (x
, 0), 0);
8891 cond1
= XEXP (XEXP (x
, 1), 0);
8893 if (COMPARISON_P (cond0
)
8894 && COMPARISON_P (cond1
)
8895 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
8896 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
8897 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
8898 || ((swap_condition (GET_CODE (cond0
))
8899 == reversed_comparison_code (cond1
, NULL
))
8900 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
8901 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
8902 && ! side_effects_p (x
))
8904 *ptrue
= simplify_gen_binary (MULT
, mode
, op0
, const_true_rtx
);
8905 *pfalse
= simplify_gen_binary (MULT
, mode
,
8907 ? simplify_gen_unary (NEG
, mode
,
8915 /* Similarly for MULT, AND and UMIN, except that for these the result
8917 if ((STORE_FLAG_VALUE
== 1 || STORE_FLAG_VALUE
== -1)
8918 && (code
== MULT
|| code
== AND
|| code
== UMIN
)
8919 && GET_CODE (XEXP (x
, 0)) == MULT
&& GET_CODE (XEXP (x
, 1)) == MULT
)
8921 cond0
= XEXP (XEXP (x
, 0), 0);
8922 cond1
= XEXP (XEXP (x
, 1), 0);
8924 if (COMPARISON_P (cond0
)
8925 && COMPARISON_P (cond1
)
8926 && ((GET_CODE (cond0
) == reversed_comparison_code (cond1
, NULL
)
8927 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 0))
8928 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 1)))
8929 || ((swap_condition (GET_CODE (cond0
))
8930 == reversed_comparison_code (cond1
, NULL
))
8931 && rtx_equal_p (XEXP (cond0
, 0), XEXP (cond1
, 1))
8932 && rtx_equal_p (XEXP (cond0
, 1), XEXP (cond1
, 0))))
8933 && ! side_effects_p (x
))
8935 *ptrue
= *pfalse
= const0_rtx
;
8941 else if (code
== IF_THEN_ELSE
)
8943 /* If we have IF_THEN_ELSE already, extract the condition and
8944 canonicalize it if it is NE or EQ. */
8945 cond0
= XEXP (x
, 0);
8946 *ptrue
= XEXP (x
, 1), *pfalse
= XEXP (x
, 2);
8947 if (GET_CODE (cond0
) == NE
&& XEXP (cond0
, 1) == const0_rtx
)
8948 return XEXP (cond0
, 0);
8949 else if (GET_CODE (cond0
) == EQ
&& XEXP (cond0
, 1) == const0_rtx
)
8951 *ptrue
= XEXP (x
, 2), *pfalse
= XEXP (x
, 1);
8952 return XEXP (cond0
, 0);
8958 /* If X is a SUBREG, we can narrow both the true and false values
8959 if the inner expression, if there is a condition. */
8960 else if (code
== SUBREG
8961 && 0 != (cond0
= if_then_else_cond (SUBREG_REG (x
),
8964 true0
= simplify_gen_subreg (mode
, true0
,
8965 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
8966 false0
= simplify_gen_subreg (mode
, false0
,
8967 GET_MODE (SUBREG_REG (x
)), SUBREG_BYTE (x
));
8968 if (true0
&& false0
)
8976 /* If X is a constant, this isn't special and will cause confusions
8977 if we treat it as such. Likewise if it is equivalent to a constant. */
8978 else if (CONSTANT_P (x
)
8979 || ((cond0
= get_last_value (x
)) != 0 && CONSTANT_P (cond0
)))
8982 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8983 will be least confusing to the rest of the compiler. */
8984 else if (mode
== BImode
)
8986 *ptrue
= GEN_INT (STORE_FLAG_VALUE
), *pfalse
= const0_rtx
;
8990 /* If X is known to be either 0 or -1, those are the true and
8991 false values when testing X. */
8992 else if (x
== constm1_rtx
|| x
== const0_rtx
8993 || (mode
!= VOIDmode
8994 && num_sign_bit_copies (x
, mode
) == GET_MODE_PRECISION (mode
)))
8996 *ptrue
= constm1_rtx
, *pfalse
= const0_rtx
;
9000 /* Likewise for 0 or a single bit. */
9001 else if (HWI_COMPUTABLE_MODE_P (mode
)
9002 && exact_log2 (nz
= nonzero_bits (x
, mode
)) >= 0)
9004 *ptrue
= gen_int_mode (nz
, mode
), *pfalse
= const0_rtx
;
9008 /* Otherwise fail; show no condition with true and false values the same. */
9009 *ptrue
= *pfalse
= x
;
9013 /* Return the value of expression X given the fact that condition COND
9014 is known to be true when applied to REG as its first operand and VAL
9015 as its second. X is known to not be shared and so can be modified in
9018 We only handle the simplest cases, and specifically those cases that
9019 arise with IF_THEN_ELSE expressions. */
9022 known_cond (rtx x
, enum rtx_code cond
, rtx reg
, rtx val
)
9024 enum rtx_code code
= GET_CODE (x
);
9029 if (side_effects_p (x
))
9032 /* If either operand of the condition is a floating point value,
9033 then we have to avoid collapsing an EQ comparison. */
9035 && rtx_equal_p (x
, reg
)
9036 && ! FLOAT_MODE_P (GET_MODE (x
))
9037 && ! FLOAT_MODE_P (GET_MODE (val
)))
9040 if (cond
== UNEQ
&& rtx_equal_p (x
, reg
))
9043 /* If X is (abs REG) and we know something about REG's relationship
9044 with zero, we may be able to simplify this. */
9046 if (code
== ABS
&& rtx_equal_p (XEXP (x
, 0), reg
) && val
== const0_rtx
)
9049 case GE
: case GT
: case EQ
:
9052 return simplify_gen_unary (NEG
, GET_MODE (XEXP (x
, 0)),
9054 GET_MODE (XEXP (x
, 0)));
9059 /* The only other cases we handle are MIN, MAX, and comparisons if the
9060 operands are the same as REG and VAL. */
9062 else if (COMPARISON_P (x
) || COMMUTATIVE_ARITH_P (x
))
9064 if (rtx_equal_p (XEXP (x
, 0), val
))
9065 cond
= swap_condition (cond
), temp
= val
, val
= reg
, reg
= temp
;
9067 if (rtx_equal_p (XEXP (x
, 0), reg
) && rtx_equal_p (XEXP (x
, 1), val
))
9069 if (COMPARISON_P (x
))
9071 if (comparison_dominates_p (cond
, code
))
9072 return const_true_rtx
;
9074 code
= reversed_comparison_code (x
, NULL
);
9076 && comparison_dominates_p (cond
, code
))
9081 else if (code
== SMAX
|| code
== SMIN
9082 || code
== UMIN
|| code
== UMAX
)
9084 int unsignedp
= (code
== UMIN
|| code
== UMAX
);
9086 /* Do not reverse the condition when it is NE or EQ.
9087 This is because we cannot conclude anything about
9088 the value of 'SMAX (x, y)' when x is not equal to y,
9089 but we can when x equals y. */
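          /* For instance, when the known condition is (ge R (const_int 0)),
             (smax R (const_int 0)) simplifies to R below, while
             (smin R (const_int 0)) simplifies to (const_int 0).  */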
9090 if ((code
== SMAX
|| code
== UMAX
)
9091 && ! (cond
== EQ
|| cond
== NE
))
9092 cond
= reverse_condition (cond
);
9097 return unsignedp
? x
: XEXP (x
, 1);
9099 return unsignedp
? x
: XEXP (x
, 0);
9101 return unsignedp
? XEXP (x
, 1) : x
;
9103 return unsignedp
? XEXP (x
, 0) : x
;
9110 else if (code
== SUBREG
)
9112 machine_mode inner_mode
= GET_MODE (SUBREG_REG (x
));
9113 rtx new_rtx
, r
= known_cond (SUBREG_REG (x
), cond
, reg
, val
);
9115 if (SUBREG_REG (x
) != r
)
9117 /* We must simplify subreg here, before we lose track of the
9118 original inner_mode. */
9119 new_rtx
= simplify_subreg (GET_MODE (x
), r
,
9120 inner_mode
, SUBREG_BYTE (x
));
9124 SUBST (SUBREG_REG (x
), r
);
9129 /* We don't have to handle SIGN_EXTEND here, because even in the
9130 case of replacing something with a modeless CONST_INT, a
9131 CONST_INT is already (supposed to be) a valid sign extension for
9132 its narrower mode, which implies it's already properly
9133 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9134 story is different. */
9135 else if (code
== ZERO_EXTEND
)
9137 machine_mode inner_mode
= GET_MODE (XEXP (x
, 0));
9138 rtx new_rtx
, r
= known_cond (XEXP (x
, 0), cond
, reg
, val
);
9140 if (XEXP (x
, 0) != r
)
9142 /* We must simplify the zero_extend here, before we lose
9143 track of the original inner_mode. */
9144 new_rtx
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
9149 SUBST (XEXP (x
, 0), r
);
9155 fmt
= GET_RTX_FORMAT (code
);
9156 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
9159 SUBST (XEXP (x
, i
), known_cond (XEXP (x
, i
), cond
, reg
, val
));
9160 else if (fmt
[i
] == 'E')
9161 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
9162 SUBST (XVECEXP (x
, i
, j
), known_cond (XVECEXP (x
, i
, j
),
/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
{
  if (widen_x && GET_MODE (x) != GET_MODE (y))
    {
      if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
        return 0;
      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
        return 0;
      /* For big endian, adjust the memory offset.  */
      if (BYTES_BIG_ENDIAN)
        x = adjust_address_nv (x, GET_MODE (y),
                               -subreg_lowpart_offset (GET_MODE (x),
                                                       GET_MODE (y)));
      else
        x = adjust_address_nv (x, GET_MODE (y), 0);
    }

  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
                      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
                      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the other case, we'll import a register
     into this insn that might already have been dead.  So fail if none
     of the above cases are true.  */

  return 0;
}
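/* A typical case handled by make_field_assignment below: a one-bit clear
   such as (set R (and (rotate (const_int -2) POS) R)) is rewritten as a
   store of zero into a single-bit field of R (e.g. a ZERO_EXTRACT
   destination).  */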
/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;
  machine_mode mode;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && CONST_INT_P (XEXP (XEXP (src, 0), 0))
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (VOIDmode, assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
                                XEXP (SUBREG_REG (XEXP (src, 0)), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (VOIDmode, assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
                                1, 1, 1, 0);
      if (assign != 0)
        return gen_rtx_SET (VOIDmode, assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (dest, 1))
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1)))
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
        ze_mask = -1;
      else
        ze_mask = ((unsigned HOST_WIDE_INT) 1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
        return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
        {
          mode = GET_MODE (src);
          src = gen_rtx_AND (mode, XEXP (src, 0),
                             gen_int_mode (and_mask & ze_mask, mode));
          return gen_rtx_SET (VOIDmode, dest, src);
        }
    }

  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  /* One or more SUBREGs might obscure the constant-position field
     assignment.  The first one we are likely to encounter is an outer
     narrowing SUBREG, which we can just strip for the purposes of
     identifying the constant-field assignment.  */
  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
    src = SUBREG_REG (src);

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && CONST_INT_P (XEXP (rhs, 1))
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (rhs) == AND
           && paradoxical_subreg_p (XEXP (rhs, 0))
           && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
           && CONST_INT_P (XEXP (rhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  /* The second SUBREG that might get in the way is a paradoxical
     SUBREG around the first operand of the AND.  We want to
     pretend the operand is as wide as the destination here.  We
     do this by adjusting the MEM to wider mode for the sole
     purpose of the call to rtx_equal_for_field_assignment_p.  Also
     note this trick only works for MEMs.  */
  else if (GET_CODE (lhs) == AND
           && paradoxical_subreg_p (XEXP (lhs, 0))
           && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
           && CONST_INT_P (XEXP (lhs, 1))
           && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
                                                dest, true))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
  if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
      || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  mode = (GET_CODE (assign) == STRICT_LOW_PART
          ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
                                                     GET_MODE (src),
                                                     other, pos),
                               dest);
  src = force_to_mode (src, mode,
                       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
                       ? ~(unsigned HOST_WIDE_INT) 0
                       : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
                       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && CONST_INT_P (XEXP (assign, 1))
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && CONST_INT_P (XEXP (src, 1))
      && UINTVAL (XEXP (src, 1))
         == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
    src = XEXP (src, 0);

  return gen_rtx_SET (VOIDmode, assign, src);
}
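
/* Illustrative sketch (not part of combine.c): the bit-level identities
   that make_field_assignment pattern-matches, checked with ordinary C
   arithmetic on 32-bit values.  The helper rotl32 and the concrete masks
   below are hypothetical and exist only for this example.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t rotl32 (uint32_t v, unsigned n)
{
  n &= 31;
  return n ? (v << n) | (v >> (32 - n)) : v;
}

int main (void)
{
  uint32_t x = 0xdeadbeef;
  for (unsigned pos = 0; pos < 32; pos++)
    {
      /* (and (rotate -2 pos) x) is the canonical form of
         (and (not (ashift 1 pos)) x): it clears exactly bit POS.  */
      assert ((rotl32 (0xfffffffeu, pos) & x) == (x & ~(UINT32_C (1) << pos)));
      /* (ior (ashift 1 pos) x) sets bit POS and leaves the rest of X alone.  */
      uint32_t set = x | (UINT32_C (1) << pos);
      assert (((set >> pos) & 1) == 1
              && (set & ~(UINT32_C (1) << pos)) == (x & ~(UINT32_C (1) << pos)));
    }

  /* ZERO_EXTRACT destination of WIDTH bits: an AND whose mask covers the
     whole field is redundant (the "complete overlap" case above).  */
  unsigned width = 5;
  uint32_t ze_mask = (UINT32_C (1) << width) - 1;   /* 0x1f */
  uint32_t and_mask = 0xff;
  assert ((and_mask & ze_mask) == ze_mask);
  return 0;
}
#endif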
/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
        return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
        return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

      /* This used to handle SUBREG, but this turned out to be counter-
         productive, since (subreg (op ...)) usually is not handled by
         insn patterns, and this "optimization" therefore transformed
         recognizable patterns into unrecognizable ones.  Therefore the
         SUBREG case was removed from here.

         It is possible that distributing SUBREG over arithmetic operations
         leads to an intermediate result that can then be optimized further,
         e.g. by moving the outer SUBREG to the other side of a SET as done
         in simplify_set.  This seems to have been the original intent of
         handling SUBREGs here.

         However, with current GCC this does not appear to actually happen,
         at least on major platforms.  If some case is found where removing
         the SUBREG case here prevents follow-on optimizations, distributing
         SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
           && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
                              apply_distributive_law (tem), other);
}
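
/* Illustrative sketch (not part of combine.c): the inverse-distribution
   identities relied on above, checked exhaustively on 4-bit values.  Plain
   standalone C; nothing here is GCC API.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (uint32_t a = 0; a < 16; a++)
    for (uint32_t b = 0; b < 16; b++)
      for (uint32_t c = 0; c < 16; c++)
        {
          /* (a & c) | (b & c) == (a | b) & c, and likewise for XOR.  */
          assert (((a & c) | (b & c)) == ((a | b) & c));
          assert (((a & c) ^ (b & c)) == ((a ^ b) & c));
          /* A shift is a multiply, so it distributes over addition.  */
          assert (((a << 2) + (b << 2)) == ((a + b) << 2));
          /* The one special case handled above:
             (a | c) ^ (b | c) == (a ^ b) & ~c.  */
          assert (((a | c) ^ (b | c)) == ((a ^ b) & ~c));
        }
  return 0;
}
#endif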
/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */

static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return NULL_RTX;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
                                                     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, optimize_this_for_speed_p)
          < set_src_cost (x, optimize_this_for_speed_p)))
    return tmp;

  return NULL_RTX;
}
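
/* Illustrative sketch (not part of combine.c): the motivating example from
   the comment above.  Distributing (A | B) & ~B gives (A & ~B) | (B & ~B);
   the second term is zero, so the whole expression collapses to A & ~B.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (uint32_t a = 0; a < 16; a++)
    for (uint32_t b = 0; b < 16; b++)
      {
        uint32_t original    = (a | b) & ~b;
        uint32_t distributed = (a & ~b) | (b & ~b);   /* second term is 0 */
        assert (distributed == original);
        assert (distributed == (a & ~b));
      }
  return 0;
}
#endif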
/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (machine_mode mode, rtx varop,
                          unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (CONST_INT_P (varop))
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
        (mode,
         apply_distributive_law
         (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
                               simplify_and_const_int (NULL_RTX,
                                                       GET_MODE (varop),
                                                       XEXP (varop, 0),
                                                       constop),
                               simplify_and_const_int (NULL_RTX,
                                                       GET_MODE (varop),
                                                       XEXP (varop, 1),
                                                       constop))));

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && exact_log2 (constop + 1) >= 0)
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
        return o1;
      if (o1 == const0_rtx)
        return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}
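
/* Illustrative sketch (not part of combine.c): two of the rewrites above in
   plain C.  If V is known to be 0 or 1, then (-V) & (1 << K) equals V << K,
   which is why the NEG case becomes an ASHIFT; and if CONSTOP covers every
   possibly-nonzero bit of VAROP, the AND is a no-op.  The concrete values
   below are made up for the example.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (uint32_t v = 0; v <= 1; v++)
    for (unsigned k = 0; k < 31; k++)
      assert (((0u - v) & (UINT32_C (1) << k)) == (v << k));

  uint32_t varop = 0x0000ab00;      /* nonzero bits confined to 0x0000ff00 */
  uint32_t constop = 0x0000ff00;
  assert ((varop & constop) == varop);   /* masking only insignificant bits */
  return 0;
}
#endif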
/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
                        unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
                             gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}
/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
                              const_rtx known_x ATTRIBUTE_UNUSED,
                              machine_mode known_mode ATTRIBUTE_UNUSED,
                              unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
                              unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
          || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
              && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      unsigned HOST_WIDE_INT mask = rsp->last_set_nonzero_bits;

      if (GET_MODE_PRECISION (rsp->last_set_mode) < GET_MODE_PRECISION (mode))
        /* We don't know anything about the upper bits.  */
        mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (rsp->last_set_mode);

      *nonzero &= mask;
      return NULL;
    }

  tem = get_last_value (x);

  if (tem)
    {
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is narrower than MODE and TEM is a non-negative
         constant that would appear negative in the mode of X,
         sign-extend it for use in reg_nonzero_bits because some
         machines (maybe most) will actually do the sign-extension
         and this is the conservative approach.

         ??? For 2.5, try to tighten up the MD files in this regard
         instead of this kludge.  */

      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
          && CONST_INT_P (tem)
          && INTVAL (tem) > 0
          && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
        tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
#endif
      return tem;
    }
  else if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
        /* We don't know anything about the upper bits.  */
        mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));

      *nonzero &= mask;
    }

  return NULL;
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
                                     const_rtx known_x ATTRIBUTE_UNUSED,
                                     machine_mode known_mode ATTRIBUTE_UNUSED,
                                     unsigned int known_ret ATTRIBUTE_UNUSED,
                                     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
           && rsp->last_set_label < label_tick)
          || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
          || (REGNO (x) >= FIRST_PSEUDO_REGISTER
              && REGNO (x) < reg_n_sets_max
              && REG_N_SETS (REGNO (x)) == 1
              && !REGNO_REG_SET_P
                  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                   REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}
/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both cases, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  return (unsignedp
          ? (HWI_COMPUTABLE_MODE_P (mode)
             ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
                               - floor_log2 (nonzero_bits (x, mode)))
             : 0)
          : num_sign_bit_copies (x, mode) - 1);
}
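
/* Illustrative sketch (not part of combine.c): the "spare bits" claim from
   the comment above, for the unsigned case in 8-bit arithmetic.  A value
   with at least one high-order zero bit is at most 0x7f, so two such values
   sum to at most 0xfe and cannot wrap around.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (unsigned a = 0; a <= 0x7f; a++)    /* at least 1 high-order zero bit */
    for (unsigned b = 0; b <= 0x7f; b++)
      assert (a + b <= 0xfe && (uint8_t) (a + b) == a + b);
  return 0;
}
#endif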
/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only
   NEG, PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if
   the proper result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
                 enum rtx_code op1, HOST_WIDE_INT const1,
                 machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same
     or if OP1 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
        {
        case AND:
          const0 &= const1;
          break;
        case IOR:
          const0 |= const1;
          break;
        case XOR:
          const0 ^= const1;
          break;
        case PLUS:
          const0 += const1;
          break;
        case NEG:
          op0 = UNKNOWN;
          break;
        default:
          break;
        }
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
        if (op1 == AND)
          /* (a & b) | b == b  */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) | b == a | b  */
          ;
        break;

      case XOR:
        if (op1 == AND)
          /* (a & b) ^ b == (~a) & b  */
          op0 = AND, *pcomp_p = 1;
        else /* op1 == IOR */
          /* (a | b) ^ b == a & ~b  */
          op0 = AND, const0 = ~const0;
        break;

      case AND:
        if (op1 == IOR)
          /* (a | b) & b == b  */
          op0 = SET;
        else /* op1 == XOR */
          /* (a ^ b) & b == (~a) & b  */
          *pcomp_p = 1;
        break;

      default:
        break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
           && op0 == AND)
    op0 = UNKNOWN;

  *pop0 = op0;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  if (op0 != UNKNOWN && op0 != NEG)
    *pconst0 = trunc_int_for_mode (const0, mode);

  return 1;
}
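
/* Illustrative sketch (not part of combine.c): the pairwise identities the
   switch above relies on when the two outer constants are equal, checked
   exhaustively on 4-bit values in plain C.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (uint32_t a = 0; a < 16; a++)
    for (uint32_t b = 0; b < 16; b++)
      {
        assert (((a & b) | b) == b);              /* IOR of AND */
        assert (((a ^ b) | b) == (a | b));        /* IOR of XOR */
        assert (((a & b) ^ b) == (~a & b));       /* XOR of AND */
        assert (((a | b) ^ b) == (a & ~b));       /* XOR of IOR */
        assert (((a | b) & b) == b);              /* AND of IOR */
        assert (((a ^ b) & b) == (~a & b));       /* AND of XOR */
      }
  return 0;
}
#endif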
/* A helper to simplify_shift_const_1 to determine the mode we can perform
   the shift in.  The original shift operation CODE is performed on OP in
   ORIG_MODE.  Return the wider mode MODE if we can perform the operation
   in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
   result of the shift is subject to operation OUTER_CODE with operand
   OUTER_CONST.  */

static machine_mode
try_widen_shift_mode (enum rtx_code code, rtx op, int count,
                      machine_mode orig_mode, machine_mode mode,
                      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
{
  if (orig_mode == mode)
    return mode;
  gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));

  /* In general we can't perform in wider mode for right shift and rotate.  */
  switch (code)
    {
    case ASHIFTRT:
      /* We can still widen if the bits brought in from the left are identical
         to the sign bit of ORIG_MODE.  */
      if (num_sign_bit_copies (op, mode)
          > (unsigned) (GET_MODE_PRECISION (mode)
                        - GET_MODE_PRECISION (orig_mode)))
        return mode;
      return orig_mode;

    case LSHIFTRT:
      /* Similarly here but with zero bits.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
        return mode;

      /* We can also widen if the bits brought in will be masked off.  This
         operation is performed in ORIG_MODE.  */
      if (outer_code == AND)
        {
          int care_bits = low_bitmask_len (orig_mode, outer_const);

          if (care_bits >= 0
              && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
            return mode;
        }
      /* fall through */

    case ROTATE:
      return orig_mode;

    case ROTATERT:
      gcc_unreachable ();

    default:
      return mode;
    }
}
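
/* Illustrative sketch (not part of combine.c): why a logical right shift can
   be widened when the outer operation is an AND that masks off the bits the
   wider mode would bring in.  The concrete sizes are made up: ORIG_MODE is
   8 bits, MODE is 32 bits, COUNT is 3, and the outer constant 0x1f has a
   low-bitmask length of 5, with 8 - 5 >= 3.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  for (unsigned v = 0; v < 256; v++)
    {
      uint32_t op = 0xabcdef00u | v;   /* bits above the low byte are garbage */
      uint8_t narrow = (uint8_t) (((uint8_t) op >> 3) & 0x1f); /* 8-bit shift */
      uint32_t wide = (op >> 3) & 0x1f;                        /* 32-bit shift */
      assert (narrow == wide);   /* the AND hides every bit brought in */
    }
  return 0;
}
#endif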
/* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
   of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
   if we cannot simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
                        rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  machine_mode mode = result_mode;
  machine_mode shift_mode, tmode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new_rtx, x;

  /* Make sure to truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     detect simplifications.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
    return NULL_RTX;

  count = orig_count;
  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
        return NULL_RTX;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
        {
          unsigned int bitsize = GET_MODE_PRECISION (result_mode);
          code = ROTATE;
          if (VECTOR_MODE_P (result_mode))
            count = bitsize / GET_MODE_NUNITS (result_mode) - count;
          else
            count = bitsize - count;
        }

      shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
                                         mode, outer_op, outer_const);

      /* Handle cases where the count is greater than the size of the mode
         minus 1.  For ASHIFT, use the size minus one as the count (this can
         occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
         take the count modulo the size.  For other shifts, the result is
         zero.

         Since these shifts are being produced by the compiler by combining
         multiple operations, each of which are defined, we know what the
         result is supposed to be.  */

      if (count > (GET_MODE_PRECISION (shift_mode) - 1))
        {
          if (code == ASHIFTRT)
            count = GET_MODE_PRECISION (shift_mode) - 1;
          else if (code == ROTATE || code == ROTATERT)
            count %= GET_MODE_PRECISION (shift_mode);
          else
            {
              /* We can't simply return zero because there may be an
                 outer op.  */
              varop = const0_rtx;
              count = 0;
              break;
            }
        }

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
         here would cause an infinite loop.  */
      if (complement_p)
        break;

      /* An arithmetic right shift of a quantity known to be -1 or 0
         is a no-op.  */
      if (code == ASHIFTRT
          && (num_sign_bit_copies (varop, shift_mode)
              == GET_MODE_PRECISION (shift_mode)))
        {
          count = 0;
          break;
        }

      /* If we are doing an arithmetic right shift and discarding all but
         the sign bit copies, this is equivalent to doing a shift by the
         bitsize minus one.  Convert it into that shift because it will often
         allow other simplifications.  */

      if (code == ASHIFTRT
          && (count + num_sign_bit_copies (varop, shift_mode)
              >= GET_MODE_PRECISION (shift_mode)))
        count = GET_MODE_PRECISION (shift_mode) - 1;

      /* We simplify the tests below and elsewhere by converting
         ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
         `make_compound_operation' will convert it to an ASHIFTRT for
         those machines (such as VAX) that don't have an LSHIFTRT.  */
      if (code == ASHIFTRT
          && val_signbit_known_clear_p (shift_mode,
                                        nonzero_bits (varop, shift_mode)))
        code = LSHIFTRT;

      if (((code == LSHIFTRT
            && HWI_COMPUTABLE_MODE_P (shift_mode)
            && !(nonzero_bits (varop, shift_mode) >> count))
           || (code == ASHIFT
               && HWI_COMPUTABLE_MODE_P (shift_mode)
               && !((nonzero_bits (varop, shift_mode) << count)
                    & GET_MODE_MASK (shift_mode))))
          && !side_effects_p (varop))
        varop = const0_rtx;

      switch (GET_CODE (varop))
        {
        case SIGN_EXTEND:
        case ZERO_EXTEND:
        case SIGN_EXTRACT:
        case ZERO_EXTRACT:
          new_rtx = expand_compound_operation (varop);
          if (new_rtx != varop)
            {
              varop = new_rtx;
              continue;
            }
          break;

        case MEM:
          /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
             minus the width of a smaller mode, we can do this with a
             SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
          if ((code == ASHIFTRT || code == LSHIFTRT)
              && ! mode_dependent_address_p (XEXP (varop, 0),
                                             MEM_ADDR_SPACE (varop))
              && ! MEM_VOLATILE_P (varop)
              && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
                                         MODE_INT, 1)) != BLKmode)
            {
              new_rtx = adjust_address_nv (varop, tmode,
                                           BYTES_BIG_ENDIAN ? 0
                                           : count / BITS_PER_UNIT);

              varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
                                     : ZERO_EXTEND, mode, new_rtx);
              count = 0;
              continue;
            }
          break;

        case SUBREG:
          /* If VAROP is a SUBREG, strip it as long as the inner operand has
             the same number of words as what we've seen so far.  Then store
             the widest mode in MODE.  */
          if (subreg_lowpart_p (varop)
              && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
                  > GET_MODE_SIZE (GET_MODE (varop)))
              && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
                                  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                 == mode_words
              && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
              && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
            {
              varop = SUBREG_REG (varop);
              if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
                mode = GET_MODE (varop);
              continue;
            }
          break;
        case MULT:
          /* Some machines use MULT instead of ASHIFT because MULT
             is cheaper.  But it is still better on those machines to
             merge two shifts into one.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
            {
              varop
                = simplify_gen_binary (ASHIFT, GET_MODE (varop),
                                       XEXP (varop, 0),
                                       GEN_INT (exact_log2 (
                                                UINTVAL (XEXP (varop, 1)))));
              continue;
            }
          break;

        case UDIV:
          /* Similar, for when divides are cheaper.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
            {
              varop
                = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
                                       XEXP (varop, 0),
                                       GEN_INT (exact_log2 (
                                                UINTVAL (XEXP (varop, 1)))));
              continue;
            }
          break;

        case ASHIFTRT:
          /* If we are extracting just the sign bit of an arithmetic
             right shift, that shift is not needed.  However, the sign
             bit of a wider mode may be different from what would be
             interpreted as the sign bit in a narrower mode, so, if
             the result is narrower, don't discard the shift.  */
          if (code == LSHIFTRT
              && count == (GET_MODE_BITSIZE (result_mode) - 1)
              && (GET_MODE_BITSIZE (result_mode)
                  >= GET_MODE_BITSIZE (GET_MODE (varop))))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* ... fall through ...  */

        case LSHIFTRT:
        case ASHIFT:
        case ROTATE:
          /* Here we have two nested shifts.  The result is usually the
             AND of a new shift with a mask.  We compute the result below.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && INTVAL (XEXP (varop, 1)) >= 0
              && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
              && HWI_COMPUTABLE_MODE_P (result_mode)
              && HWI_COMPUTABLE_MODE_P (mode)
              && !VECTOR_MODE_P (result_mode))
            {
              enum rtx_code first_code = GET_CODE (varop);
              unsigned int first_count = INTVAL (XEXP (varop, 1));
              unsigned HOST_WIDE_INT mask;
              rtx mask_rtx;

              /* We have one common special case.  We can't do any merging if
                 the inner code is an ASHIFTRT of a smaller mode.  However, if
                 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
                 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
                 we can convert it to
                 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
                 This simplifies certain SIGN_EXTEND operations.  */
              if (code == ASHIFT && first_code == ASHIFTRT
                  && count == (GET_MODE_PRECISION (result_mode)
                               - GET_MODE_PRECISION (GET_MODE (varop))))
                {
                  /* C3 has the low-order C1 bits zero.  */

                  mask = GET_MODE_MASK (mode)
                         & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);

                  varop = simplify_and_const_int (NULL_RTX, result_mode,
                                                  XEXP (varop, 0), mask);
                  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
                                                varop, count);
                  count = first_count;
                  code = ASHIFTRT;
                  continue;
                }

              /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
                 than C1 high-order bits equal to the sign bit, we can convert
                 this to either an ASHIFT or an ASHIFTRT depending on the
                 two counts.

                 We cannot do this if VAROP's mode is not SHIFT_MODE.  */

              if (code == ASHIFTRT && first_code == ASHIFT
                  && GET_MODE (varop) == shift_mode
                  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
                      > first_count))
                {
                  varop = XEXP (varop, 0);
                  count -= first_count;
                  if (count < 0)
                    {
                      count = -count;
                      code = ASHIFT;
                    }

                  continue;
                }

              /* There are some cases we can't do.  If CODE is ASHIFTRT,
                 we can only do this if FIRST_CODE is also ASHIFTRT.

                 We can't do the case when CODE is ROTATE and FIRST_CODE is
                 ASHIFTRT.

                 If the mode of this shift is not the mode of the outer shift,
                 we can't do this if either shift is a right shift or ROTATE.

                 Finally, we can't do any of these if the mode is too wide
                 unless the codes are the same.

                 Handle the case where the shift codes are the same
                 first.  */

              if (code == first_code)
                {
                  if (GET_MODE (varop) != result_mode
                      && (code == ASHIFTRT || code == LSHIFTRT
                          || code == ROTATE))
                    break;

                  count += first_count;
                  varop = XEXP (varop, 0);
                  continue;
                }

              if (code == ASHIFTRT
                  || (code == ROTATE && first_code == ASHIFTRT)
                  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
                  || (GET_MODE (varop) != result_mode
                      && (first_code == ASHIFTRT || first_code == LSHIFTRT
                          || first_code == ROTATE
                          || code == ROTATE)))
                break;

              /* To compute the mask to apply after the shift, shift the
                 nonzero bits of the inner shift the same way the
                 outer shift will.  */

              mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
                                       result_mode);

              mask_rtx
                = simplify_const_binary_operation (code, result_mode, mask_rtx,
                                                   GEN_INT (count));

              /* Give up if we can't compute an outer operation to use.  */
              if (mask_rtx == 0
                  || !CONST_INT_P (mask_rtx)
                  || ! merge_outer_ops (&outer_op, &outer_const, AND,
                                        INTVAL (mask_rtx),
                                        result_mode, &complement_p))
                break;

              /* If the shifts are in the same direction, we add the
                 counts.  Otherwise, we subtract them.  */
              if ((code == ASHIFTRT || code == LSHIFTRT)
                  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
                count += first_count;
              else
                count -= first_count;

              /* If COUNT is positive, the new shift is usually CODE,
                 except for the two exceptions below, in which case it is
                 FIRST_CODE.  If the count is negative, FIRST_CODE should
                 always be used.  */
              if (count > 0
                  && ((first_code == ROTATE && code == ASHIFT)
                      || (first_code == ASHIFTRT && code == LSHIFTRT)))
                code = first_code;
              else if (count < 0)
                code = first_code, count = -count;

              varop = XEXP (varop, 0);
              continue;
            }

          /* If we have (A << B << C) for any shift, we can convert this to
             (A << C << B).  This wins if A is a constant.  Only try this if
             B is not a constant.  */

          else if (GET_CODE (varop) == code
                   && CONST_INT_P (XEXP (varop, 0))
                   && !CONST_INT_P (XEXP (varop, 1)))
            {
              rtx new_rtx = simplify_const_binary_operation (code, mode,
                                                             XEXP (varop, 0),
                                                             GEN_INT (count));
              varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
              count = 0;
              continue;
            }
          break;

        case NOT:
          if (VECTOR_MODE_P (mode))
            break;

          /* Make this fit the case below.  */
          varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
          continue;
        case IOR:
        case AND:
        case XOR:
          /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have an (le X 0) operation.  If we have an arithmetic shift
             and STORE_FLAG_VALUE is 1 or we have a logical shift with
             STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

          if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
              && XEXP (XEXP (varop, 0), 1) == constm1_rtx
              && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (GET_MODE (varop), varop);

              continue;
            }

          /* If we have (shift (logical)), move the logical to the outside
             to allow it to possibly combine with another logical and the
             shift to combine with another shift.  This also canonicalizes to
             what a ZERO_EXTRACT looks like.  Also, some machines have
             (and (shift)) insns.  */

          if (CONST_INT_P (XEXP (varop, 1))
              /* We can't do this if we have (ashiftrt (xor)) and the
                 constant has its sign bit set in shift_mode with shift_mode
                 wider than result_mode.  */
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && result_mode != shift_mode
                   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                              shift_mode))
              && (new_rtx = simplify_const_binary_operation
                  (code, result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
                   GEN_INT (count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
                                  INTVAL (new_rtx), result_mode, &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* If we can't do that, try to simplify the shift in each arm of the
             logical expression, make a new logical expression, and apply
             the inverse distributive law.  This also can't be done for
             (ashiftrt (xor)) where we've widened the shift and the constant
             changes the sign bit.  */
          if (CONST_INT_P (XEXP (varop, 1))
              && !(code == ASHIFTRT && GET_CODE (varop) == XOR
                   && result_mode != shift_mode
                   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
                                              shift_mode)))
            {
              rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
                                              XEXP (varop, 0), count);
              rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
                                              XEXP (varop, 1), count);

              varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
                                           lhs, rhs);
              varop = apply_distributive_law (varop);

              count = 0;
              continue;
            }
          break;

        case EQ:
          /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
             says that the sign bit can be tested, FOO has mode MODE, C is
             GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
             that may be nonzero.  */
          if (code == LSHIFTRT
              && XEXP (varop, 1) == const0_rtx
              && GET_MODE (XEXP (varop, 0)) == result_mode
              && count == (GET_MODE_PRECISION (result_mode) - 1)
              && HWI_COMPUTABLE_MODE_P (result_mode)
              && STORE_FLAG_VALUE == -1
              && nonzero_bits (XEXP (varop, 0), result_mode) == 1
              && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              count = 0;
              continue;
            }
          break;

        case NEG:
          /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
             than the number of bits in the mode is equivalent to A.  */
          if (code == LSHIFTRT
              && count == (GET_MODE_PRECISION (result_mode) - 1)
              && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
            {
              varop = XEXP (varop, 0);
              count = 0;
              continue;
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Move the
             NEG outside to allow shifts to combine.  */
          if (code == ASHIFT
              && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
                                  &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }
          break;

        case PLUS:
          /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
             is one less than the number of bits in the mode is
             equivalent to (xor A 1).  */
          if (code == LSHIFTRT
              && count == (GET_MODE_PRECISION (result_mode) - 1)
              && XEXP (varop, 1) == constm1_rtx
              && nonzero_bits (XEXP (varop, 0), result_mode) == 1
              && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
                                  &complement_p))
            {
              count = 0;
              varop = XEXP (varop, 0);
              continue;
            }

          /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
             that might be nonzero in BAR are those being shifted out and those
             bits are known zero in FOO, we can replace the PLUS with FOO.
             Similarly in the other operand order.  This code occurs when
             we are computing the size of a variable-size array.  */

          if ((code == ASHIFTRT || code == LSHIFTRT)
              && count < HOST_BITS_PER_WIDE_INT
              && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
              && (nonzero_bits (XEXP (varop, 1), result_mode)
                  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
            {
              varop = XEXP (varop, 0);
              continue;
            }
          else if ((code == ASHIFTRT || code == LSHIFTRT)
                   && count < HOST_BITS_PER_WIDE_INT
                   && HWI_COMPUTABLE_MODE_P (result_mode)
                   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
                            >> count)
                   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
                            & nonzero_bits (XEXP (varop, 1),
                                            result_mode)))
            {
              varop = XEXP (varop, 1);
              continue;
            }

          /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
          if (code == ASHIFT
              && CONST_INT_P (XEXP (varop, 1))
              && (new_rtx = simplify_const_binary_operation
                  (ASHIFT, result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
                   GEN_INT (count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, PLUS,
                                  INTVAL (new_rtx), result_mode, &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          /* Check for 'PLUS signbit', which is the canonical form of 'XOR
             signbit', and attempt to change the PLUS to an XOR and move it to
             the outer operation as is done above in the AND/IOR/XOR case
             leg for shift(logical).  See details in logical handling above
             for reasoning in doing so.  */
          if (code == LSHIFTRT
              && CONST_INT_P (XEXP (varop, 1))
              && mode_signbit_p (result_mode, XEXP (varop, 1))
              && (new_rtx = simplify_const_binary_operation
                  (code, result_mode,
                   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
                   GEN_INT (count))) != 0
              && CONST_INT_P (new_rtx)
              && merge_outer_ops (&outer_op, &outer_const, XOR,
                                  INTVAL (new_rtx), result_mode, &complement_p))
            {
              varop = XEXP (varop, 0);
              continue;
            }

          break;

        case MINUS:
          /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
             with C the size of VAROP - 1 and the shift is logical if
             STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
             we have a (gt X 0) operation.  If the shift is arithmetic with
             STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
             we have a (neg (gt X 0)) operation.  */

          if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
              && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
              && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
              && (code == LSHIFTRT || code == ASHIFTRT)
              && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
              && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
              && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
            {
              count = 0;
              varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
                                  const0_rtx);

              if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
                varop = gen_rtx_NEG (GET_MODE (varop), varop);

              continue;
            }
          break;
        case TRUNCATE:
          /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
             if the truncate does not affect the value.  */
          if (code == LSHIFTRT
              && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
              && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
              && (INTVAL (XEXP (XEXP (varop, 0), 1))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
                      - GET_MODE_PRECISION (GET_MODE (varop)))))
            {
              rtx varop_inner = XEXP (varop, 0);

              varop_inner
                = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
                                    XEXP (varop_inner, 0),
                                    GEN_INT
                                    (count + INTVAL (XEXP (varop_inner, 1))));
              varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
              count = 0;
              continue;
            }
          break;

        default:
          break;
        }

      break;
    }

  shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
                                     outer_op, outer_const);

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));

  /* If we were doing an LSHIFTRT in a wider mode than it was originally,
     turn off all the bits that the shift would have turned off.  */
  if (orig_code == LSHIFTRT && result_mode != shift_mode)
    x = simplify_and_const_int (NULL_RTX, shift_mode, x,
                                GET_MODE_MASK (result_mode) >> orig_count);

  /* Do the remainder of the processing in RESULT_MODE.  */
  x = gen_lowpart_or_truncate (result_mode, x);

  /* If COMPLEMENT_P is set, we have to complement X before doing the outer
     operation.  */
  if (complement_p)
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);

  if (outer_op != UNKNOWN)
    {
      if (GET_RTX_CLASS (outer_op) != RTX_UNARY
          && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
        outer_const = trunc_int_for_mode (outer_const, result_mode);

      if (outer_op == AND)
        x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
      else if (outer_op == SET)
        {
          /* This means that we have determined that the result is
             equivalent to a constant.  This should be rare.  */
          if (!side_effects_p (x))
            x = GEN_INT (outer_const);
        }
      else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
        x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
      else
        x = simplify_gen_binary (outer_op, result_mode, x,
                                 GEN_INT (outer_const));
    }

  return x;
}
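
/* Illustrative sketch (not part of combine.c): three of the shift rewrites
   above, restated in 32-bit C.  Same-direction shifts merge by adding their
   counts, and when A is known to be 0 or 1, shifting (neg A) or (A - 1)
   right by 31 recovers A or A ^ 1 respectively.  The constant 0x12345678 is
   an arbitrary test value.  */
#if 0
#include <assert.h>
#include <stdint.h>

int main (void)
{
  uint32_t x = 0x12345678;
  for (unsigned a = 0; a < 16; a++)
    for (unsigned b = 0; b < 16; b++)
      assert (((x >> a) >> b) == (x >> (a + b)));   /* counts simply add */

  for (uint32_t A = 0; A <= 1; A++)
    {
      assert (((0u - A) >> 31) == A);        /* (lshiftrt (neg A) 31) == A */
      assert (((A - 1u) >> 31) == (A ^ 1));  /* (lshiftrt (plus A -1) 31) == A^1 */
    }
  return 0;
}
#endif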
/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  If we cannot simplify it,
   return X or, if it is NULL, synthesize the expression with
   simplify_gen_binary.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
                      rtx varop, int count)
{
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
  if (GET_MODE (x) != result_mode)
    x = gen_lowpart (result_mode, x);
  return x;
}
/* Like recog, but we receive the address of a pointer to a new pattern.
   We try to match the rtx that the pointer points to.
   If that fails, we may try to modify or replace the pattern,
   storing the replacement into the same pointer object.

   Modifications include deletion or addition of CLOBBERs.

   PNOTES is a pointer to a location where any REG_UNUSED notes added for
   the CLOBBERs are placed.

   The value is the final insn code from the pattern ultimately matched,
   otherwise -1.  */

static int
recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
{
  rtx pat = *pnewpat;
  rtx pat_without_clobbers;
  int insn_code_number;
  int num_clobbers_to_add = 0;
  int i;
  rtx notes = NULL_RTX;
  rtx old_notes, old_pat;
  int old_icode;

  /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
     we use to indicate that something didn't match.  If we find such a
     thing, force rejection.  */
  if (GET_CODE (pat) == PARALLEL)
    for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
          && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
        return -1;

  old_pat = PATTERN (insn);
  old_notes = REG_NOTES (insn);
  PATTERN (insn) = pat;
  REG_NOTES (insn) = NULL_RTX;

  insn_code_number = recog (pat, insn, &num_clobbers_to_add);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (insn_code_number < 0)
        fputs ("Failed to match this instruction:\n", dump_file);
      else
        fputs ("Successfully matched this instruction:\n", dump_file);
      print_rtl_single (dump_file, pat);
    }

  /* If it isn't, there is the possibility that we previously had an insn
     that clobbered some register as a side effect, but the combined
     insn doesn't need to do that.  So try once more without the clobbers
     unless this represents an ASM insn.  */

  if (insn_code_number < 0 && ! check_asm_operands (pat)
      && GET_CODE (pat) == PARALLEL)
    {
      int pos;

      for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
        if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
          {
            if (i != pos)
              SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
            pos++;
          }

      SUBST_INT (XVECLEN (pat, 0), pos);

      if (pos == 1)
        pat = XVECEXP (pat, 0, 0);

      PATTERN (insn) = pat;
      insn_code_number = recog (pat, insn, &num_clobbers_to_add);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (insn_code_number < 0)
            fputs ("Failed to match this instruction:\n", dump_file);
          else
            fputs ("Successfully matched this instruction:\n", dump_file);
          print_rtl_single (dump_file, pat);
        }
    }

  pat_without_clobbers = pat;

  PATTERN (insn) = old_pat;
  REG_NOTES (insn) = old_notes;

  /* Recognize all noop sets, these will be killed by followup pass.  */
  if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
    insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;

  /* If we had any clobbers to add, make a new pattern that contains
     them.  Then check to make sure that all of them are dead.  */
  if (num_clobbers_to_add)
    {
      rtx newpat = gen_rtx_PARALLEL (VOIDmode,
                                     rtvec_alloc (GET_CODE (pat) == PARALLEL
                                                  ? (XVECLEN (pat, 0)
                                                     + num_clobbers_to_add)
                                                  : num_clobbers_to_add + 1));

      if (GET_CODE (pat) == PARALLEL)
        for (i = 0; i < XVECLEN (pat, 0); i++)
          XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
      else
        XVECEXP (newpat, 0, 0) = pat;

      add_clobbers (newpat, insn_code_number);

      for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
           i < XVECLEN (newpat, 0); i++)
        {
          if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
              && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
            return -1;
          if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
            {
              gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
              notes = alloc_reg_note (REG_UNUSED,
                                      XEXP (XVECEXP (newpat, 0, i), 0), notes);
            }
        }
      pat = newpat;
    }

  if (insn_code_number >= 0
      && insn_code_number != NOOP_MOVE_INSN_CODE)
    {
      old_pat = PATTERN (insn);
      old_notes = REG_NOTES (insn);
      old_icode = INSN_CODE (insn);
      PATTERN (insn) = pat;
      REG_NOTES (insn) = notes;

      /* Allow targets to reject combined insn.  */
      if (!targetm.legitimate_combined_insn (insn))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fputs ("Instruction not appropriate for target.",
                   dump_file);

          /* Callers expect recog_for_combine to strip
             clobbers from the pattern on failure.  */
          pat = pat_without_clobbers;
          notes = NULL_RTX;

          insn_code_number = -1;
        }

      PATTERN (insn) = old_pat;
      REG_NOTES (insn) = old_notes;
      INSN_CODE (insn) = old_icode;
    }

  *pnewpat = pat;
  *pnotes = notes;

  return insn_code_number;
}
/* Like gen_lowpart_general but for use by combine.  In combine it
   is not possible to create any new pseudoregs.  However, it is
   safe to create invalid memory addresses, because combine will
   try to recognize them and all they will do is make the combine
   attempt fail.

   If for some reason this cannot do its job, an rtx
   (clobber (const_int 0)) is returned.
   An insn containing that will not be recognized.  */

static rtx
gen_lowpart_for_combine (machine_mode omode, rtx x)
{
  machine_mode imode = GET_MODE (x);
  unsigned int osize = GET_MODE_SIZE (omode);
  unsigned int isize = GET_MODE_SIZE (imode);
  rtx result;

  if (omode == imode)
    return x;

  /* We can only support MODE being wider than a word if X is a
     constant integer or has a mode the same size.  */
  if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
      && ! (CONST_SCALAR_INT_P (x) || isize == osize))
    goto fail;

  /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
     won't know what to do.  So we will strip off the SUBREG here and
     process normally.  */
  if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
    {
      x = SUBREG_REG (x);

      /* For use in case we fall down into the address adjustments
         further below, we need to adjust the known mode and size of
         x; imode and isize, since we just adjusted x.  */
      imode = GET_MODE (x);

      if (imode == omode)
        return x;

      isize = GET_MODE_SIZE (imode);
    }

  result = gen_lowpart_common (omode, x);

  if (result)
    return result;

  if (MEM_P (x))
    {
      int offset = 0;

      /* Refuse to work on a volatile memory ref or one with a mode-dependent
         address.  */
      if (MEM_VOLATILE_P (x)
          || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
        goto fail;

      /* If we want to refer to something bigger than the original memref,
         generate a paradoxical subreg instead.  That will force a reload
         of the original memref X.  */
      if (isize < osize)
        return gen_rtx_SUBREG (omode, x, 0);

      if (WORDS_BIG_ENDIAN)
        offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);

      /* Adjust the address so that the address-after-the-data is
         unchanged.  */
      if (BYTES_BIG_ENDIAN)
        offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);

      return adjust_address_nv (x, omode, offset);
    }

  /* If X is a comparison operator, rewrite it in a new mode.  This
     probably won't match, but may allow further simplifications.  */
  else if (COMPARISON_P (x))
    return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));

  /* If we couldn't simplify X any other way, just enclose it in a
     SUBREG.  Normally, this SUBREG won't match, but some patterns may
     include an explicit SUBREG or we may simplify it further in combine.  */
  else
    {
      int offset = 0;
      rtx res;

      offset = subreg_lowpart_offset (omode, imode);
      if (imode == VOIDmode)
        {
          imode = int_mode_for_mode (omode);
          x = gen_lowpart_common (imode, x);
          if (x == NULL)
            goto fail;
        }
      res = simplify_gen_subreg (omode, x, imode, offset);
      if (res)
        return res;
    }

 fail:
  return gen_rtx_CLOBBER (omode, const0_rtx);
}
11109 /* Try to simplify a comparison between OP0 and a constant OP1,
11110 where CODE is the comparison code that will be tested, into a
11111 (CODE OP0 const0_rtx) form.
11113 The result is a possibly different comparison code to use.
11114 *POP1 may be updated. */
11116 static enum rtx_code
11117 simplify_compare_const (enum rtx_code code
, machine_mode mode
,
11118 rtx op0
, rtx
*pop1
)
11120 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
11121 HOST_WIDE_INT const_op
= INTVAL (*pop1
);
11123 /* Get the constant we are comparing against and turn off all bits
11124 not on in our mode. */
11125 if (mode
!= VOIDmode
)
11126 const_op
= trunc_int_for_mode (const_op
, mode
);
11128 /* If we are comparing against a constant power of two and the value
11129 being compared can only have that single bit nonzero (e.g., it was
11130 `and'ed with that bit), we can replace this with a comparison
11133 && (code
== EQ
|| code
== NE
|| code
== GE
|| code
== GEU
11134 || code
== LT
|| code
== LTU
)
11135 && mode_width
- 1 < HOST_BITS_PER_WIDE_INT
11136 && exact_log2 (const_op
& GET_MODE_MASK (mode
)) >= 0
11137 && (nonzero_bits (op0
, mode
)
11138 == (unsigned HOST_WIDE_INT
) (const_op
& GET_MODE_MASK (mode
))))
11140 code
= (code
== EQ
|| code
== GE
|| code
== GEU
? NE
: EQ
);
  /* Similarly, if we are comparing a value known to be either -1 or
     0 with -1, change it to the opposite comparison against zero.  */
  if (const_op == -1
      && (code == EQ || code == NE || code == GT || code == LE
          || code == GEU || code == LTU)
      && num_sign_bit_copies (op0, mode) == mode_width)
    {
      code = (code == EQ || code == LE || code == GEU ? NE : EQ);
      const_op = 0;
    }
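  /* For instance, if every bit of OP0 is known to be a copy of its sign
     bit (so OP0 is either 0 or -1), (eq OP0 (const_int -1)) becomes
     (ne OP0 (const_int 0)) and (ne OP0 (const_int -1)) becomes
     (eq OP0 (const_int 0)).  */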
  /* Do some canonicalizations based on the comparison code.  We prefer
     comparisons against zero and then prefer equality comparisons.
     If we can reduce the size of a constant, we will do that too.  */
  switch (code)
    {
    case LT:
      /* < C is equivalent to <= (C - 1) */
      if (const_op > 0)
        {
          const_op -= 1;
          code = LE;
          /* ... fall through to LE case below.  */
        }
      else
        break;

    case LE:
      /* <= C is equivalent to < (C + 1); we do this for C < 0  */
      if (const_op < 0)
        {
          const_op += 1;
          code = LT;
        }

      /* If we are doing a <= 0 comparison on a value known to have
         a zero sign bit, we can replace this with == 0.  */
      else if (const_op == 0
               && mode_width - 1 < HOST_BITS_PER_WIDE_INT
               && (nonzero_bits (op0, mode)
                   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
        code = EQ;
      break;

    case GE:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 0)
        {
          const_op -= 1;
          code = GT;
          /* ... fall through to GT below.  */
        }
      else
        break;

    case GT:
      /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
      if (const_op < 0)
        {
          const_op += 1;
          code = GE;
        }

      /* If we are doing a > 0 comparison on a value known to have
         a zero sign bit, we can replace this with != 0.  */
      else if (const_op == 0
               && mode_width - 1 < HOST_BITS_PER_WIDE_INT
               && (nonzero_bits (op0, mode)
                   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
        code = NE;
      break;

    case LTU:
      /* < C is equivalent to <= (C - 1).  */
      if (const_op > 0)
        {
          const_op -= 1;
          code = LEU;
          /* ... fall through ...  */
        }
      /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
               && (unsigned HOST_WIDE_INT) const_op
                  == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
        {
          const_op = 0;
          code = GE;
          break;
        }
      else
        break;

    case LEU:
      /* unsigned <= 0 is equivalent to == 0 */
      if (const_op == 0)
        code = EQ;
      /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
               && (unsigned HOST_WIDE_INT) const_op
                  == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
        {
          const_op = 0;
          code = GE;
        }
      break;

    case GEU:
      /* >= C is equivalent to > (C - 1).  */
      if (const_op > 1)
        {
          const_op -= 1;
          code = GTU;
          /* ... fall through ...  */
        }
      /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
               && (unsigned HOST_WIDE_INT) const_op
                  == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
        {
          const_op = 0;
          code = LT;
          break;
        }
      else
        break;

    case GTU:
      /* unsigned > 0 is equivalent to != 0 */
      if (const_op == 0)
        code = NE;
      /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
      else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
               && (unsigned HOST_WIDE_INT) const_op
                  == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
        {
          const_op = 0;
          code = LT;
        }
      break;

    default:
      break;
    }

  *pop1 = GEN_INT (const_op);
  return code;
}
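/* Two examples of the canonicalization above: a signed (lt X (const_int 1))
   becomes (le X (const_int 0)), and an unsigned SImode comparison
   (leu X (const_int 0x7fffffff)) becomes (ge X (const_int 0)).  */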
/* Simplify a comparison between *POP0 and *POP1 where CODE is the
   comparison code that will be tested.

   The result is a possibly different comparison code to use.  *POP0 and
   *POP1 may be updated.

   It is possible that we might detect that a comparison is either always
   true or always false.  However, we do not perform general constant
   folding in combine, so this knowledge isn't useful.  Such tautologies
   should have been detected earlier.  Hence we ignore all such cases.  */

static enum rtx_code
simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  rtx op0 = *pop0;
  rtx op1 = *pop1;
  rtx tem, tem1;
  int i;
  machine_mode mode, tmode;
11315 /* Try a few ways of applying the same transformation to both operands. */
11318 #ifndef WORD_REGISTER_OPERATIONS
11319 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11320 so check specially. */
11321 if (code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
11322 && GET_CODE (op0
) == ASHIFTRT
&& GET_CODE (op1
) == ASHIFTRT
11323 && GET_CODE (XEXP (op0
, 0)) == ASHIFT
11324 && GET_CODE (XEXP (op1
, 0)) == ASHIFT
11325 && GET_CODE (XEXP (XEXP (op0
, 0), 0)) == SUBREG
11326 && GET_CODE (XEXP (XEXP (op1
, 0), 0)) == SUBREG
11327 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0
, 0), 0)))
11328 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1
, 0), 0))))
11329 && CONST_INT_P (XEXP (op0
, 1))
11330 && XEXP (op0
, 1) == XEXP (op1
, 1)
11331 && XEXP (op0
, 1) == XEXP (XEXP (op0
, 0), 1)
11332 && XEXP (op0
, 1) == XEXP (XEXP (op1
, 0), 1)
11333 && (INTVAL (XEXP (op0
, 1))
11334 == (GET_MODE_PRECISION (GET_MODE (op0
))
11335 - (GET_MODE_PRECISION
11336 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0
, 0), 0))))))))
11338 op0
= SUBREG_REG (XEXP (XEXP (op0
, 0), 0));
11339 op1
= SUBREG_REG (XEXP (XEXP (op1
, 0), 0));
11343 /* If both operands are the same constant shift, see if we can ignore the
11344 shift. We can if the shift is a rotate or if the bits shifted out of
11345 this shift are known to be zero for both inputs and if the type of
11346 comparison is compatible with the shift. */
11347 if (GET_CODE (op0
) == GET_CODE (op1
)
11348 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0
))
11349 && ((GET_CODE (op0
) == ROTATE
&& (code
== NE
|| code
== EQ
))
11350 || ((GET_CODE (op0
) == LSHIFTRT
|| GET_CODE (op0
) == ASHIFT
)
11351 && (code
!= GT
&& code
!= LT
&& code
!= GE
&& code
!= LE
))
11352 || (GET_CODE (op0
) == ASHIFTRT
11353 && (code
!= GTU
&& code
!= LTU
11354 && code
!= GEU
&& code
!= LEU
)))
11355 && CONST_INT_P (XEXP (op0
, 1))
11356 && INTVAL (XEXP (op0
, 1)) >= 0
11357 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
11358 && XEXP (op0
, 1) == XEXP (op1
, 1))
11360 machine_mode mode
= GET_MODE (op0
);
11361 unsigned HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
11362 int shift_count
= INTVAL (XEXP (op0
, 1));
11364 if (GET_CODE (op0
) == LSHIFTRT
|| GET_CODE (op0
) == ASHIFTRT
)
11365 mask
&= (mask
>> shift_count
) << shift_count
;
11366 else if (GET_CODE (op0
) == ASHIFT
)
11367 mask
= (mask
& (mask
<< shift_count
)) >> shift_count
;
11369 if ((nonzero_bits (XEXP (op0
, 0), mode
) & ~mask
) == 0
11370 && (nonzero_bits (XEXP (op1
, 0), mode
) & ~mask
) == 0)
11371 op0
= XEXP (op0
, 0), op1
= XEXP (op1
, 0);
11376 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11377 SUBREGs are of the same mode, and, in both cases, the AND would
11378 be redundant if the comparison was done in the narrower mode,
11379 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11380 and the operand's possibly nonzero bits are 0xffffff01; in that case
11381 if we only care about QImode, we don't need the AND). This case
11382 occurs if the output mode of an scc insn is not SImode and
11383 STORE_FLAG_VALUE == 1 (e.g., the 386).
11385 Similarly, check for a case where the AND's are ZERO_EXTEND
11386 operations from some narrower mode even though a SUBREG is not
11389 else if (GET_CODE (op0
) == AND
&& GET_CODE (op1
) == AND
11390 && CONST_INT_P (XEXP (op0
, 1))
11391 && CONST_INT_P (XEXP (op1
, 1)))
11393 rtx inner_op0
= XEXP (op0
, 0);
11394 rtx inner_op1
= XEXP (op1
, 0);
11395 HOST_WIDE_INT c0
= INTVAL (XEXP (op0
, 1));
11396 HOST_WIDE_INT c1
= INTVAL (XEXP (op1
, 1));
11399 if (paradoxical_subreg_p (inner_op0
)
11400 && GET_CODE (inner_op1
) == SUBREG
11401 && (GET_MODE (SUBREG_REG (inner_op0
))
11402 == GET_MODE (SUBREG_REG (inner_op1
)))
11403 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0
)))
11404 <= HOST_BITS_PER_WIDE_INT
)
11405 && (0 == ((~c0
) & nonzero_bits (SUBREG_REG (inner_op0
),
11406 GET_MODE (SUBREG_REG (inner_op0
)))))
11407 && (0 == ((~c1
) & nonzero_bits (SUBREG_REG (inner_op1
),
11408 GET_MODE (SUBREG_REG (inner_op1
))))))
11410 op0
= SUBREG_REG (inner_op0
);
11411 op1
= SUBREG_REG (inner_op1
);
11413 /* The resulting comparison is always unsigned since we masked
11414 off the original sign bit. */
11415 code
= unsigned_condition (code
);
11421 for (tmode
= GET_CLASS_NARROWEST_MODE
11422 (GET_MODE_CLASS (GET_MODE (op0
)));
11423 tmode
!= GET_MODE (op0
); tmode
= GET_MODE_WIDER_MODE (tmode
))
11424 if ((unsigned HOST_WIDE_INT
) c0
== GET_MODE_MASK (tmode
))
11426 op0
= gen_lowpart (tmode
, inner_op0
);
11427 op1
= gen_lowpart (tmode
, inner_op1
);
11428 code
= unsigned_condition (code
);
11437 /* If both operands are NOT, we can strip off the outer operation
11438 and adjust the comparison code for swapped operands; similarly for
11439 NEG, except that this must be an equality comparison. */
11440 else if ((GET_CODE (op0
) == NOT
&& GET_CODE (op1
) == NOT
)
11441 || (GET_CODE (op0
) == NEG
&& GET_CODE (op1
) == NEG
11442 && (code
== EQ
|| code
== NE
)))
11443 op0
= XEXP (op0
, 0), op1
= XEXP (op1
, 0), code
= swap_condition (code
);
11449 /* If the first operand is a constant, swap the operands and adjust the
11450 comparison code appropriately, but don't do this if the second operand
11451 is already a constant integer. */
11452 if (swap_commutative_operands_p (op0
, op1
))
11454 tem
= op0
, op0
= op1
, op1
= tem
;
11455 code
= swap_condition (code
);
11458 /* We now enter a loop during which we will try to simplify the comparison.
11459 For the most part, we only are concerned with comparisons with zero,
11460 but some things may really be comparisons with zero but not start
11461 out looking that way. */
11463 while (CONST_INT_P (op1
))
11465 machine_mode mode
= GET_MODE (op0
);
11466 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
11467 unsigned HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
11468 int equality_comparison_p
;
11469 int sign_bit_comparison_p
;
11470 int unsigned_comparison_p
;
11471 HOST_WIDE_INT const_op
;
11473 /* We only want to handle integral modes. This catches VOIDmode,
11474 CCmode, and the floating-point modes. An exception is that we
11475 can handle VOIDmode if OP0 is a COMPARE or a comparison
11478 if (GET_MODE_CLASS (mode
) != MODE_INT
11479 && ! (mode
== VOIDmode
11480 && (GET_CODE (op0
) == COMPARE
|| COMPARISON_P (op0
))))
11483 /* Try to simplify the compare to constant, possibly changing the
11484 comparison op, and/or changing op1 to zero. */
11485 code
= simplify_compare_const (code
, mode
, op0
, &op1
);
11486 const_op
= INTVAL (op1
);
11488 /* Compute some predicates to simplify code below. */
11490 equality_comparison_p
= (code
== EQ
|| code
== NE
);
11491 sign_bit_comparison_p
= ((code
== LT
|| code
== GE
) && const_op
== 0);
11492 unsigned_comparison_p
= (code
== LTU
|| code
== LEU
|| code
== GTU
11495 /* If this is a sign bit comparison and we can do arithmetic in
11496 MODE, say that we will only be needing the sign bit of OP0. */
11497 if (sign_bit_comparison_p
&& HWI_COMPUTABLE_MODE_P (mode
))
11498 op0
= force_to_mode (op0
, mode
,
11499 (unsigned HOST_WIDE_INT
) 1
11500 << (GET_MODE_PRECISION (mode
) - 1),
11503 /* Now try cases based on the opcode of OP0. If none of the cases
11504 does a "continue", we exit this loop immediately after the
11507 switch (GET_CODE (op0
))
11510 /* If we are extracting a single bit from a variable position in
11511 a constant that has only a single bit set and are comparing it
11512 with zero, we can convert this into an equality comparison
11513 between the position and the location of the single bit. */
11514 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11515 have already reduced the shift count modulo the word size. */
11516 if (!SHIFT_COUNT_TRUNCATED
11517 && CONST_INT_P (XEXP (op0
, 0))
11518 && XEXP (op0
, 1) == const1_rtx
11519 && equality_comparison_p
&& const_op
== 0
11520 && (i
= exact_log2 (UINTVAL (XEXP (op0
, 0)))) >= 0)
11522 if (BITS_BIG_ENDIAN
)
11523 i
= BITS_PER_WORD
- 1 - i
;
11525 op0
= XEXP (op0
, 2);
11529 /* Result is nonzero iff shift count is equal to I. */
11530 code
= reverse_condition (code
);
11534 /* ... fall through ... */
11537 tem
= expand_compound_operation (op0
);
11546 /* If testing for equality, we can take the NOT of the constant. */
11547 if (equality_comparison_p
11548 && (tem
= simplify_unary_operation (NOT
, mode
, op1
, mode
)) != 0)
11550 op0
= XEXP (op0
, 0);
11555 /* If just looking at the sign bit, reverse the sense of the
11557 if (sign_bit_comparison_p
)
11559 op0
= XEXP (op0
, 0);
11560 code
= (code
== GE
? LT
: GE
);
11566 /* If testing for equality, we can take the NEG of the constant. */
11567 if (equality_comparison_p
11568 && (tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
)) != 0)
11570 op0
= XEXP (op0
, 0);
11575 /* The remaining cases only apply to comparisons with zero. */
11579 /* When X is ABS or is known positive,
11580 (neg X) is < 0 if and only if X != 0. */
11582 if (sign_bit_comparison_p
11583 && (GET_CODE (XEXP (op0
, 0)) == ABS
11584 || (mode_width
<= HOST_BITS_PER_WIDE_INT
11585 && (nonzero_bits (XEXP (op0
, 0), mode
)
11586 & ((unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)))
11589 op0
= XEXP (op0
, 0);
11590 code
= (code
== LT
? NE
: EQ
);
11594 /* If we have NEG of something whose two high-order bits are the
11595 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11596 if (num_sign_bit_copies (op0
, mode
) >= 2)
11598 op0
= XEXP (op0
, 0);
11599 code
= swap_condition (code
);
11605 /* If we are testing equality and our count is a constant, we
11606 can perform the inverse operation on our RHS. */
11607 if (equality_comparison_p
&& CONST_INT_P (XEXP (op0
, 1))
11608 && (tem
= simplify_binary_operation (ROTATERT
, mode
,
11609 op1
, XEXP (op0
, 1))) != 0)
11611 op0
= XEXP (op0
, 0);
11616 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11617 a particular bit. Convert it to an AND of a constant of that
11618 bit. This will be converted into a ZERO_EXTRACT. */
11619 if (const_op
== 0 && sign_bit_comparison_p
11620 && CONST_INT_P (XEXP (op0
, 1))
11621 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
11623 op0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (op0
, 0),
11624 ((unsigned HOST_WIDE_INT
) 1
11626 - INTVAL (XEXP (op0
, 1)))));
11627 code
= (code
== LT
? NE
: EQ
);
11631 /* Fall through. */
11634 /* ABS is ignorable inside an equality comparison with zero. */
11635 if (const_op
== 0 && equality_comparison_p
)
11637 op0
= XEXP (op0
, 0);
11643 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11644 (compare FOO CONST) if CONST fits in FOO's mode and we
11645 are either testing inequality or have an unsigned
11646 comparison with ZERO_EXTEND or a signed comparison with
11647 SIGN_EXTEND. But don't do it if we don't have a compare
11648 insn of the given mode, since we'd have to revert it
11649 later on, and then we wouldn't know whether to sign- or
11651 mode
= GET_MODE (XEXP (op0
, 0));
11652 if (GET_MODE_CLASS (mode
) == MODE_INT
11653 && ! unsigned_comparison_p
11654 && HWI_COMPUTABLE_MODE_P (mode
)
11655 && trunc_int_for_mode (const_op
, mode
) == const_op
11656 && have_insn_for (COMPARE
, mode
))
11658 op0
= XEXP (op0
, 0);
11664 /* Check for the case where we are comparing A - C1 with C2, that is
11666 (subreg:MODE (plus (A) (-C1))) op (C2)
11668 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11669 comparison in the wider mode. One of the following two conditions
11670 must be true in order for this to be valid:
11672 1. The mode extension results in the same bit pattern being added
11673 on both sides and the comparison is equality or unsigned. As
11674 C2 has been truncated to fit in MODE, the pattern can only be
11677 2. The mode extension results in the sign bit being copied on
11680 The difficulty here is that we have predicates for A but not for
11681 (A - C1) so we need to check that C1 is within proper bounds so
11682 as to perturbate A as little as possible. */
11684 if (mode_width
<= HOST_BITS_PER_WIDE_INT
11685 && subreg_lowpart_p (op0
)
11686 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0
))) > mode_width
11687 && GET_CODE (SUBREG_REG (op0
)) == PLUS
11688 && CONST_INT_P (XEXP (SUBREG_REG (op0
), 1)))
11690 machine_mode inner_mode
= GET_MODE (SUBREG_REG (op0
));
11691 rtx a
= XEXP (SUBREG_REG (op0
), 0);
11692 HOST_WIDE_INT c1
= -INTVAL (XEXP (SUBREG_REG (op0
), 1));
11695 && (unsigned HOST_WIDE_INT
) c1
11696 < (unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)
11697 && (equality_comparison_p
|| unsigned_comparison_p
)
11698 /* (A - C1) zero-extends if it is positive and sign-extends
11699 if it is negative, C2 both zero- and sign-extends. */
11700 && ((0 == (nonzero_bits (a
, inner_mode
)
11701 & ~GET_MODE_MASK (mode
))
11703 /* (A - C1) sign-extends if it is positive and 1-extends
11704 if it is negative, C2 both sign- and 1-extends. */
11705 || (num_sign_bit_copies (a
, inner_mode
)
11706 > (unsigned int) (GET_MODE_PRECISION (inner_mode
)
11709 || ((unsigned HOST_WIDE_INT
) c1
11710 < (unsigned HOST_WIDE_INT
) 1 << (mode_width
- 2)
11711 /* (A - C1) always sign-extends, like C2. */
11712 && num_sign_bit_copies (a
, inner_mode
)
11713 > (unsigned int) (GET_MODE_PRECISION (inner_mode
)
11714 - (mode_width
- 1))))
11716 op0
= SUBREG_REG (op0
);
11721 /* If the inner mode is narrower and we are extracting the low part,
11722 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11723 if (subreg_lowpart_p (op0
)
11724 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0
))) < mode_width
)
11725 /* Fall through */ ;
11729 /* ... fall through ... */
11732 mode
= GET_MODE (XEXP (op0
, 0));
11733 if (GET_MODE_CLASS (mode
) == MODE_INT
11734 && (unsigned_comparison_p
|| equality_comparison_p
)
11735 && HWI_COMPUTABLE_MODE_P (mode
)
11736 && (unsigned HOST_WIDE_INT
) const_op
<= GET_MODE_MASK (mode
)
11738 && have_insn_for (COMPARE
, mode
))
11740 op0
= XEXP (op0
, 0);
11746 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11747 this for equality comparisons due to pathological cases involving
11749 if (equality_comparison_p
11750 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
,
11751 op1
, XEXP (op0
, 1))))
11753 op0
= XEXP (op0
, 0);
11758 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11759 if (const_op
== 0 && XEXP (op0
, 1) == constm1_rtx
11760 && GET_CODE (XEXP (op0
, 0)) == ABS
&& sign_bit_comparison_p
)
11762 op0
= XEXP (XEXP (op0
, 0), 0);
11763 code
= (code
== LT
? EQ
: NE
);
11769 /* We used to optimize signed comparisons against zero, but that
11770 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11771 arrive here as equality comparisons, or (GEU, LTU) are
11772 optimized away. No need to special-case them. */
11774 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11775 (eq B (minus A C)), whichever simplifies. We can only do
11776 this for equality comparisons due to pathological cases involving
11778 if (equality_comparison_p
11779 && 0 != (tem
= simplify_binary_operation (PLUS
, mode
,
11780 XEXP (op0
, 1), op1
)))
11782 op0
= XEXP (op0
, 0);
11787 if (equality_comparison_p
11788 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
,
11789 XEXP (op0
, 0), op1
)))
11791 op0
= XEXP (op0
, 1);
11796 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11797 of bits in X minus 1, is one iff X > 0. */
11798 if (sign_bit_comparison_p
&& GET_CODE (XEXP (op0
, 0)) == ASHIFTRT
11799 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
11800 && UINTVAL (XEXP (XEXP (op0
, 0), 1)) == mode_width
- 1
11801 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), XEXP (op0
, 1)))
11803 op0
= XEXP (op0
, 1);
11804 code
= (code
== GE
? LE
: GT
);
11810 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11811 if C is zero or B is a constant. */
11812 if (equality_comparison_p
11813 && 0 != (tem
= simplify_binary_operation (XOR
, mode
,
11814 XEXP (op0
, 1), op1
)))
11816 op0
= XEXP (op0
, 0);
11823 case UNEQ
: case LTGT
:
11824 case LT
: case LTU
: case UNLT
: case LE
: case LEU
: case UNLE
:
11825 case GT
: case GTU
: case UNGT
: case GE
: case GEU
: case UNGE
:
11826 case UNORDERED
: case ORDERED
:
11827 /* We can't do anything if OP0 is a condition code value, rather
11828 than an actual data value. */
11830 || CC0_P (XEXP (op0
, 0))
11831 || GET_MODE_CLASS (GET_MODE (XEXP (op0
, 0))) == MODE_CC
)
11834 /* Get the two operands being compared. */
11835 if (GET_CODE (XEXP (op0
, 0)) == COMPARE
)
11836 tem
= XEXP (XEXP (op0
, 0), 0), tem1
= XEXP (XEXP (op0
, 0), 1);
11838 tem
= XEXP (op0
, 0), tem1
= XEXP (op0
, 1);
11840 /* Check for the cases where we simply want the result of the
11841 earlier test or the opposite of that result. */
11842 if (code
== NE
|| code
== EQ
11843 || (val_signbit_known_set_p (GET_MODE (op0
), STORE_FLAG_VALUE
)
11844 && (code
== LT
|| code
== GE
)))
11846 enum rtx_code new_code
;
11847 if (code
== LT
|| code
== NE
)
11848 new_code
= GET_CODE (op0
);
11850 new_code
= reversed_comparison_code (op0
, NULL
);
11852 if (new_code
!= UNKNOWN
)
11863 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11865 if (sign_bit_comparison_p
&& GET_CODE (XEXP (op0
, 0)) == PLUS
11866 && XEXP (XEXP (op0
, 0), 1) == constm1_rtx
11867 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), XEXP (op0
, 1)))
11869 op0
= XEXP (op0
, 1);
11870 code
= (code
== GE
? GT
: LE
);
11876 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11877 will be converted to a ZERO_EXTRACT later. */
11878 if (const_op
== 0 && equality_comparison_p
11879 && GET_CODE (XEXP (op0
, 0)) == ASHIFT
11880 && XEXP (XEXP (op0
, 0), 0) == const1_rtx
)
11882 op0
= gen_rtx_LSHIFTRT (mode
, XEXP (op0
, 1),
11883 XEXP (XEXP (op0
, 0), 1));
11884 op0
= simplify_and_const_int (NULL_RTX
, mode
, op0
, 1);
11888 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11889 zero and X is a comparison and C1 and C2 describe only bits set
11890 in STORE_FLAG_VALUE, we can compare with X. */
11891 if (const_op
== 0 && equality_comparison_p
11892 && mode_width
<= HOST_BITS_PER_WIDE_INT
11893 && CONST_INT_P (XEXP (op0
, 1))
11894 && GET_CODE (XEXP (op0
, 0)) == LSHIFTRT
11895 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
11896 && INTVAL (XEXP (XEXP (op0
, 0), 1)) >= 0
11897 && INTVAL (XEXP (XEXP (op0
, 0), 1)) < HOST_BITS_PER_WIDE_INT
)
11899 mask
= ((INTVAL (XEXP (op0
, 1)) & GET_MODE_MASK (mode
))
11900 << INTVAL (XEXP (XEXP (op0
, 0), 1)));
11901 if ((~STORE_FLAG_VALUE
& mask
) == 0
11902 && (COMPARISON_P (XEXP (XEXP (op0
, 0), 0))
11903 || ((tem
= get_last_value (XEXP (XEXP (op0
, 0), 0))) != 0
11904 && COMPARISON_P (tem
))))
11906 op0
= XEXP (XEXP (op0
, 0), 0);
11911 /* If we are doing an equality comparison of an AND of a bit equal
11912 to the sign bit, replace this with a LT or GE comparison of
11913 the underlying value. */
11914 if (equality_comparison_p
11916 && CONST_INT_P (XEXP (op0
, 1))
11917 && mode_width
<= HOST_BITS_PER_WIDE_INT
11918 && ((INTVAL (XEXP (op0
, 1)) & GET_MODE_MASK (mode
))
11919 == (unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)))
11921 op0
= XEXP (op0
, 0);
11922 code
= (code
== EQ
? GE
: LT
);
11926 /* If this AND operation is really a ZERO_EXTEND from a narrower
11927 mode, the constant fits within that mode, and this is either an
11928 equality or unsigned comparison, try to do this comparison in
11933 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11934 -> (ne:DI (reg:SI 4) (const_int 0))
11936 unless TRULY_NOOP_TRUNCATION allows it or the register is
11937 known to hold a value of the required mode the
11938 transformation is invalid. */
11939 if ((equality_comparison_p
|| unsigned_comparison_p
)
11940 && CONST_INT_P (XEXP (op0
, 1))
11941 && (i
= exact_log2 ((UINTVAL (XEXP (op0
, 1))
11942 & GET_MODE_MASK (mode
))
11944 && const_op
>> i
== 0
11945 && (tmode
= mode_for_size (i
, MODE_INT
, 1)) != BLKmode
11946 && (TRULY_NOOP_TRUNCATION_MODES_P (tmode
, GET_MODE (op0
))
11947 || (REG_P (XEXP (op0
, 0))
11948 && reg_truncated_to_mode (tmode
, XEXP (op0
, 0)))))
11950 op0
= gen_lowpart (tmode
, XEXP (op0
, 0));
11954 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11955 fits in both M1 and M2 and the SUBREG is either paradoxical
11956 or represents the low part, permute the SUBREG and the AND
11958 if (GET_CODE (XEXP (op0
, 0)) == SUBREG
)
11960 unsigned HOST_WIDE_INT c1
;
11961 tmode
= GET_MODE (SUBREG_REG (XEXP (op0
, 0)));
11962 /* Require an integral mode, to avoid creating something like
11964 if (SCALAR_INT_MODE_P (tmode
)
11965 /* It is unsafe to commute the AND into the SUBREG if the
11966 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11967 not defined. As originally written the upper bits
11968 have a defined value due to the AND operation.
11969 However, if we commute the AND inside the SUBREG then
11970 they no longer have defined values and the meaning of
11971 the code has been changed. */
11973 #ifdef WORD_REGISTER_OPERATIONS
11974 || (mode_width
> GET_MODE_PRECISION (tmode
)
11975 && mode_width
<= BITS_PER_WORD
)
11977 || (mode_width
<= GET_MODE_PRECISION (tmode
)
11978 && subreg_lowpart_p (XEXP (op0
, 0))))
11979 && CONST_INT_P (XEXP (op0
, 1))
11980 && mode_width
<= HOST_BITS_PER_WIDE_INT
11981 && HWI_COMPUTABLE_MODE_P (tmode
)
11982 && ((c1
= INTVAL (XEXP (op0
, 1))) & ~mask
) == 0
11983 && (c1
& ~GET_MODE_MASK (tmode
)) == 0
11985 && c1
!= GET_MODE_MASK (tmode
))
11987 op0
= simplify_gen_binary (AND
, tmode
,
11988 SUBREG_REG (XEXP (op0
, 0)),
11989 gen_int_mode (c1
, tmode
));
11990 op0
= gen_lowpart (mode
, op0
);
11995 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11996 if (const_op
== 0 && equality_comparison_p
11997 && XEXP (op0
, 1) == const1_rtx
11998 && GET_CODE (XEXP (op0
, 0)) == NOT
)
12000 op0
= simplify_and_const_int (NULL_RTX
, mode
,
12001 XEXP (XEXP (op0
, 0), 0), 1);
12002 code
= (code
== NE
? EQ
: NE
);
12006 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12007 (eq (and (lshiftrt X) 1) 0).
12008 Also handle the case where (not X) is expressed using xor. */
12009 if (const_op
== 0 && equality_comparison_p
12010 && XEXP (op0
, 1) == const1_rtx
12011 && GET_CODE (XEXP (op0
, 0)) == LSHIFTRT
)
12013 rtx shift_op
= XEXP (XEXP (op0
, 0), 0);
12014 rtx shift_count
= XEXP (XEXP (op0
, 0), 1);
12016 if (GET_CODE (shift_op
) == NOT
12017 || (GET_CODE (shift_op
) == XOR
12018 && CONST_INT_P (XEXP (shift_op
, 1))
12019 && CONST_INT_P (shift_count
)
12020 && HWI_COMPUTABLE_MODE_P (mode
)
12021 && (UINTVAL (XEXP (shift_op
, 1))
12022 == (unsigned HOST_WIDE_INT
) 1
12023 << INTVAL (shift_count
))))
12026 = gen_rtx_LSHIFTRT (mode
, XEXP (shift_op
, 0), shift_count
);
12027 op0
= simplify_and_const_int (NULL_RTX
, mode
, op0
, 1);
12028 code
= (code
== NE
? EQ
: NE
);
12035 /* If we have (compare (ashift FOO N) (const_int C)) and
12036 the high order N bits of FOO (N+1 if an inequality comparison)
12037 are known to be zero, we can do this by comparing FOO with C
12038 shifted right N bits so long as the low-order N bits of C are
12040 if (CONST_INT_P (XEXP (op0
, 1))
12041 && INTVAL (XEXP (op0
, 1)) >= 0
12042 && ((INTVAL (XEXP (op0
, 1)) + ! equality_comparison_p
)
12043 < HOST_BITS_PER_WIDE_INT
)
12044 && (((unsigned HOST_WIDE_INT
) const_op
12045 & (((unsigned HOST_WIDE_INT
) 1 << INTVAL (XEXP (op0
, 1)))
12047 && mode_width
<= HOST_BITS_PER_WIDE_INT
12048 && (nonzero_bits (XEXP (op0
, 0), mode
)
12049 & ~(mask
>> (INTVAL (XEXP (op0
, 1))
12050 + ! equality_comparison_p
))) == 0)
12052 /* We must perform a logical shift, not an arithmetic one,
12053 as we want the top N bits of C to be zero. */
12054 unsigned HOST_WIDE_INT temp
= const_op
& GET_MODE_MASK (mode
);
12056 temp
>>= INTVAL (XEXP (op0
, 1));
12057 op1
= gen_int_mode (temp
, mode
);
12058 op0
= XEXP (op0
, 0);
12062 /* If we are doing a sign bit comparison, it means we are testing
12063 a particular bit. Convert it to the appropriate AND. */
12064 if (sign_bit_comparison_p
&& CONST_INT_P (XEXP (op0
, 1))
12065 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
12067 op0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (op0
, 0),
12068 ((unsigned HOST_WIDE_INT
) 1
12070 - INTVAL (XEXP (op0
, 1)))));
12071 code
= (code
== LT
? NE
: EQ
);
12075 /* If this an equality comparison with zero and we are shifting
12076 the low bit to the sign bit, we can convert this to an AND of the
12078 if (const_op
== 0 && equality_comparison_p
12079 && CONST_INT_P (XEXP (op0
, 1))
12080 && UINTVAL (XEXP (op0
, 1)) == mode_width
- 1)
12082 op0
= simplify_and_const_int (NULL_RTX
, mode
, XEXP (op0
, 0), 1);
12088 /* If this is an equality comparison with zero, we can do this
12089 as a logical shift, which might be much simpler. */
12090 if (equality_comparison_p
&& const_op
== 0
12091 && CONST_INT_P (XEXP (op0
, 1)))
12093 op0
= simplify_shift_const (NULL_RTX
, LSHIFTRT
, mode
,
12095 INTVAL (XEXP (op0
, 1)));
12099 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12100 do the comparison in a narrower mode. */
12101 if (! unsigned_comparison_p
12102 && CONST_INT_P (XEXP (op0
, 1))
12103 && GET_CODE (XEXP (op0
, 0)) == ASHIFT
12104 && XEXP (op0
, 1) == XEXP (XEXP (op0
, 0), 1)
12105 && (tmode
= mode_for_size (mode_width
- INTVAL (XEXP (op0
, 1)),
12106 MODE_INT
, 1)) != BLKmode
12107 && (((unsigned HOST_WIDE_INT
) const_op
12108 + (GET_MODE_MASK (tmode
) >> 1) + 1)
12109 <= GET_MODE_MASK (tmode
)))
12111 op0
= gen_lowpart (tmode
, XEXP (XEXP (op0
, 0), 0));
12115 /* Likewise if OP0 is a PLUS of a sign extension with a
12116 constant, which is usually represented with the PLUS
12117 between the shifts. */
12118 if (! unsigned_comparison_p
12119 && CONST_INT_P (XEXP (op0
, 1))
12120 && GET_CODE (XEXP (op0
, 0)) == PLUS
12121 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
12122 && GET_CODE (XEXP (XEXP (op0
, 0), 0)) == ASHIFT
12123 && XEXP (op0
, 1) == XEXP (XEXP (XEXP (op0
, 0), 0), 1)
12124 && (tmode
= mode_for_size (mode_width
- INTVAL (XEXP (op0
, 1)),
12125 MODE_INT
, 1)) != BLKmode
12126 && (((unsigned HOST_WIDE_INT
) const_op
12127 + (GET_MODE_MASK (tmode
) >> 1) + 1)
12128 <= GET_MODE_MASK (tmode
)))
12130 rtx inner
= XEXP (XEXP (XEXP (op0
, 0), 0), 0);
12131 rtx add_const
= XEXP (XEXP (op0
, 0), 1);
12132 rtx new_const
= simplify_gen_binary (ASHIFTRT
, GET_MODE (op0
),
12133 add_const
, XEXP (op0
, 1));
12135 op0
= simplify_gen_binary (PLUS
, tmode
,
12136 gen_lowpart (tmode
, inner
),
12141 /* ... fall through ... */
12143 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12144 the low order N bits of FOO are known to be zero, we can do this
12145 by comparing FOO with C shifted left N bits so long as no
12146 overflow occurs. Even if the low order N bits of FOO aren't known
12147 to be zero, if the comparison is >= or < we can use the same
12148 optimization and for > or <= by setting all the low
12149 order N bits in the comparison constant. */
12150 if (CONST_INT_P (XEXP (op0
, 1))
12151 && INTVAL (XEXP (op0
, 1)) > 0
12152 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
12153 && mode_width
<= HOST_BITS_PER_WIDE_INT
12154 && (((unsigned HOST_WIDE_INT
) const_op
12155 + (GET_CODE (op0
) != LSHIFTRT
12156 ? ((GET_MODE_MASK (mode
) >> INTVAL (XEXP (op0
, 1)) >> 1)
12159 <= GET_MODE_MASK (mode
) >> INTVAL (XEXP (op0
, 1))))
12161 unsigned HOST_WIDE_INT low_bits
12162 = (nonzero_bits (XEXP (op0
, 0), mode
)
12163 & (((unsigned HOST_WIDE_INT
) 1
12164 << INTVAL (XEXP (op0
, 1))) - 1));
12165 if (low_bits
== 0 || !equality_comparison_p
)
12167 /* If the shift was logical, then we must make the condition
12169 if (GET_CODE (op0
) == LSHIFTRT
)
12170 code
= unsigned_condition (code
);
12172 const_op
<<= INTVAL (XEXP (op0
, 1));
12174 && (code
== GT
|| code
== GTU
12175 || code
== LE
|| code
== LEU
))
12177 |= (((HOST_WIDE_INT
) 1 << INTVAL (XEXP (op0
, 1))) - 1);
12178 op1
= GEN_INT (const_op
);
12179 op0
= XEXP (op0
, 0);
12184 /* If we are using this shift to extract just the sign bit, we
12185 can replace this with an LT or GE comparison. */
12187 && (equality_comparison_p
|| sign_bit_comparison_p
)
12188 && CONST_INT_P (XEXP (op0
, 1))
12189 && UINTVAL (XEXP (op0
, 1)) == mode_width
- 1)
12191 op0
= XEXP (op0
, 0);
12192 code
= (code
== NE
|| code
== GT
? LT
: GE
);
12204 /* Now make any compound operations involved in this comparison. Then,
12205 check for an outmost SUBREG on OP0 that is not doing anything or is
12206 paradoxical. The latter transformation must only be performed when
12207 it is known that the "extra" bits will be the same in op0 and op1 or
12208 that they don't matter. There are three cases to consider:
12210 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12211 care bits and we can assume they have any convenient value. So
12212 making the transformation is safe.
12214 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
12215 In this case the upper bits of op0 are undefined. We should not make
12216 the simplification in that case as we do not know the contents of
12219 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
12220 UNKNOWN. In that case we know those bits are zeros or ones. We must
12221 also be sure that they are the same as the upper bits of op1.
12223 We can never remove a SUBREG for a non-equality comparison because
12224 the sign bit is in a different place in the underlying object. */
12226 op0
= make_compound_operation (op0
, op1
== const0_rtx
? COMPARE
: SET
);
12227 op1
= make_compound_operation (op1
, SET
);
12229 if (GET_CODE (op0
) == SUBREG
&& subreg_lowpart_p (op0
)
12230 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_INT
12231 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0
))) == MODE_INT
12232 && (code
== NE
|| code
== EQ
))
12234 if (paradoxical_subreg_p (op0
))
12236 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12238 if (REG_P (SUBREG_REG (op0
)))
12240 op0
= SUBREG_REG (op0
);
12241 op1
= gen_lowpart (GET_MODE (op0
), op1
);
12244 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0
)))
12245 <= HOST_BITS_PER_WIDE_INT
)
12246 && (nonzero_bits (SUBREG_REG (op0
),
12247 GET_MODE (SUBREG_REG (op0
)))
12248 & ~GET_MODE_MASK (GET_MODE (op0
))) == 0)
12250 tem
= gen_lowpart (GET_MODE (SUBREG_REG (op0
)), op1
);
12252 if ((nonzero_bits (tem
, GET_MODE (SUBREG_REG (op0
)))
12253 & ~GET_MODE_MASK (GET_MODE (op0
))) == 0)
12254 op0
= SUBREG_REG (op0
), op1
= tem
;
12258 /* We now do the opposite procedure: Some machines don't have compare
12259 insns in all modes. If OP0's mode is an integer mode smaller than a
12260 word and we can't do a compare in that mode, see if there is a larger
12261 mode for which we can do the compare. There are a number of cases in
12262 which we can use the wider mode. */
12264 mode
= GET_MODE (op0
);
12265 if (mode
!= VOIDmode
&& GET_MODE_CLASS (mode
) == MODE_INT
12266 && GET_MODE_SIZE (mode
) < UNITS_PER_WORD
12267 && ! have_insn_for (COMPARE
, mode
))
12268 for (tmode
= GET_MODE_WIDER_MODE (mode
);
12269 (tmode
!= VOIDmode
&& HWI_COMPUTABLE_MODE_P (tmode
));
12270 tmode
= GET_MODE_WIDER_MODE (tmode
))
12271 if (have_insn_for (COMPARE
, tmode
))
12275 /* If this is a test for negative, we can make an explicit
12276 test of the sign bit. Test this first so we can use
12277 a paradoxical subreg to extend OP0. */
12279 if (op1
== const0_rtx
&& (code
== LT
|| code
== GE
)
12280 && HWI_COMPUTABLE_MODE_P (mode
))
12282 unsigned HOST_WIDE_INT sign
12283 = (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1);
12284 op0
= simplify_gen_binary (AND
, tmode
,
12285 gen_lowpart (tmode
, op0
),
12286 gen_int_mode (sign
, tmode
));
12287 code
= (code
== LT
) ? NE
: EQ
;
12291 /* If the only nonzero bits in OP0 and OP1 are those in the
12292 narrower mode and this is an equality or unsigned comparison,
12293 we can use the wider mode. Similarly for sign-extended
12294 values, in which case it is true for all comparisons. */
12295 zero_extended
= ((code
== EQ
|| code
== NE
12296 || code
== GEU
|| code
== GTU
12297 || code
== LEU
|| code
== LTU
)
12298 && (nonzero_bits (op0
, tmode
)
12299 & ~GET_MODE_MASK (mode
)) == 0
12300 && ((CONST_INT_P (op1
)
12301 || (nonzero_bits (op1
, tmode
)
12302 & ~GET_MODE_MASK (mode
)) == 0)));
12305 || ((num_sign_bit_copies (op0
, tmode
)
12306 > (unsigned int) (GET_MODE_PRECISION (tmode
)
12307 - GET_MODE_PRECISION (mode
)))
12308 && (num_sign_bit_copies (op1
, tmode
)
12309 > (unsigned int) (GET_MODE_PRECISION (tmode
)
12310 - GET_MODE_PRECISION (mode
)))))
12312 /* If OP0 is an AND and we don't have an AND in MODE either,
12313 make a new AND in the proper mode. */
12314 if (GET_CODE (op0
) == AND
12315 && !have_insn_for (AND
, mode
))
12316 op0
= simplify_gen_binary (AND
, tmode
,
12317 gen_lowpart (tmode
,
12319 gen_lowpart (tmode
,
12325 op0
= simplify_gen_unary (ZERO_EXTEND
, tmode
, op0
, mode
);
12326 op1
= simplify_gen_unary (ZERO_EXTEND
, tmode
, op1
, mode
);
12330 op0
= simplify_gen_unary (SIGN_EXTEND
, tmode
, op0
, mode
);
12331 op1
= simplify_gen_unary (SIGN_EXTEND
, tmode
, op1
, mode
);
12338 /* We may have changed the comparison operands. Re-canonicalize. */
12339 if (swap_commutative_operands_p (op0
, op1
))
12341 tem
= op0
, op0
= op1
, op1
= tem
;
12342 code
= swap_condition (code
);
12345 /* If this machine only supports a subset of valid comparisons, see if we
12346 can convert an unsupported one into a supported one. */
12347 target_canonicalize_comparison (&code
, &op0
, &op1
, 0);
/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */

static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  int ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
        return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return 2 + 2 * count_rtxs (x0)
               + count_rtxs (x == XEXP (x1, 0)
                             ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return 2 + 2 * count_rtxs (x1)
               + count_rtxs (x == XEXP (x0, 0)
                             ? XEXP (x0, 1) : XEXP (x0, 0));

      return 1 + count_rtxs (x0) + count_rtxs (x1);
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
        {
          reg_stat_type *rsp = &reg_stat[r];
          rsp->last_set_table_tick = label_tick;
        }

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        /* Check for identical subexpressions.  If x contains
           identical subexpression we only have to traverse one of
           them.  */
        if (i == 0 && ARITHMETIC_P (x))
          {
            /* Note that at this point x1 has already been
               processed.  */
            rtx x0 = XEXP (x, 0);
            rtx x1 = XEXP (x, 1);

            /* If x0 and x1 are identical then there is no need to
               process x0.  */
            if (x0 == x1)
              break;

            /* If x0 is identical to a subexpression of x1 then while
               processing x1, x0 has already been processed.  Thus we
               are done with x.  */
            if (ARITHMETIC_P (x1)
                && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
              break;

            /* If x1 is identical to a subexpression of x0 then we
               still have to process the rest of x0.  */
            if (ARITHMETIC_P (x0)
                && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
              {
                update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
                break;
              }
          }

        update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        update_table_tick (XVECEXP (x, i, j));
}
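/* For example, given (plus (reg 100) (reg 100)) the x0 == x1 check above
   walks the shared operand only once, and for an expression such as
   (plus (reg 100) (mult (reg 100) (reg 101))), where the two uses of
   (reg 100) share the same rtx as pseudo-register references normally do,
   traversing x1 already covered x0, so x0 is skipped.  */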
/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
         our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
         it isn't going to be useful and will take a lot of time to process,
         so just use the CLOBBER.  */

      if (tem)
        {
          if (ARITHMETIC_P (tem)
              && GET_CODE (XEXP (tem, 0)) == CLOBBER
              && GET_CODE (XEXP (tem, 1)) == CLOBBER)
            tem = XEXP (tem, 0);
          else if (count_occurrences (value, reg, 1) >= 2)
            {
              /* If there are two or more occurrences of REG in VALUE,
                 prevent the value from growing too much.  */
              if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
                tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
            }

          value = replace_rtx (copy_rtx (value), reg, tem);
        }
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
        rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
          || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
        rsp->last_set_invalid = 1;
      else
        rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
        value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
          && HWI_COMPUTABLE_MODE_P (mode))
        mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
        = num_sign_bit_copies (value, GET_MODE (reg));
    }
}
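/* For example, for an insn doing (set (reg 100) (plus (reg 100) (const_int 1)))
   the VALUE recorded for (reg 100) mentions the register itself, so its
   previously recorded value is substituted first; if no valid previous value
   is available, the self-reference is later turned into
   (clobber (const_int 0)) by get_last_value_validate.  */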
/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
        record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
         show that we don't know the value.  We can handle SUBREG in
         some cases.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
        record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
               && GET_CODE (SET_DEST (setter)) == SUBREG
               && SUBREG_REG (SET_DEST (setter)) == dest
               && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
               && subreg_lowpart_p (SET_DEST (setter)))
        record_value_for_reg (dest, record_dead_insn,
                              gen_lowpart (GET_MODE (dest),
                                           SET_SRC (setter)));
      else
        record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
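/* For example, (set (subreg:QI (reg:SI 100) 0) ...) still records a value
   for (reg:SI 100), widened with gen_lowpart, because the store hits the
   low part of a register no wider than a word; any other partial or
   indirect store of a register falls through to recording NULL_RTX, which
   simply marks the register's value as unknown.  */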
/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
          && REG_P (XEXP (link, 0)))
        {
          unsigned int regno = REGNO (XEXP (link, 0));
          unsigned int endregno = END_REGNO (XEXP (link, 0));

          for (i = regno; i < endregno; i++)
            {
              reg_stat_type *rsp;

              rsp = &reg_stat[i];
              rsp->last_death = insn;
            }
        }
      else if (REG_NOTE_KIND (link) == REG_INC)
        record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
        {
          reg_stat_type *rsp;

          rsp = &reg_stat[i];
          rsp->last_set_invalid = 1;
          rsp->last_set = insn;
          rsp->last_set_value = 0;
          rsp->last_set_mode = VOIDmode;
          rsp->last_set_nonzero_bits = 0;
          rsp->last_set_sign_bit_copies = 0;
          rsp->last_death = 0;
          rsp->truncated_to_mode = VOIDmode;
        }

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
         the return value register is set at this LUID.  We could
         still replace a register with the return value from the
         wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}
/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
          || REGNO (SET_DEST (set)) != regno
          || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
        {
          links = links->next;
          continue;
        }

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
        {
          if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
            rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
        }

      if (REG_P (SET_SRC (set)))
        {
          regno = REGNO (SET_SRC (set));
          links = LOG_LINKS (insn);
        }
      else
        break;
    }
}
/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}

/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
   to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
        return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || (GET_MODE_SIZE (truncated_mode)
          < GET_MODE_SIZE (rsp->truncated_to_mode)))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turn TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}

/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          default:
            break;
          }
    }
}
/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (j = regno; j < endregno; j++)
        {
          reg_stat_type *rsp = &reg_stat[j];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                         (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                          regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpression we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or live
     at the beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
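/* For instance, simplify_comparison above uses get_last_value to check
   whether the expression last assigned to a register was itself a
   comparison, and record_value_for_reg uses it to substitute a register's
   previous value when the new value mentions the register itself.  */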
/* Return nonzero if expression X refers to a REG or to memory
   that is set in an instruction more recent than FROM_LUID.  */

static int
use_crosses_set_p (const_rtx x, int from_luid)
{
  const char *fmt;
  int i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned endreg = END_REGNO (x);

#ifdef PUSH_ROUNDING
      /* Don't allow uses of the stack pointer to be moved,
         because we don't know whether the move crosses a push insn.  */
      if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
        return 1;
#endif
      for (; regno < endreg; regno++)
        {
          reg_stat_type *rsp = &reg_stat[regno];
          if (rsp->last_set
              && rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) > from_luid)
            return 1;
        }
      return 0;
    }

  if (code == MEM && mem_last_set > from_luid)
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
              return 1;
        }
      else if (fmt[i] == 'e'
               && use_crosses_set_p (XEXP (x, i), from_luid))
        return 1;
    }
  return 0;
}
/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
	  return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);

  for (;;)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
	    return 1;

	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
	  if (reg_dead_flag)
	    return reg_dead_flag == 1 ? 1 : 0;

	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
	    return 1;
	}

      if (insn == BB_HEAD (block))
	break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
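/* Editor's illustrative sketch (not part of the original source): how a
   combination that needs to clobber a hard register might be validated.
   The helper name is invented for illustration only.  */
#if 0
static int
sketch_can_clobber_hard_reg (unsigned int regno, machine_mode mode,
			     rtx_insn *insn)
{
  /* Clobbering REGNO at INSN is only safe if no live value is carried in
     it there; reg_dead_at_p scans backwards from INSN to decide.  */
  return reg_dead_at_p (gen_rtx_REG (mode, regno), insn);
}
#endif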
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
	 address as used.  */
      if (MEM_P (XEXP (x, 0)))
	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* None of this applies to the stack, frame or arg pointers.  */
	  if (regno == STACK_POINTER_REGNUM
#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
	      || regno == HARD_FRAME_POINTER_REGNUM
#endif
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
#endif
	      || regno == FRAME_POINTER_REGNUM)
	    return;

	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
	}
      return;

    case SET:
      {
	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
	   the address.  */
	rtx testreg = SET_DEST (x);

	while (GET_CODE (testreg) == SUBREG
	       || GET_CODE (testreg) == ZERO_EXTRACT
	       || GET_CODE (testreg) == STRICT_LOW_PART)
	  testreg = XEXP (testreg, 0);

	if (MEM_P (testreg))
	  mark_used_regs_combine (XEXP (testreg, 0));

	mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  mark_used_regs_combine (XEXP (x, i));
	else if (fmt[i] == 'E')
	  {
	    int j;

	    for (j = 0; j < XVECLEN (x, i); j++)
	      mark_used_regs_combine (XVECEXP (x, i, j));
	  }
      }
  }
}
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

static rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
	     rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
	  && ! reg_referenced_p (x, maybe_kill_insn))
	return;

      if (where_dead
	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
	  && DF_INSN_LUID (where_dead) >= from_luid
	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
	{
	  rtx note = remove_death (regno, where_dead);

	  /* It is possible for the call above to return 0.  This can occur
	     when last_death points to I2 or I1 that we combined with.
	     In that case make a new note.

	     We must also check for the case where X is a hard register
	     and NOTE is a death note for a range of hard registers
	     including X.  In that case, we must put REG_DEAD notes for
	     the remaining registers in place of NOTE.  */

	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
		  > GET_MODE_SIZE (GET_MODE (x))))
	    {
	      unsigned int deadregno = REGNO (XEXP (note, 0));
	      unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
	      unsigned int ourend = END_HARD_REGNO (x);
	      unsigned int i;

	      for (i = deadregno; i < deadend; i++)
		if (i < regno || i >= ourend)
		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
	    }

	  /* If we didn't find any note, or if we found a REG_DEAD note that
	     covers only part of the given reg, and we have a multi-reg hard
	     register, then to be safe we must check for REG_DEAD notes
	     for each register other than the first.  They could have
	     their own REG_DEAD notes lying around.  */
	  else if ((note == 0
		    || (note != 0
			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
			    < GET_MODE_SIZE (GET_MODE (x)))))
		   && regno < FIRST_PSEUDO_REGISTER
		   && hard_regno_nregs[regno][GET_MODE (x)] > 1)
	    {
	      unsigned int ourend = END_HARD_REGNO (x);
	      unsigned int i, offset;
	      rtx oldnotes = 0;

	      if (note == 0)
		offset = 1;
	      else
		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];

	      for (i = regno + offset; i < ourend; i++)
		move_deaths (regno_reg_rtx[i],
			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
	    }

	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
	    {
	      XEXP (note, 1) = *pnotes;
	      *pnotes = note;
	    }
	  else
	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
	}

      return;
    }
  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
	 that accesses one word of a multi-word item, some piece of every
	 register in the expression is used by this insn, so remove any
	 old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
	  || GET_CODE (dest) == STRICT_LOW_PART
	  || (GET_CODE (dest) == SUBREG
	      && (((GET_MODE_SIZE (GET_MODE (dest))
		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
	{
	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
	  return;
	}

      /* If this is some other SUBREG, we know it replaces the entire
	 value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
	dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
	 For a REG (the only other possibility), the entire value is
	 being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
		     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
			 to_insn, pnotes);
	}
      else if (fmt[i] == 'e')
	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
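/* Editor's illustrative sketch (not part of the original source): when the
   pattern of I2 is merged into I3, the deaths recorded for I2's inputs have
   to follow it.  The helper name is invented for illustration only; the call
   mirrors how the combiner collects REG_DEAD notes before redistributing
   them.  */
#if 0
static void
sketch_collect_deaths (rtx i2pat, rtx_insn *i2, rtx_insn *i3, rtx *pnotes)
{
  /* Gather a REG_DEAD note in *PNOTES for every register used by I2PAT
     whose death lies between I2 and I3.  */
  move_deaths (i2pat, NULL_RTX, DF_INSN_LUID (i2), i3, pnotes);
}
#endif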
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
	target = SUBREG_REG (XEXP (dest, 0));
      else
	return 0;

      if (GET_CODE (target) == SUBREG)
	target = SUBREG_REG (target);

      if (!REG_P (target))
	return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
	return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
	return 1;

  return 0;
}
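/* Editor's illustrative example (not part of the original source): given the
   pattern

     (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
	  (reg:SI 101))

   only an 8-bit field of pseudo 100 is written, so
   reg_bitfield_target_p (reg:SI 100, body) returns 1; the combiner then
   treats the insn as setting reg 100 when deciding where death notes
   belong.  */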
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2, ELIM_I1 and ELIM_I0 are either zero or registers that we know
   will not need REG_DEAD notes because they are being substituted for.
   This saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
	{
	case REG_BR_PROB:
	case REG_BR_PRED:
	  /* Doesn't matter much where we put this, as long as it's somewhere.
	     It is preferable to keep these notes on branches, which is most
	     likely to be i3.  */
	  place = i3;
	  break;

	case REG_NON_LOCAL_GOTO:
	  if (JUMP_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && JUMP_P (i2));
	      place = i2;
	    }
	  break;
	case REG_EH_REGION:
	  /* These notes must remain with the call or trapping instruction.  */
	  if (CALL_P (i3))
	    place = i3;
	  else if (i2 && CALL_P (i2))
	    place = i2;
	  else
	    {
	      gcc_assert (cfun->can_throw_non_call_exceptions);
	      if (may_trap_p (i3))
		place = i3;
	      else if (i2 && may_trap_p (i2))
		place = i2;
	      /* ??? Otherwise assume we've combined things such that we
		 can now prove that the instructions can't trap.  Drop the
		 note in this case.  */
	    }
	  break;
	case REG_ARGS_SIZE:
	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
	     entire adjustment.  Assert i3 contains at least some adjust.  */
	  if (!noop_move_p (i3))
	    {
	      int old_size, args_size = INTVAL (XEXP (note, 0));
	      /* fixup_args_size_notes looks at REG_NORETURN note,
		 so ensure the note is placed there first.  */
	      if (CALL_P (i3))
		{
		  rtx *np;
		  for (np = &next_note; *np; np = &XEXP (*np, 1))
		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
		      {
			rtx n = *np;
			*np = XEXP (n, 1);
			XEXP (n, 1) = REG_NOTES (i3);
			REG_NOTES (i3) = n;
			break;
		      }
		}
	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
	      gcc_assert (old_size != args_size
			  || (CALL_P (i3)
			      && !ACCUMULATE_OUTGOING_ARGS
			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
	    }
	  break;
	case REG_CALL_DECL:
	  /* These notes must remain with the call.  It should not be
	     possible for both I2 and I3 to be a call.  */
	  if (CALL_P (i3))
	    place = i3;
	  else
	    {
	      gcc_assert (i2 && CALL_P (i2));
	      place = i2;
	    }
	  break;
	case REG_UNUSED:
	  /* Any clobbers for i3 may still exist, and so we must process
	     REG_UNUSED notes from that insn.

	     Any clobbers from i2 or i1 can only exist if they were added by
	     recog_for_combine.  In that case, recog_for_combine created the
	     necessary REG_UNUSED notes.  Trying to keep any original
	     REG_UNUSED notes from these insns can cause incorrect output
	     if it is for the same register as the original i3 dest.
	     In that case, we will notice that the register is set in i3,
	     and then add a REG_UNUSED note for the destination of i3, which
	     is wrong.  However, it is possible to have REG_UNUSED notes from
	     i2 or i1 for registers which were both used and clobbered, so
	     we keep notes from i2 or i1 if they will turn into REG_DEAD
	     notes.  */

	  /* If this register is set or clobbered in I3, put the note there
	     unless there is one already.  */
	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
	    {
	      if (from_insn != i3)
		break;

	      if (! (REG_P (XEXP (note, 0))
		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
		place = i3;
	    }
	  /* Otherwise, if this register is used by I3, then this register
	     now dies here, so we must put a REG_DEAD note here unless there
	     is one already.  */
	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
		   && ! (REG_P (XEXP (note, 0))
			 ? find_regno_note (i3, REG_DEAD,
					    REGNO (XEXP (note, 0)))
			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
	    {
	      PUT_REG_NOTE_KIND (note, REG_DEAD);
	      place = i3;
	    }
	  break;
	case REG_EQUAL:
	case REG_EQUIV:
	case REG_NOALIAS:
	  /* These notes say something about results of an insn.  We can
	     only support them if they used to be on I3 in which case they
	     remain on I3.  Otherwise they are ignored.

	     If the note refers to an expression that is not a constant, we
	     must also ignore the note since we cannot tell whether the
	     equivalence is still true.  It might be possible to do
	     slightly better than this (we only have a problem if I2DEST
	     or I1DEST is present in the expression), but it doesn't
	     seem worth the trouble.  */

	  if (from_insn == i3
	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
	    place = i3;
	  break;
	case REG_INC:
	  /* These notes say something about how a register is used.  They must
	     be present on any use of the register in I2 or I3.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
	    place = i3;

	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }
	  break;
	case REG_LABEL_TARGET:
	case REG_LABEL_OPERAND:
	  /* This can show up in several ways -- either directly in the
	     pattern, or hidden off in the constant pool with (or without?)
	     a REG_EQUAL note.  */
	  /* ??? Ignore the without-reg_equal-note problem for now.  */
	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		  && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0)))
	    place = i3;

	  if (i2
	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
		      && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0))))
	    {
	      if (place)
		place2 = i2;
	      else
		place = i2;
	    }

	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
	     there.  */
	  if (place && JUMP_P (place)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place) == NULL
		  || JUMP_LABEL (place) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place);

	      if (!label)
		JUMP_LABEL (place) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }

	  if (place2 && JUMP_P (place2)
	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
	      && (JUMP_LABEL (place2) == NULL
		  || JUMP_LABEL (place2) == XEXP (note, 0)))
	    {
	      rtx label = JUMP_LABEL (place2);

	      if (!label)
		JUMP_LABEL (place2) = XEXP (note, 0);
	      else if (LABEL_P (label))
		LABEL_NUSES (label)--;
	    }
	  break;
	case REG_NONNEG:
	  /* This note says something about the value of a register prior
	     to the execution of an insn.  It is too much trouble to see
	     if the note is still correct in all situations.  It is better
	     to simply delete it.  */
	  break;
	case REG_DEAD:
	  /* If we replaced the right hand side of FROM_INSN with a
	     REG_EQUAL note, the original use of the dying register
	     will not have been combined into I3 and I2.  In such cases,
	     FROM_INSN is guaranteed to be the first of the combined
	     instructions, so we simply need to search back before
	     FROM_INSN for the previous use or set of this register,
	     then alter the notes there appropriately.

	     If the register is used as an input in I3, it dies there.
	     Similarly for I2, if it is nonzero and adjacent to I3.

	     If the register is not used as an input in either I3 or I2
	     and it is not one of the registers we were supposed to eliminate,
	     there are two possibilities.  We might have a non-adjacent I2
	     or we might have somehow eliminated an additional register
	     from a computation.  For example, we might have had A & B where
	     we discover that B will always be zero.  In this case we will
	     eliminate the reference to A.

	     In both cases, we must search to see if we can find a previous
	     use of A and put the death note there.  */
	  if (from_insn
	      && from_insn == i2mod
	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
	    tem_insn = from_insn;
	  else
	    {
	      tem_insn = i3;
	      if (from_insn
		  && CALL_P (from_insn)
		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
		place = from_insn;
	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
		place = i3;
	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
		place = i2;
	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
			&& !(i2mod
			     && reg_overlap_mentioned_p (XEXP (note, 0),
							 i2mod_old_rhs)))
		       || rtx_equal_p (XEXP (note, 0), elim_i1)
		       || rtx_equal_p (XEXP (note, 0), elim_i0))
		break;

	      /* If the new I2 sets the same register that is marked dead
		 in the note, the note now should not be put on I2, as the
		 note refers to a previous incarnation of the reg.  */
	      if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
		tem_insn = i2;
	    }
	  if (place == 0)
	    {
	      basic_block bb = this_basic_block;

	      for (tem_insn = PREV_INSN (tem_insn); place == 0;
		   tem_insn = PREV_INSN (tem_insn))
		{
		  if (!NONDEBUG_INSN_P (tem_insn))
		    {
		      if (tem_insn == BB_HEAD (bb))
			break;
		      continue;
		    }
		  /* If the register is being set at TEM_INSN, see if that is all
		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
		     into a REG_UNUSED note instead.  Don't delete sets to
		     global register vars.  */
		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
		       || !global_regs[REGNO (XEXP (note, 0))])
		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
		    {
		      rtx set = single_set (tem_insn);
		      rtx inner_dest = 0;
		      rtx_insn *cc0_setter = NULL;

		      if (set != 0)
			for (inner_dest = SET_DEST (set);
			     (GET_CODE (inner_dest) == STRICT_LOW_PART
			      || GET_CODE (inner_dest) == SUBREG
			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
			     inner_dest = XEXP (inner_dest, 0))
			  ;

		      /* Verify that it was the set, and not a clobber that
			 modified the register.

			 CC0 targets must be careful to maintain setter/user
			 pairs.  If we cannot delete the setter due to side
			 effects, mark the user with an UNUSED note instead
			 of deleting it.  */

		      if (set != 0 && ! side_effects_p (SET_SRC (set))
			  && rtx_equal_p (XEXP (note, 0), inner_dest)
#ifdef HAVE_cc0
			  && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
			      || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
				  && sets_cc0_p (PATTERN (cc0_setter)) > 0))
#endif
			  )
			{
			  /* Move the notes and links of TEM_INSN elsewhere.
			     This might delete other dead insns recursively.
			     First set the pattern to something that won't use
			     any register.  */
			  rtx old_notes = REG_NOTES (tem_insn);

			  PATTERN (tem_insn) = pc_rtx;
			  REG_NOTES (tem_insn) = NULL;

			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
					    NULL_RTX, NULL_RTX, NULL_RTX);
			  distribute_links (LOG_LINKS (tem_insn));

			  SET_INSN_DELETED (tem_insn);
			  if (tem_insn == i2)
			    i2 = NULL;

			  /* Delete the setter too.  */
			  if (cc0_setter)
			    {
			      PATTERN (cc0_setter) = pc_rtx;
			      old_notes = REG_NOTES (cc0_setter);
			      REG_NOTES (cc0_setter) = NULL;

			      distribute_notes (old_notes, cc0_setter,
						cc0_setter, NULL,
						NULL_RTX, NULL_RTX, NULL_RTX);
			      distribute_links (LOG_LINKS (cc0_setter));

			      SET_INSN_DELETED (cc0_setter);
			      if (cc0_setter == i2)
				i2 = NULL;
			    }
			}
		      else
			{
			  PUT_REG_NOTE_KIND (note, REG_UNUSED);

			  /* If there isn't already a REG_UNUSED note, put one
			     here.  Do not place a REG_DEAD note, even if
			     the register is also used here; that would not
			     match the algorithm used in lifetime analysis
			     and can cause the consistency check in the
			     scheduler to fail.  */
			  if (! find_regno_note (tem_insn, REG_UNUSED,
						 REGNO (XEXP (note, 0))))
			    place = tem_insn;
			  break;
			}
		    }
		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
			   || (CALL_P (tem_insn)
			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
		    {
		      place = tem_insn;

		      /* If we are doing a 3->2 combination, and we have a
			 register which formerly died in i3 and was not used
			 by i2, which now no longer dies in i3 and is used in
			 i2 but does not die in i2, and place is between i2
			 and i3, then we may need to move a link from place to
			 i2.  */
		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
			  && from_insn
			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
			{
			  struct insn_link *links = LOG_LINKS (place);
			  LOG_LINKS (place) = NULL;
			  distribute_links (links);
			}
		      break;
		    }

		  if (tem_insn == BB_HEAD (bb))
		    break;
		}
	    }
	  /* If the register is set or already dead at PLACE, we needn't do
	     anything with this note if it is still a REG_DEAD note.
	     We check here if it is set at all, not if it is totally replaced,
	     which is what `dead_or_set_p' checks, so also check for it being
	     set partially.  */

	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
	    {
	      unsigned int regno = REGNO (XEXP (note, 0));
	      reg_stat_type *rsp = &reg_stat[regno];

	      if (dead_or_set_p (place, XEXP (note, 0))
		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
		{
		  /* Unless the register previously died in PLACE, clear
		     last_death.  [I no longer understand why this is
		     being done.] */
		  if (rsp->last_death != place)
		    rsp->last_death = 0;
		  place = 0;
		}
	      else
		rsp->last_death = place;
	      /* If this is a death note for a hard reg that is occupying
		 multiple registers, ensure that we are still using all
		 parts of the object.  If we find a piece of the object
		 that is unused, we must arrange for an appropriate REG_DEAD
		 note to be added for it.  However, we can't just emit a USE
		 and tag the note to it, since the register might actually
		 be dead; so we recurse, and the recursive call then finds
		 the previous insn that used this register.  */

	      if (place && regno < FIRST_PSEUDO_REGISTER
		  && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
		{
		  unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
		  bool all_used = true;
		  unsigned int i;

		  for (i = regno; i < endregno; i++)
		    if ((! refers_to_regno_p (i, PATTERN (place))
			 && ! find_regno_fusage (place, USE, i))
			|| dead_or_set_regno_p (place, i))
		      {
			all_used = false;
			break;
		      }

		  if (! all_used)
		    {
		      /* Put only REG_DEAD notes for pieces that are
			 not already dead or set.  */

		      for (i = regno; i < endregno;
			   i += hard_regno_nregs[i][reg_raw_mode[i]])
			{
			  rtx piece = regno_reg_rtx[i];
			  basic_block bb = this_basic_block;

			  if (! dead_or_set_p (place, piece)
			      && ! reg_bitfield_target_p (piece,
							  PATTERN (place)))
			    {
			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
							     NULL_RTX);

			      distribute_notes (new_note, place, place,
						NULL, NULL_RTX, NULL_RTX,
						NULL_RTX);
			    }
			  else if (! refers_to_regno_p (i, PATTERN (place))
				   && ! find_regno_fusage (place, USE, i))
			    for (tem_insn = PREV_INSN (place); ;
				 tem_insn = PREV_INSN (tem_insn))
			      {
				if (!NONDEBUG_INSN_P (tem_insn))
				  {
				    if (tem_insn == BB_HEAD (bb))
				      break;
				    continue;
				  }
				if (dead_or_set_p (tem_insn, piece)
				    || reg_bitfield_target_p (piece,
							      PATTERN (tem_insn)))
				  {
				    add_reg_note (tem_insn, REG_UNUSED, piece);
				    break;
				  }
			      }
			}

		      place = 0;
		    }
		}
	    }
	  break;
	default:
	  /* Any other notes should not be present at this point in the
	     compilation.  */
	  gcc_unreachable ();
	}

      if (place)
	{
	  XEXP (note, 1) = REG_NOTES (place);
	  REG_NOTES (place) = note;
	}

      if (place2)
	add_shallow_copy_of_reg_note (place2, note);
    }
}
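/* Editor's illustrative sketch (not part of the original source): after a
   successful combination, the notes of an insn that is being deleted must be
   re-homed.  The helper name is invented for illustration only; the call
   mirrors how the combiner hands old notes back to distribute_notes.  */
#if 0
static void
sketch_rehome_notes (rtx_insn *dead_insn, rtx_insn *i3, rtx_insn *i2)
{
  rtx old_notes = REG_NOTES (dead_insn);
  REG_NOTES (dead_insn) = NULL_RTX;
  /* Each note is either dropped or re-attached to I3 or I2, depending on
     its kind and on where the registers it mentions are now used.  */
  distribute_notes (old_notes, dead_insn, i3, i2,
		    NULL_RTX, NULL_RTX, NULL_RTX);
}
#endif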
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
	continue;

      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
	set = pat;
      else if (GET_CODE (pat) == PARALLEL)
	{
	  int i;
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      set = XVECEXP (pat, 0, i);
	      if (GET_CODE (set) != SET)
		continue;

	      reg = SET_DEST (set);
	      while (GET_CODE (reg) == ZERO_EXTRACT
		     || GET_CODE (reg) == STRICT_LOW_PART
		     || GET_CODE (reg) == SUBREG)
		reg = XEXP (reg, 0);

	      if (!REG_P (reg))
		continue;

	      if (REGNO (reg) == link->regno)
		break;
	    }
	  if (i == XVECLEN (pat, 0))
	    continue;
	}
      else
	continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
	     || GET_CODE (reg) == STRICT_LOW_PART
	     || GET_CODE (reg) == SUBREG)
	reg = XEXP (reg, 0);

      /* A LOG_LINK is defined as being placed on the first insn that uses
	 a register and points to the insn that sets the register.  Start
	 searching at the next insn after the target of the link and stop
	 when we reach a set of the register or the end of the basic block.

	 Note that this correctly handles the link that used to point from
	 I3 to I2.  Also note that not much searching is typically done here
	 since most links don't point very far away.  */
      for (insn = NEXT_INSN (link->insn);
	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
		     || BB_HEAD (this_basic_block->next_bb) != insn));
	   insn = NEXT_INSN (insn))
	if (DEBUG_INSN_P (insn))
	  continue;
	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	  {
	    if (reg_referenced_p (reg, PATTERN (insn)))
	      place = insn;
	    break;
	  }
	else if (CALL_P (insn)
		 && find_reg_fusage (insn, USE, reg))
	  {
	    place = insn;
	    break;
	  }
	else if (INSN_P (insn) && reg_set_p (reg, insn))
	  break;

      /* If we found a place to put the link, place it there unless there
	 is already a link to the same insn as LINK at that point.  */

      if (place)
	{
	  struct insn_link *link2;

	  FOR_EACH_LOG_LINK (link2, place)
	    if (link2->insn == link->insn && link2->regno == link->regno)
	      break;

	  if (link2 == NULL)
	    {
	      link->next = LOG_LINKS (place);
	      LOG_LINKS (place) = link;

	      /* Set added_links_insn to the earliest insn we added a
		 link to.  */
	      if (added_links_insn == 0
		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
		added_links_insn = place;
	    }
	}
    }
}
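/* Editor's illustrative sketch (not part of the original source): when an
   insn's destination changes, the links that pointed at the old producer are
   detached and re-placed on the first later insn that still uses the
   register.  The helper name is invented for illustration only.  */
#if 0
static void
sketch_rehome_links (rtx_insn *insn)
{
  struct insn_link *links = LOG_LINKS (insn);
  LOG_LINKS (insn) = NULL;
  /* Each link is re-attached to the first following insn in the block that
     uses the register it describes, unless that register is set first.  */
  distribute_links (links);
}
#endif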
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
	  && !reg_mentioned_p (x, expr))
	return true;
    }
  return false;
}
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}