gcc/combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
36 LOG_LINKS do not have links for uses of CC0. They don't
37 need to, because the insn that sets CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with modified_between_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
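/* Illustrative sketch (not part of the original sources): a typical
   two-insn combination substitutes the value set by an earlier insn
   into a later insn that uses it, e.g.

	A: (set (reg 100) (plus (reg 101) (const_int 4)))
	B: (set (reg 102) (mult (reg 100) (reg 103)))

   becomes, provided reg 100 dies in B and the result matches a
   pattern in the machine description,

	B: (set (reg 102) (mult (plus (reg 101) (const_int 4)) (reg 103)))

   with A deleted and LOG_LINKS/REG_NOTES updated accordingly.  */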
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "expr.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
107 #include "function-abi.h"
109 /* Number of attempts to combine instructions in this function. */
111 static int combine_attempts;
113 /* Number of attempts that got as far as substitution in this function. */
115 static int combine_merges;
117 /* Number of instructions combined with added SETs in this function. */
119 static int combine_extras;
121 /* Number of instructions combined in this function. */
123 static int combine_successes;
125 /* Totals over entire compilation. */
127 static int total_attempts, total_merges, total_extras, total_successes;
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
136 static rtx_insn *i2mod;
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
140 static rtx i2mod_old_rhs;
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
144 static rtx i2mod_new_rhs;
146 struct reg_stat_type {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx_insn *last_death;
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx_insn *last_set;
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
166 Therefore, we maintain the following fields:
168 last_set_value the last value assigned
169 last_set_label records the value of label_tick when the
170 register was assigned
171 last_set_table_tick records the value of label_tick when a
172 value using the register is assigned
173 last_set_invalid set to nonzero when it is not valid
174 to use the value of this register in some
175 register's value
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
182 (The next two parameters are out of date).
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
196 /* Record last value assigned to (hard or pseudo) register n. */
198 rtx last_set_value;
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
203 int last_set_table_tick;
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
208 int last_set_label;
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
213 sign bit copies it was known to have when it was last set. */
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
223 char last_set_invalid;
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
235 If an entry is zero, it means that we don't know anything special. */
237 unsigned char sign_bit_copies;
239 unsigned HOST_WIDE_INT nonzero_bits;
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
245 int truncation_label;
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
256 static vec<reg_stat_type> reg_stat;
258 /* One plus the highest pseudo for which we track REG_N_SETS.
259 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
260 but during combine_split_insns new pseudos can be created. As we don't have
261 updated DF information in that case, it is hard to initialize the array
262 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
263 so instead of growing the arrays, just assume all newly created pseudos
264 during combine might be set multiple times. */
266 static unsigned int reg_n_sets_max;
268 /* Record the luid of the last insn that invalidated memory
269 (anything that writes memory, and subroutine calls, but not pushes). */
271 static int mem_last_set;
273 /* Record the luid of the last CALL_INSN
274 so we can tell whether a potential combination crosses any calls. */
276 static int last_call_luid;
278 /* When `subst' is called, this is the insn that is being modified
279 (by combining in a previous insn). The PATTERN of this insn
280 is still the old pattern partially modified and it should not be
281 looked at, but this may be used to examine the successors of the insn
282 to judge whether a simplification is valid. */
284 static rtx_insn *subst_insn;
286 /* This is the lowest LUID that `subst' is currently dealing with.
287 get_last_value will not return a value if the register was set at or
288 after this LUID. If not for this mechanism, we could get confused if
289 I2 or I1 in try_combine were an insn that used the old value of a register
290 to obtain a new value. In that case, we might erroneously get the
291 new value of the register when we wanted the old one. */
293 static int subst_low_luid;
295 /* This contains any hard registers that are used in newpat; reg_dead_at_p
296 must consider all these registers to be always live. */
298 static HARD_REG_SET newpat_used_regs;
300 /* This is an insn to which a LOG_LINKS entry has been added. If this
301 insn is earlier than I2 or I3, combine should rescan starting at
302 that location. */
304 static rtx_insn *added_links_insn;
306 /* And similarly, for notes. */
308 static rtx_insn *added_notes_insn;
310 /* Basic block in which we are performing combines. */
311 static basic_block this_basic_block;
312 static bool optimize_this_for_speed_p;
315 /* Length of the currently allocated uid_insn_cost array. */
317 static int max_uid_known;
319 /* The following array records the insn_cost for every insn
320 in the instruction stream. */
322 static int *uid_insn_cost;
324 /* The following array records the LOG_LINKS for every insn in the
325 instruction stream as struct insn_link pointers. */
327 struct insn_link {
328 rtx_insn *insn;
329 unsigned int regno;
330 struct insn_link *next;
333 static struct insn_link **uid_log_links;
335 static inline int
336 insn_uid_check (const_rtx insn)
338 int uid = INSN_UID (insn);
339 gcc_checking_assert (uid <= max_uid_known);
340 return uid;
343 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
344 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
346 #define FOR_EACH_LOG_LINK(L, INSN) \
347 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
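/* Usage sketch (illustrative; it mirrors insn_a_feeds_b further down):
   scan the LOG_LINKS of INSN for a link coming from PREV.

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->insn == prev)
	    break;
*/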
349 /* Links for LOG_LINKS are allocated from this obstack. */
351 static struct obstack insn_link_obstack;
353 /* Allocate a link. */
355 static inline struct insn_link *
356 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
358 struct insn_link *l
359 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
360 sizeof (struct insn_link));
361 l->insn = insn;
362 l->regno = regno;
363 l->next = next;
364 return l;
367 /* Incremented for each basic block. */
369 static int label_tick;
371 /* Reset to label_tick for each extended basic block in scanning order. */
373 static int label_tick_ebb_start;
375 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
376 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
378 static scalar_int_mode nonzero_bits_mode;
380 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
381 be safely used. It is zero while computing them and after combine has
382 completed. This former test prevents propagating values based on
383 previously set values, which can be incorrect if a variable is modified
384 in a loop. */
386 static int nonzero_sign_valid;
389 /* Record one modification to rtl structure
390 to be undone by storing old_contents into *where. */
392 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
394 struct undo
396 struct undo *next;
397 enum undo_kind kind;
398 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
399 union { rtx *r; int *i; struct insn_link **l; } where;
402 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
403 num_undo says how many are currently recorded.
405 other_insn is nonzero if we have modified some other insn in the process
406 of working on subst_insn. It must be verified too. */
408 struct undobuf
410 struct undo *undos;
411 struct undo *frees;
412 rtx_insn *other_insn;
415 static struct undobuf undobuf;
417 /* Number of times the pseudo being substituted for
418 was found and replaced. */
420 static int n_occurrences;
422 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
423 scalar_int_mode,
424 unsigned HOST_WIDE_INT *);
425 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
426 scalar_int_mode,
427 unsigned int *);
428 static void do_SUBST (rtx *, rtx);
429 static void do_SUBST_INT (int *, int);
430 static void init_reg_last (void);
431 static void setup_incoming_promotions (rtx_insn *);
432 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
433 static int cant_combine_insn_p (rtx_insn *);
434 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 rtx_insn *, rtx_insn *, rtx *, rtx *);
436 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
437 static int contains_muldiv (rtx);
438 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
439 int *, rtx_insn *);
440 static void undo_all (void);
441 static void undo_commit (void);
442 static rtx *find_split_point (rtx *, rtx_insn *, bool);
443 static rtx subst (rtx, rtx, rtx, int, int, int);
444 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
445 static rtx simplify_if_then_else (rtx);
446 static rtx simplify_set (rtx);
447 static rtx simplify_logical (rtx);
448 static rtx expand_compound_operation (rtx);
449 static const_rtx expand_field_assignment (const_rtx);
450 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
451 rtx, unsigned HOST_WIDE_INT, int, int, int);
452 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
453 unsigned HOST_WIDE_INT *);
454 static rtx canon_reg_for_combine (rtx, rtx);
455 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
456 scalar_int_mode, unsigned HOST_WIDE_INT, int);
457 static rtx force_to_mode (rtx, machine_mode,
458 unsigned HOST_WIDE_INT, int);
459 static rtx if_then_else_cond (rtx, rtx *, rtx *);
460 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
461 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
462 static rtx make_field_assignment (rtx);
463 static rtx apply_distributive_law (rtx);
464 static rtx distribute_and_simplify_rtx (rtx, int);
465 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
466 unsigned HOST_WIDE_INT);
467 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
468 unsigned HOST_WIDE_INT);
469 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
470 HOST_WIDE_INT, machine_mode, int *);
471 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
472 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
473 int);
474 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
475 static rtx gen_lowpart_for_combine (machine_mode, rtx);
476 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
477 rtx, rtx *);
478 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
479 static void update_table_tick (rtx);
480 static void record_value_for_reg (rtx, rtx_insn *, rtx);
481 static void check_promoted_subreg (rtx_insn *, rtx);
482 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
483 static void record_dead_and_set_regs (rtx_insn *);
484 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
485 static rtx get_last_value (const_rtx);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
500 /* It is not safe to use ordinary gen_lowpart in combine.
501 See comments in gen_lowpart_for_combine. */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
505 /* Our implementation of gen_lowpart never emits a new pseudo. */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522 Target hooks cannot use enum rtx_code. */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 bool op0_preserve_value)
527 int code_int = (int)*code;
528 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529 *code = (enum rtx_code)code_int;
532 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
533 PATTERN cannot be split. Otherwise, it returns an insn sequence.
534 This is a wrapper around split_insns which ensures that the
535 reg_stat vector is made larger if the splitter creates a new
536 register. */
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx_insn *insn)
541 rtx_insn *ret;
542 unsigned int nregs;
544 ret = split_insns (pattern, insn);
545 nregs = max_reg_num ();
546 if (nregs > reg_stat.length ())
547 reg_stat.safe_grow_cleared (nregs);
548 return ret;
551 /* This is used by find_single_use to locate an rtx in LOC that
552 contains exactly one use of DEST, which is typically either a REG
553 or CC0. It returns a pointer to the innermost rtx expression
554 containing DEST. Appearances of DEST that are being used to
555 totally replace it are not counted. */
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
560 rtx x = *loc;
561 enum rtx_code code = GET_CODE (x);
562 rtx *result = NULL;
563 rtx *this_result;
564 int i;
565 const char *fmt;
567 switch (code)
569 case CONST:
570 case LABEL_REF:
571 case SYMBOL_REF:
572 CASE_CONST_ANY:
573 case CLOBBER:
574 return 0;
576 case SET:
577 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
578 of a REG that occupies all of the REG, the insn uses DEST if
579 it is mentioned in the destination or the source. Otherwise, we
580 just need to check the source. */
581 if (GET_CODE (SET_DEST (x)) != CC0
582 && GET_CODE (SET_DEST (x)) != PC
583 && !REG_P (SET_DEST (x))
584 && ! (GET_CODE (SET_DEST (x)) == SUBREG
585 && REG_P (SUBREG_REG (SET_DEST (x)))
586 && !read_modify_subreg_p (SET_DEST (x))))
587 break;
589 return find_single_use_1 (dest, &SET_SRC (x));
591 case MEM:
592 case SUBREG:
593 return find_single_use_1 (dest, &XEXP (x, 0));
595 default:
596 break;
599 /* If it wasn't one of the common cases above, check each expression and
600 vector of this code. Look for a unique usage of DEST. */
602 fmt = GET_RTX_FORMAT (code);
603 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
605 if (fmt[i] == 'e')
607 if (dest == XEXP (x, i)
608 || (REG_P (dest) && REG_P (XEXP (x, i))
609 && REGNO (dest) == REGNO (XEXP (x, i))))
610 this_result = loc;
611 else
612 this_result = find_single_use_1 (dest, &XEXP (x, i));
614 if (result == NULL)
615 result = this_result;
616 else if (this_result)
617 /* Duplicate usage. */
618 return NULL;
620 else if (fmt[i] == 'E')
622 int j;
624 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
626 if (XVECEXP (x, i, j) == dest
627 || (REG_P (dest)
628 && REG_P (XVECEXP (x, i, j))
629 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
630 this_result = loc;
631 else
632 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
634 if (result == NULL)
635 result = this_result;
636 else if (this_result)
637 return NULL;
642 return result;
646 /* See if DEST, produced in INSN, is used only a single time in the
647 sequel. If so, return a pointer to the innermost rtx expression in which
648 it is used.
650 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
652 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
653 care about REG_DEAD notes or LOG_LINKS.
655 Otherwise, we find the single use by finding an insn that has a
656 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
657 only referenced once in that insn, we know that it must be the first
658 and last insn referencing DEST. */
660 static rtx *
661 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
663 basic_block bb;
664 rtx_insn *next;
665 rtx *result;
666 struct insn_link *link;
668 if (dest == cc0_rtx)
670 next = NEXT_INSN (insn);
671 if (next == 0
672 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
673 return 0;
675 result = find_single_use_1 (dest, &PATTERN (next));
676 if (result && ploc)
677 *ploc = next;
678 return result;
681 if (!REG_P (dest))
682 return 0;
684 bb = BLOCK_FOR_INSN (insn);
685 for (next = NEXT_INSN (insn);
686 next && BLOCK_FOR_INSN (next) == bb;
687 next = NEXT_INSN (next))
688 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
690 FOR_EACH_LOG_LINK (link, next)
691 if (link->insn == insn && link->regno == REGNO (dest))
692 break;
694 if (link)
696 result = find_single_use_1 (dest, &PATTERN (next));
697 if (ploc)
698 *ploc = next;
699 return result;
703 return 0;
706 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
707 insn. The substitution can be undone by undo_all. If INTO is already
708 set to NEWVAL, do not record this change. Because computing NEWVAL might
709 also call SUBST, we have to compute it before we put anything into
710 the undo table. */
712 static void
713 do_SUBST (rtx *into, rtx newval)
715 struct undo *buf;
716 rtx oldval = *into;
718 if (oldval == newval)
719 return;
721 /* We'd like to catch as many invalid transformations here as
722 possible. Unfortunately, there are way too many mode changes
723 that are perfectly valid, so we'd waste too much effort for
724 little gain doing the checks here. Focus on catching invalid
725 transformations involving integer constants. */
726 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
727 && CONST_INT_P (newval))
729 /* Sanity check that we're replacing oldval with a CONST_INT
730 that is a valid sign-extension for the original mode. */
731 gcc_assert (INTVAL (newval)
732 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
734 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
735 CONST_INT is not valid, because after the replacement, the
736 original mode would be gone. Unfortunately, we can't tell
737 when do_SUBST is called to replace the operand thereof, so we
738 perform this test on oldval instead, checking whether an
739 invalid replacement took place before we got here. */
740 gcc_assert (!(GET_CODE (oldval) == SUBREG
741 && CONST_INT_P (SUBREG_REG (oldval))));
742 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
743 && CONST_INT_P (XEXP (oldval, 0))));
746 if (undobuf.frees)
747 buf = undobuf.frees, undobuf.frees = buf->next;
748 else
749 buf = XNEW (struct undo);
751 buf->kind = UNDO_RTX;
752 buf->where.r = into;
753 buf->old_contents.r = oldval;
754 *into = newval;
756 buf->next = undobuf.undos, undobuf.undos = buf;
759 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
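/* Usage sketch (illustrative only; `pat', `new_src' and `new_notes' are
   hypothetical locals): a substitution made through SUBST is recorded in
   undobuf, so the whole rewrite can be reverted if the new pattern is
   not recognized.

	SUBST (SET_SRC (pat), new_src);
	if (recog_for_combine (&pat, i3, &new_notes) < 0)
	  undo_all ();
*/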
761 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
762 for the value of a HOST_WIDE_INT value (including CONST_INT) is
763 not safe. */
765 static void
766 do_SUBST_INT (int *into, int newval)
768 struct undo *buf;
769 int oldval = *into;
771 if (oldval == newval)
772 return;
774 if (undobuf.frees)
775 buf = undobuf.frees, undobuf.frees = buf->next;
776 else
777 buf = XNEW (struct undo);
779 buf->kind = UNDO_INT;
780 buf->where.i = into;
781 buf->old_contents.i = oldval;
782 *into = newval;
784 buf->next = undobuf.undos, undobuf.undos = buf;
787 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
789 /* Similar to SUBST, but just substitute the mode. This is used when
790 changing the mode of a pseudo-register, so that any other
791 references to the entry in the regno_reg_rtx array will change as
792 well. */
794 static void
795 do_SUBST_MODE (rtx *into, machine_mode newval)
797 struct undo *buf;
798 machine_mode oldval = GET_MODE (*into);
800 if (oldval == newval)
801 return;
803 if (undobuf.frees)
804 buf = undobuf.frees, undobuf.frees = buf->next;
805 else
806 buf = XNEW (struct undo);
808 buf->kind = UNDO_MODE;
809 buf->where.r = into;
810 buf->old_contents.m = oldval;
811 adjust_reg_mode (*into, newval);
813 buf->next = undobuf.undos, undobuf.undos = buf;
816 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
818 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
820 static void
821 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
823 struct undo *buf;
824 struct insn_link * oldval = *into;
826 if (oldval == newval)
827 return;
829 if (undobuf.frees)
830 buf = undobuf.frees, undobuf.frees = buf->next;
831 else
832 buf = XNEW (struct undo);
834 buf->kind = UNDO_LINKS;
835 buf->where.l = into;
836 buf->old_contents.l = oldval;
837 *into = newval;
839 buf->next = undobuf.undos, undobuf.undos = buf;
842 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
844 /* Subroutine of try_combine. Determine whether the replacement patterns
845 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
846 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
847 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
848 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
849 of all the instructions can be estimated and the replacements are more
850 expensive than the original sequence. */
852 static bool
853 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
854 rtx newpat, rtx newi2pat, rtx newotherpat)
856 int i0_cost, i1_cost, i2_cost, i3_cost;
857 int new_i2_cost, new_i3_cost;
858 int old_cost, new_cost;
860 /* Lookup the original insn_costs. */
861 i2_cost = INSN_COST (i2);
862 i3_cost = INSN_COST (i3);
864 if (i1)
866 i1_cost = INSN_COST (i1);
867 if (i0)
869 i0_cost = INSN_COST (i0);
870 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
871 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
873 else
875 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
876 ? i1_cost + i2_cost + i3_cost : 0);
877 i0_cost = 0;
880 else
882 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
883 i1_cost = i0_cost = 0;
886 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
887 correct that. */
888 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
889 old_cost -= i1_cost;
892 /* Calculate the replacement insn_costs. */
893 rtx tmp = PATTERN (i3);
894 PATTERN (i3) = newpat;
895 int tmpi = INSN_CODE (i3);
896 INSN_CODE (i3) = -1;
897 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
898 PATTERN (i3) = tmp;
899 INSN_CODE (i3) = tmpi;
900 if (newi2pat)
902 tmp = PATTERN (i2);
903 PATTERN (i2) = newi2pat;
904 tmpi = INSN_CODE (i2);
905 INSN_CODE (i2) = -1;
906 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
907 PATTERN (i2) = tmp;
908 INSN_CODE (i2) = tmpi;
909 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
910 ? new_i2_cost + new_i3_cost : 0;
912 else
914 new_cost = new_i3_cost;
915 new_i2_cost = 0;
918 if (undobuf.other_insn)
920 int old_other_cost, new_other_cost;
922 old_other_cost = INSN_COST (undobuf.other_insn);
923 tmp = PATTERN (undobuf.other_insn);
924 PATTERN (undobuf.other_insn) = newotherpat;
925 tmpi = INSN_CODE (undobuf.other_insn);
926 INSN_CODE (undobuf.other_insn) = -1;
927 new_other_cost = insn_cost (undobuf.other_insn,
928 optimize_this_for_speed_p);
929 PATTERN (undobuf.other_insn) = tmp;
930 INSN_CODE (undobuf.other_insn) = tmpi;
931 if (old_other_cost > 0 && new_other_cost > 0)
933 old_cost += old_other_cost;
934 new_cost += new_other_cost;
936 else
937 old_cost = 0;
940 /* Disallow this combination if both new_cost and old_cost are greater than
941 zero, and new_cost is greater than old_cost. */
942 int reject = old_cost > 0 && new_cost > old_cost;
944 if (dump_file)
946 fprintf (dump_file, "%s combination of insns ",
947 reject ? "rejecting" : "allowing");
948 if (i0)
949 fprintf (dump_file, "%d, ", INSN_UID (i0));
950 if (i1 && INSN_UID (i1) != INSN_UID (i2))
951 fprintf (dump_file, "%d, ", INSN_UID (i1));
952 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
954 fprintf (dump_file, "original costs ");
955 if (i0)
956 fprintf (dump_file, "%d + ", i0_cost);
957 if (i1 && INSN_UID (i1) != INSN_UID (i2))
958 fprintf (dump_file, "%d + ", i1_cost);
959 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
961 if (newi2pat)
962 fprintf (dump_file, "replacement costs %d + %d = %d\n",
963 new_i2_cost, new_i3_cost, new_cost);
964 else
965 fprintf (dump_file, "replacement cost %d\n", new_cost);
968 if (reject)
969 return false;
971 /* Update the uid_insn_cost array with the replacement costs. */
972 INSN_COST (i2) = new_i2_cost;
973 INSN_COST (i3) = new_i3_cost;
974 if (i1)
976 INSN_COST (i1) = 0;
977 if (i0)
978 INSN_COST (i0) = 0;
981 return true;
985 /* Delete any insns that copy a register to itself.
986 Return true if the CFG was changed. */
988 static bool
989 delete_noop_moves (void)
991 rtx_insn *insn, *next;
992 basic_block bb;
994 bool edges_deleted = false;
996 FOR_EACH_BB_FN (bb, cfun)
998 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
1000 next = NEXT_INSN (insn);
1001 if (INSN_P (insn) && noop_move_p (insn))
1003 if (dump_file)
1004 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1006 edges_deleted |= delete_insn_and_edges (insn);
1011 return edges_deleted;
1015 /* Return false if we do not want to (or cannot) combine DEF. */
1016 static bool
1017 can_combine_def_p (df_ref def)
1019 /* Do not consider the def if it is a pre/post modification in a MEM. */
1020 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1021 return false;
1023 unsigned int regno = DF_REF_REGNO (def);
1025 /* Do not combine frame pointer adjustments. */
1026 if ((regno == FRAME_POINTER_REGNUM
1027 && (!reload_completed || frame_pointer_needed))
1028 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1029 && regno == HARD_FRAME_POINTER_REGNUM
1030 && (!reload_completed || frame_pointer_needed))
1031 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1032 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1033 return false;
1035 return true;
1038 /* Return false if we do not want to (or cannot) combine USE. */
1039 static bool
1040 can_combine_use_p (df_ref use)
1042 /* Do not consider the use of the stack pointer by a function call. */
1043 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1044 return false;
1046 return true;
1049 /* Fill in log links field for all insns. */
1051 static void
1052 create_log_links (void)
1054 basic_block bb;
1055 rtx_insn **next_use;
1056 rtx_insn *insn;
1057 df_ref def, use;
1059 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1061 /* Pass through each block from the end, recording the uses of each
1062 register and establishing log links when def is encountered.
1063 Note that we do not clear the next_use array in order to save time,
1064 so we have to test whether the use is in the same basic block as def.
1066 There are a few cases below when we do not consider the definition or
1067 usage -- these are taken from what the original flow.c did. Don't ask me why it is
1068 done this way; I don't know and if it works, I don't want to know. */
1070 FOR_EACH_BB_FN (bb, cfun)
1072 FOR_BB_INSNS_REVERSE (bb, insn)
1074 if (!NONDEBUG_INSN_P (insn))
1075 continue;
1077 /* Log links are created only once. */
1078 gcc_assert (!LOG_LINKS (insn));
1080 FOR_EACH_INSN_DEF (def, insn)
1082 unsigned int regno = DF_REF_REGNO (def);
1083 rtx_insn *use_insn;
1085 if (!next_use[regno])
1086 continue;
1088 if (!can_combine_def_p (def))
1089 continue;
1091 use_insn = next_use[regno];
1092 next_use[regno] = NULL;
1094 if (BLOCK_FOR_INSN (use_insn) != bb)
1095 continue;
1097 /* flow.c claimed:
1099 We don't build a LOG_LINK for hard registers contained
1100 in ASM_OPERANDs. If these registers get replaced,
1101 we might wind up changing the semantics of the insn,
1102 even if reload can make what appear to be valid
1103 assignments later. */
1104 if (regno < FIRST_PSEUDO_REGISTER
1105 && asm_noperands (PATTERN (use_insn)) >= 0)
1106 continue;
1108 /* Don't add duplicate links between instructions. */
1109 struct insn_link *links;
1110 FOR_EACH_LOG_LINK (links, use_insn)
1111 if (insn == links->insn && regno == links->regno)
1112 break;
1114 if (!links)
1115 LOG_LINKS (use_insn)
1116 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1119 FOR_EACH_INSN_USE (use, insn)
1120 if (can_combine_use_p (use))
1121 next_use[DF_REF_REGNO (use)] = insn;
1125 free (next_use);
1128 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1129 true if we found a LOG_LINK that proves that A feeds B. This only works
1130 if there are no instructions between A and B which could have a link
1131 depending on A, since in that case we would not record a link for B.
1132 We also check the implicit dependency created by a cc0 setter/user
1133 pair. */
1135 static bool
1136 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1138 struct insn_link *links;
1139 FOR_EACH_LOG_LINK (links, b)
1140 if (links->insn == a)
1141 return true;
1142 if (HAVE_cc0 && sets_cc0_p (a))
1143 return true;
1144 return false;
1147 /* Main entry point for combiner. F is the first insn of the function.
1148 NREGS is the first unused pseudo-reg number.
1150 Return nonzero if the CFG was changed (e.g. if the combiner has
1151 turned an indirect jump instruction into a direct jump). */
1152 static int
1153 combine_instructions (rtx_insn *f, unsigned int nregs)
1155 rtx_insn *insn, *next;
1156 rtx_insn *prev;
1157 struct insn_link *links, *nextlinks;
1158 rtx_insn *first;
1159 basic_block last_bb;
1161 int new_direct_jump_p = 0;
1163 for (first = f; first && !NONDEBUG_INSN_P (first); )
1164 first = NEXT_INSN (first);
1165 if (!first)
1166 return 0;
1168 combine_attempts = 0;
1169 combine_merges = 0;
1170 combine_extras = 0;
1171 combine_successes = 0;
1173 rtl_hooks = combine_rtl_hooks;
1175 reg_stat.safe_grow_cleared (nregs);
1177 init_recog_no_volatile ();
1179 /* Allocate array for insn info. */
1180 max_uid_known = get_max_uid ();
1181 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1182 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1183 gcc_obstack_init (&insn_link_obstack);
1185 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1187 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1188 problems when, for example, we have j <<= 1 in a loop. */
1190 nonzero_sign_valid = 0;
1191 label_tick = label_tick_ebb_start = 1;
1193 /* Scan all SETs and see if we can deduce anything about what
1194 bits are known to be zero for some registers and how many copies
1195 of the sign bit are known to exist for those registers.
1197 Also set any known values so that we can use it while searching
1198 for what bits are known to be set. */
1200 setup_incoming_promotions (first);
1201 /* Allow the entry block and the first block to fall into the same EBB.
1202 Conceptually the incoming promotions are assigned to the entry block. */
1203 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1205 create_log_links ();
1206 FOR_EACH_BB_FN (this_basic_block, cfun)
1208 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1209 last_call_luid = 0;
1210 mem_last_set = -1;
1212 label_tick++;
1213 if (!single_pred_p (this_basic_block)
1214 || single_pred (this_basic_block) != last_bb)
1215 label_tick_ebb_start = label_tick;
1216 last_bb = this_basic_block;
1218 FOR_BB_INSNS (this_basic_block, insn)
1219 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1221 rtx links;
1223 subst_low_luid = DF_INSN_LUID (insn);
1224 subst_insn = insn;
1226 note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
1227 record_dead_and_set_regs (insn);
1229 if (AUTO_INC_DEC)
1230 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1231 if (REG_NOTE_KIND (links) == REG_INC)
1232 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1233 insn);
1235 /* Record the current insn_cost of this instruction. */
1236 if (NONJUMP_INSN_P (insn))
1237 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1238 if (dump_file)
1240 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1241 dump_insn_slim (dump_file, insn);
1246 nonzero_sign_valid = 1;
1248 /* Now scan all the insns in forward order. */
1249 label_tick = label_tick_ebb_start = 1;
1250 init_reg_last ();
1251 setup_incoming_promotions (first);
1252 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1253 int max_combine = param_max_combine_insns;
1255 FOR_EACH_BB_FN (this_basic_block, cfun)
1257 rtx_insn *last_combined_insn = NULL;
1259 /* Ignore instruction combination in basic blocks that are going to
1260 be removed as unreachable anyway. See PR82386. */
1261 if (EDGE_COUNT (this_basic_block->preds) == 0)
1262 continue;
1264 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1265 last_call_luid = 0;
1266 mem_last_set = -1;
1268 label_tick++;
1269 if (!single_pred_p (this_basic_block)
1270 || single_pred (this_basic_block) != last_bb)
1271 label_tick_ebb_start = label_tick;
1272 last_bb = this_basic_block;
1274 rtl_profile_for_bb (this_basic_block);
1275 for (insn = BB_HEAD (this_basic_block);
1276 insn != NEXT_INSN (BB_END (this_basic_block));
1277 insn = next ? next : NEXT_INSN (insn))
1279 next = 0;
1280 if (!NONDEBUG_INSN_P (insn))
1281 continue;
1283 while (last_combined_insn
1284 && (!NONDEBUG_INSN_P (last_combined_insn)
1285 || last_combined_insn->deleted ()))
1286 last_combined_insn = PREV_INSN (last_combined_insn);
1287 if (last_combined_insn == NULL_RTX
1288 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1289 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1290 last_combined_insn = insn;
1292 /* See if we know about function return values before this
1293 insn based upon SUBREG flags. */
1294 check_promoted_subreg (insn, PATTERN (insn));
1296 /* See if we can find hardregs and subreg of pseudos in
1297 narrower modes. This could help turning TRUNCATEs
1298 into SUBREGs. */
1299 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1301 /* Try this insn with each insn it links back to. */
1303 FOR_EACH_LOG_LINK (links, insn)
1304 if ((next = try_combine (insn, links->insn, NULL,
1305 NULL, &new_direct_jump_p,
1306 last_combined_insn)) != 0)
1308 statistics_counter_event (cfun, "two-insn combine", 1);
1309 goto retry;
1312 /* Try each sequence of three linked insns ending with this one. */
1314 if (max_combine >= 3)
1315 FOR_EACH_LOG_LINK (links, insn)
1317 rtx_insn *link = links->insn;
1319 /* If the linked insn has been replaced by a note, then there
1320 is no point in pursuing this chain any further. */
1321 if (NOTE_P (link))
1322 continue;
1324 FOR_EACH_LOG_LINK (nextlinks, link)
1325 if ((next = try_combine (insn, link, nextlinks->insn,
1326 NULL, &new_direct_jump_p,
1327 last_combined_insn)) != 0)
1329 statistics_counter_event (cfun, "three-insn combine", 1);
1330 goto retry;
1334 /* Try to combine a jump insn that uses CC0
1335 with a preceding insn that sets CC0, and maybe with its
1336 logical predecessor as well.
1337 This is how we make decrement-and-branch insns.
1338 We need this special code because data flow connections
1339 via CC0 do not get entered in LOG_LINKS. */
1341 if (HAVE_cc0
1342 && JUMP_P (insn)
1343 && (prev = prev_nonnote_insn (insn)) != 0
1344 && NONJUMP_INSN_P (prev)
1345 && sets_cc0_p (PATTERN (prev)))
1347 if ((next = try_combine (insn, prev, NULL, NULL,
1348 &new_direct_jump_p,
1349 last_combined_insn)) != 0)
1350 goto retry;
1352 FOR_EACH_LOG_LINK (nextlinks, prev)
1353 if ((next = try_combine (insn, prev, nextlinks->insn,
1354 NULL, &new_direct_jump_p,
1355 last_combined_insn)) != 0)
1356 goto retry;
1359 /* Do the same for an insn that explicitly references CC0. */
1360 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1361 && (prev = prev_nonnote_insn (insn)) != 0
1362 && NONJUMP_INSN_P (prev)
1363 && sets_cc0_p (PATTERN (prev))
1364 && GET_CODE (PATTERN (insn)) == SET
1365 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1367 if ((next = try_combine (insn, prev, NULL, NULL,
1368 &new_direct_jump_p,
1369 last_combined_insn)) != 0)
1370 goto retry;
1372 FOR_EACH_LOG_LINK (nextlinks, prev)
1373 if ((next = try_combine (insn, prev, nextlinks->insn,
1374 NULL, &new_direct_jump_p,
1375 last_combined_insn)) != 0)
1376 goto retry;
1379 /* Finally, see if any of the insns that this insn links to
1380 explicitly references CC0. If so, try this insn, that insn,
1381 and its predecessor if it sets CC0. */
1382 if (HAVE_cc0)
1384 FOR_EACH_LOG_LINK (links, insn)
1385 if (NONJUMP_INSN_P (links->insn)
1386 && GET_CODE (PATTERN (links->insn)) == SET
1387 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1388 && (prev = prev_nonnote_insn (links->insn)) != 0
1389 && NONJUMP_INSN_P (prev)
1390 && sets_cc0_p (PATTERN (prev))
1391 && (next = try_combine (insn, links->insn,
1392 prev, NULL, &new_direct_jump_p,
1393 last_combined_insn)) != 0)
1394 goto retry;
1397 /* Try combining an insn with two different insns whose results it
1398 uses. */
1399 if (max_combine >= 3)
1400 FOR_EACH_LOG_LINK (links, insn)
1401 for (nextlinks = links->next; nextlinks;
1402 nextlinks = nextlinks->next)
1403 if ((next = try_combine (insn, links->insn,
1404 nextlinks->insn, NULL,
1405 &new_direct_jump_p,
1406 last_combined_insn)) != 0)
1409 statistics_counter_event (cfun, "three-insn combine", 1);
1410 goto retry;
1413 /* Try four-instruction combinations. */
1414 if (max_combine >= 4)
1415 FOR_EACH_LOG_LINK (links, insn)
1417 struct insn_link *next1;
1418 rtx_insn *link = links->insn;
1420 /* If the linked insn has been replaced by a note, then there
1421 is no point in pursuing this chain any further. */
1422 if (NOTE_P (link))
1423 continue;
1425 FOR_EACH_LOG_LINK (next1, link)
1427 rtx_insn *link1 = next1->insn;
1428 if (NOTE_P (link1))
1429 continue;
1430 /* I0 -> I1 -> I2 -> I3. */
1431 FOR_EACH_LOG_LINK (nextlinks, link1)
1432 if ((next = try_combine (insn, link, link1,
1433 nextlinks->insn,
1434 &new_direct_jump_p,
1435 last_combined_insn)) != 0)
1437 statistics_counter_event (cfun, "four-insn combine", 1);
1438 goto retry;
1440 /* I0, I1 -> I2, I2 -> I3. */
1441 for (nextlinks = next1->next; nextlinks;
1442 nextlinks = nextlinks->next)
1443 if ((next = try_combine (insn, link, link1,
1444 nextlinks->insn,
1445 &new_direct_jump_p,
1446 last_combined_insn)) != 0)
1448 statistics_counter_event (cfun, "four-insn combine", 1);
1449 goto retry;
1453 for (next1 = links->next; next1; next1 = next1->next)
1455 rtx_insn *link1 = next1->insn;
1456 if (NOTE_P (link1))
1457 continue;
1458 /* I0 -> I2; I1, I2 -> I3. */
1459 FOR_EACH_LOG_LINK (nextlinks, link)
1460 if ((next = try_combine (insn, link, link1,
1461 nextlinks->insn,
1462 &new_direct_jump_p,
1463 last_combined_insn)) != 0)
1465 statistics_counter_event (cfun, "four-insn combine", 1);
1466 goto retry;
1468 /* I0 -> I1; I1, I2 -> I3. */
1469 FOR_EACH_LOG_LINK (nextlinks, link1)
1470 if ((next = try_combine (insn, link, link1,
1471 nextlinks->insn,
1472 &new_direct_jump_p,
1473 last_combined_insn)) != 0)
1475 statistics_counter_event (cfun, "four-insn combine", 1);
1476 goto retry;
1481 /* Try this insn with each REG_EQUAL note it links back to. */
1482 FOR_EACH_LOG_LINK (links, insn)
1484 rtx set, note;
1485 rtx_insn *temp = links->insn;
1486 if ((set = single_set (temp)) != 0
1487 && (note = find_reg_equal_equiv_note (temp)) != 0
1488 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1489 /* Avoid using a register that may already have been marked
1490 dead by an earlier instruction. */
1491 && ! unmentioned_reg_p (note, SET_SRC (set))
1492 && (GET_MODE (note) == VOIDmode
1493 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1494 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1495 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1496 || (GET_MODE (XEXP (SET_DEST (set), 0))
1497 == GET_MODE (note))))))
1499 /* Temporarily replace the set's source with the
1500 contents of the REG_EQUAL note. The insn will
1501 be deleted or recognized by try_combine. */
1502 rtx orig_src = SET_SRC (set);
1503 rtx orig_dest = SET_DEST (set);
1504 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1505 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1506 SET_SRC (set) = note;
1507 i2mod = temp;
1508 i2mod_old_rhs = copy_rtx (orig_src);
1509 i2mod_new_rhs = copy_rtx (note);
1510 next = try_combine (insn, i2mod, NULL, NULL,
1511 &new_direct_jump_p,
1512 last_combined_insn);
1513 i2mod = NULL;
1514 if (next)
1516 statistics_counter_event (cfun, "insn-with-note combine", 1);
1517 goto retry;
1519 SET_SRC (set) = orig_src;
1520 SET_DEST (set) = orig_dest;
1524 if (!NOTE_P (insn))
1525 record_dead_and_set_regs (insn);
1527 retry:
1532 default_rtl_profile ();
1533 clear_bb_flags ();
1534 new_direct_jump_p |= purge_all_dead_edges ();
1535 new_direct_jump_p |= delete_noop_moves ();
1537 /* Clean up. */
1538 obstack_free (&insn_link_obstack, NULL);
1539 free (uid_log_links);
1540 free (uid_insn_cost);
1541 reg_stat.release ();
1544 struct undo *undo, *next;
1545 for (undo = undobuf.frees; undo; undo = next)
1547 next = undo->next;
1548 free (undo);
1550 undobuf.frees = 0;
1553 total_attempts += combine_attempts;
1554 total_merges += combine_merges;
1555 total_extras += combine_extras;
1556 total_successes += combine_successes;
1558 nonzero_sign_valid = 0;
1559 rtl_hooks = general_rtl_hooks;
1561 /* Make recognizer allow volatile MEMs again. */
1562 init_recog ();
1564 return new_direct_jump_p;
1567 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1569 static void
1570 init_reg_last (void)
1572 unsigned int i;
1573 reg_stat_type *p;
1575 FOR_EACH_VEC_ELT (reg_stat, i, p)
1576 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1579 /* Set up any promoted values for incoming argument registers. */
1581 static void
1582 setup_incoming_promotions (rtx_insn *first)
1584 tree arg;
1585 bool strictly_local = false;
1587 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1588 arg = DECL_CHAIN (arg))
1590 rtx x, reg = DECL_INCOMING_RTL (arg);
1591 int uns1, uns3;
1592 machine_mode mode1, mode2, mode3, mode4;
1594 /* Only continue if the incoming argument is in a register. */
1595 if (!REG_P (reg))
1596 continue;
1598 /* Determine, if possible, whether all call sites of the current
1599 function lie within the current compilation unit. (This does
1600 take into account the exporting of a function via taking its
1601 address, and so forth.) */
1602 strictly_local
1603 = cgraph_node::local_info_node (current_function_decl)->local;
1605 /* The mode and signedness of the argument before any promotions happen
1606 (equal to the mode of the pseudo holding it at that stage). */
1607 mode1 = TYPE_MODE (TREE_TYPE (arg));
1608 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1610 /* The mode and signedness of the argument after any source language and
1611 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1612 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1613 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1615 /* The mode and signedness of the argument as it is actually passed,
1616 see assign_parm_setup_reg in function.c. */
1617 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1618 TREE_TYPE (cfun->decl), 0);
1620 /* The mode of the register in which the argument is being passed. */
1621 mode4 = GET_MODE (reg);
1623 /* Eliminate sign extensions in the callee when:
1624 (a) A mode promotion has occurred; */
1625 if (mode1 == mode3)
1626 continue;
1627 /* (b) The mode of the register is the same as the mode of
1628 the argument as it is passed; */
1629 if (mode3 != mode4)
1630 continue;
1631 /* (c) There's no language level extension; */
1632 if (mode1 == mode2)
1634 /* (c.1) All callers are from the current compilation unit. If that's
1635 the case we don't have to rely on an ABI, we only have to know
1636 what we're generating right now, and we know that we will do the
1637 mode1 to mode2 promotion with the given sign. */
1638 else if (!strictly_local)
1639 continue;
1640 /* (c.2) The combination of the two promotions is useful. This is
1641 true when the signs match, or if the first promotion is unsigned.
1642 In the latter case, (sign_extend (zero_extend x)) is the same as
1643 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1644 else if (uns1)
1645 uns3 = true;
1646 else if (uns3)
1647 continue;
1649 /* Record that the value was promoted from mode1 to mode3,
1650 so that any sign extension at the head of the current
1651 function may be eliminated. */
1652 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1653 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1654 record_value_for_reg (reg, first, x);
1658 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1659 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1660 because some machines (maybe most) will actually do the sign-extension and
1661 this is the conservative approach.
1663 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1664 kludge. */
1666 static rtx
1667 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1669 scalar_int_mode int_mode;
1670 if (CONST_INT_P (src)
1671 && is_a <scalar_int_mode> (mode, &int_mode)
1672 && GET_MODE_PRECISION (int_mode) < prec
1673 && INTVAL (src) > 0
1674 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1675 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1677 return src;
1680 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1681 and SET. */
1683 static void
1684 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1685 rtx x)
1687 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1688 unsigned HOST_WIDE_INT bits = 0;
1689 rtx reg_equal = NULL, src = SET_SRC (set);
1690 unsigned int num = 0;
1692 if (reg_equal_note)
1693 reg_equal = XEXP (reg_equal_note, 0);
1695 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1697 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1698 if (reg_equal)
1699 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1702 /* Don't call nonzero_bits if it cannot change anything. */
1703 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1705 machine_mode mode = GET_MODE (x);
1706 if (GET_MODE_CLASS (mode) == MODE_INT
1707 && HWI_COMPUTABLE_MODE_P (mode))
1708 mode = nonzero_bits_mode;
1709 bits = nonzero_bits (src, mode);
1710 if (reg_equal && bits)
1711 bits &= nonzero_bits (reg_equal, mode);
1712 rsp->nonzero_bits |= bits;
1715 /* Don't call num_sign_bit_copies if it cannot change anything. */
1716 if (rsp->sign_bit_copies != 1)
1718 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1719 if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1721 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1722 if (num == 0 || numeq > num)
1723 num = numeq;
1725 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1726 rsp->sign_bit_copies = num;
1730 /* Called via note_stores. If X is a pseudo that is narrower than
1731 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1733 If we are setting only a portion of X and we can't figure out what
1734 portion, assume all bits will be used since we don't know what will
1735 be happening.
1737 Similarly, set how many bits of X are known to be copies of the sign bit
1738 at all locations in the function. This is the smallest number implied
1739 by any set of X. */
1741 static void
1742 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1744 rtx_insn *insn = (rtx_insn *) data;
1745 scalar_int_mode mode;
1747 if (REG_P (x)
1748 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1749 /* If this register is undefined at the start of the file, we can't
1750 say what its contents were. */
1751 && ! REGNO_REG_SET_P
1752 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1753 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1754 && HWI_COMPUTABLE_MODE_P (mode))
1756 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1758 if (set == 0 || GET_CODE (set) == CLOBBER)
1760 rsp->nonzero_bits = GET_MODE_MASK (mode);
1761 rsp->sign_bit_copies = 1;
1762 return;
1765 /* If this register is being initialized using itself, and the
1766 register is uninitialized in this basic block, and there are
1767 no LOG_LINKS which set the register, then part of the
1768 register is uninitialized. In that case we can't assume
1769 anything about the number of nonzero bits.
1771 ??? We could do better if we checked this in
1772 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1773 could avoid making assumptions about the insn which initially
1774 sets the register, while still using the information in other
1775 insns. We would have to be careful to check every insn
1776 involved in the combination. */
1778 if (insn
1779 && reg_referenced_p (x, PATTERN (insn))
1780 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1781 REGNO (x)))
1783 struct insn_link *link;
1785 FOR_EACH_LOG_LINK (link, insn)
1786 if (dead_or_set_p (link->insn, x))
1787 break;
1788 if (!link)
1790 rsp->nonzero_bits = GET_MODE_MASK (mode);
1791 rsp->sign_bit_copies = 1;
1792 return;
1796 /* If this is a complex assignment, see if we can convert it into a
1797 simple assignment. */
1798 set = expand_field_assignment (set);
1800 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1801 set what we know about X. */
1803 if (SET_DEST (set) == x
1804 || (paradoxical_subreg_p (SET_DEST (set))
1805 && SUBREG_REG (SET_DEST (set)) == x))
1806 update_rsp_from_reg_equal (rsp, insn, set, x);
1807 else
1809 rsp->nonzero_bits = GET_MODE_MASK (mode);
1810 rsp->sign_bit_copies = 1;
1815 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1816 optionally insns that were previously combined into I3 or that will be
1817 combined into the merger of INSN and I3. The order is PRED, PRED2,
1818 INSN, SUCC, SUCC2, I3.
1820 Return 0 if the combination is not allowed for any reason.
1822 If the combination is allowed, *PDEST will be set to the single
1823 destination of INSN and *PSRC to the single source, and this function
1824 will return 1. */
1826 static int
1827 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1828 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1829 rtx *pdest, rtx *psrc)
1831 int i;
1832 const_rtx set = 0;
1833 rtx src, dest;
1834 rtx_insn *p;
1835 rtx link;
1836 bool all_adjacent = true;
1837 int (*is_volatile_p) (const_rtx);
1839 if (succ)
1841 if (succ2)
1843 if (next_active_insn (succ2) != i3)
1844 all_adjacent = false;
1845 if (next_active_insn (succ) != succ2)
1846 all_adjacent = false;
1848 else if (next_active_insn (succ) != i3)
1849 all_adjacent = false;
1850 if (next_active_insn (insn) != succ)
1851 all_adjacent = false;
1853 else if (next_active_insn (insn) != i3)
1854 all_adjacent = false;
1856 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1857 or a PARALLEL consisting of such a SET and CLOBBERs.
1859 If INSN has CLOBBER parallel parts, ignore them for our processing.
1860 By definition, these happen during the execution of the insn. When it
1861 is merged with another insn, all bets are off. If they are, in fact,
1862 needed and aren't also supplied in I3, they may be added by
1863 recog_for_combine. Otherwise, it won't match.
1865 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1866 note.
1868 Get the source and destination of INSN. If more than one, can't
1869 combine. */
1871 if (GET_CODE (PATTERN (insn)) == SET)
1872 set = PATTERN (insn);
1873 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1874 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1876 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1878 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1880 switch (GET_CODE (elt))
1882 /* This is important to combine floating point insns
1883 for the SH4 port. */
1884 case USE:
1885 /* Combining an isolated USE doesn't make sense.
1886 We depend here on combinable_i3pat to reject them. */
1887 /* The code below this loop only verifies that the inputs of
1888 the SET in INSN do not change. We call reg_set_between_p
1889 to verify that the REG in the USE does not change between
1890 I3 and INSN.
1891 If the USE in INSN was for a pseudo register, the matching
1892 insn pattern will likely match any register; combining this
1893 with any other USE would only be safe if we knew that the
1894 used registers have identical values, or if there was
1895 something to tell them apart, e.g. different modes. For
1896 now, we forgo such complicated tests and simply disallow
1897 combining of USES of pseudo registers with any other USE. */
1898 if (REG_P (XEXP (elt, 0))
1899 && GET_CODE (PATTERN (i3)) == PARALLEL)
1901 rtx i3pat = PATTERN (i3);
1902 int i = XVECLEN (i3pat, 0) - 1;
1903 unsigned int regno = REGNO (XEXP (elt, 0));
1907 rtx i3elt = XVECEXP (i3pat, 0, i);
1909 if (GET_CODE (i3elt) == USE
1910 && REG_P (XEXP (i3elt, 0))
1911 && (REGNO (XEXP (i3elt, 0)) == regno
1912 ? reg_set_between_p (XEXP (elt, 0),
1913 PREV_INSN (insn), i3)
1914 : regno >= FIRST_PSEUDO_REGISTER))
1915 return 0;
1917 while (--i >= 0);
1919 break;
1921 /* We can ignore CLOBBERs. */
1922 case CLOBBER:
1923 break;
1925 case SET:
1926 /* Ignore SETs whose result isn't used but not those that
1927 have side-effects. */
1928 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1929 && insn_nothrow_p (insn)
1930 && !side_effects_p (elt))
1931 break;
1933 /* If we have already found a SET, this is a second one and
1934 so we cannot combine with this insn. */
1935 if (set)
1936 return 0;
1938 set = elt;
1939 break;
1941 default:
1942 /* Anything else means we can't combine. */
1943 return 0;
1947 if (set == 0
1948 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1949 so don't do anything with it. */
1950 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1951 return 0;
1953 else
1954 return 0;
1956 if (set == 0)
1957 return 0;
1959 /* The simplification in expand_field_assignment may call back to
1960 get_last_value, so set a safeguard here. */
1961 subst_low_luid = DF_INSN_LUID (insn);
1963 set = expand_field_assignment (set);
1964 src = SET_SRC (set), dest = SET_DEST (set);
1966 /* Do not eliminate a user-specified register if it is in an
1967 asm input, because we may break the register asm usage defined
1968 in the GCC manual if we allow it.
1969 Be aware that this may cover more cases than we expect, but this
1970 should be harmless. */
1971 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1972 && extract_asm_operands (PATTERN (i3)))
1973 return 0;
1975 /* Don't eliminate a store in the stack pointer. */
1976 if (dest == stack_pointer_rtx
1977 /* Don't combine with an insn that sets a register to itself if it has
1978 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1979 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1980 /* Can't merge an ASM_OPERANDS. */
1981 || GET_CODE (src) == ASM_OPERANDS
1982 /* Can't merge a function call. */
1983 || GET_CODE (src) == CALL
1984 /* Don't eliminate a function call argument. */
1985 || (CALL_P (i3)
1986 && (find_reg_fusage (i3, USE, dest)
1987 || (REG_P (dest)
1988 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1989 && global_regs[REGNO (dest)])))
1990 /* Don't substitute into an incremented register. */
1991 || FIND_REG_INC_NOTE (i3, dest)
1992 || (succ && FIND_REG_INC_NOTE (succ, dest))
1993 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1994 /* Don't substitute into a non-local goto, this confuses CFG. */
1995 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1996 /* Make sure that DEST is not used after INSN but before SUCC, or
1997 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1998 || (!all_adjacent
1999 && ((succ2
2000 && (reg_used_between_p (dest, succ2, i3)
2001 || reg_used_between_p (dest, succ, succ2)))
2002 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2003 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2004 || (succ
2005 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2006 that case SUCC is not in the insn stream, so use SUCC2
2007 instead for this test. */
2008 && reg_used_between_p (dest, insn,
2009 succ2
2010 && INSN_UID (succ) == INSN_UID (succ2)
2011 ? succ2 : succ))))
2012 /* Make sure that the value that is to be substituted for the register
2013 does not use any registers whose values alter in between. However,
2014 if the insns are adjacent, a use can't cross a set even though we
2015 think it might (this can happen for a sequence of insns each setting
2016 the same destination; last_set of that register might point to
2017 a NOTE). If INSN has a REG_EQUIV note, the register is always
2018 equivalent to the memory so the substitution is valid even if there
2019 are intervening stores. Also, don't move a volatile asm or
2020 UNSPEC_VOLATILE across any other insns. */
2021 || (! all_adjacent
2022 && (((!MEM_P (src)
2023 || ! find_reg_note (insn, REG_EQUIV, src))
2024 && modified_between_p (src, insn, i3))
2025 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2026 || GET_CODE (src) == UNSPEC_VOLATILE))
2027 /* Don't combine across a CALL_INSN, because that would possibly
2028 change whether the life span of some REGs crosses calls or not,
2029 and it is a pain to update that information.
2030 Exception: if source is a constant, moving it later can't hurt.
2031 Accept that as a special case. */
2032 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2033 return 0;
2035 /* DEST must either be a REG or CC0. */
2036 if (REG_P (dest))
2038 /* If register alignment is being enforced for multi-word items in all
2039 cases except for parameters, it is possible to have a register copy
2040 insn referencing a hard register that is not allowed to contain the
2041 mode being copied and which would not be valid as an operand of most
2042 insns. Eliminate this problem by not combining with such an insn.
2044 Also, on some machines we don't want to extend the life of a hard
2045 register. */
2047 if (REG_P (src)
2048 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2049 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2050 /* Don't extend the life of a hard register unless it is
2051 a user variable (if we have few registers) or it can't
2052 fit into the desired register (meaning something special
2053 is going on).
2054 Also avoid substituting a return register into I3, because
2055 reload can't handle a conflict with constraints of other
2056 inputs. */
2057 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2058 && !targetm.hard_regno_mode_ok (REGNO (src),
2059 GET_MODE (src)))))
2060 return 0;
2062 else if (GET_CODE (dest) != CC0)
2063 return 0;
2066 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2067 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2068 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2070 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2072 /* If the clobber represents an earlyclobber operand, we must not
2073 substitute an expression containing the clobbered register.
2074 As we do not analyze the constraint strings here, we have to
2075 make the conservative assumption. However, if the register is
2076 a fixed hard reg, the clobber cannot represent any operand;
2077 we leave it up to the machine description to either accept or
2078 reject use-and-clobber patterns. */
2079 if (!REG_P (reg)
2080 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2081 || !fixed_regs[REGNO (reg)])
2082 if (reg_overlap_mentioned_p (reg, src))
2083 return 0;
2086 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2087 or not), reject, unless nothing volatile comes between it and I3. */
2089 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2091 /* Make sure neither succ nor succ2 contains a volatile reference. */
2092 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2093 return 0;
2094 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2095 return 0;
2096 /* We'll check insns between INSN and I3 below. */
2099 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2100 to be an explicit register variable, and was chosen for a reason. */
2102 if (GET_CODE (src) == ASM_OPERANDS
2103 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2104 return 0;
2106 /* If INSN contains volatile references (specifically volatile MEMs),
2107 we cannot combine across any other volatile references.
2108 Even if INSN doesn't contain volatile references, any intervening
2109 volatile insn might affect machine state. */
2111 is_volatile_p = volatile_refs_p (PATTERN (insn))
2112 ? volatile_refs_p
2113 : volatile_insn_p;
2115 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2116 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2117 return 0;
2119 /* If INSN contains an autoincrement or autodecrement, make sure that
2120 register is not used between there and I3, and not already used in
2121 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2122 Also insist that I3 not be a jump; if it were one
2123 and the incremented register were spilled, we would lose. */
2125 if (AUTO_INC_DEC)
2126 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2127 if (REG_NOTE_KIND (link) == REG_INC
2128 && (JUMP_P (i3)
2129 || reg_used_between_p (XEXP (link, 0), insn, i3)
2130 || (pred != NULL_RTX
2131 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2132 || (pred2 != NULL_RTX
2133 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2134 || (succ != NULL_RTX
2135 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2136 || (succ2 != NULL_RTX
2137 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2138 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2139 return 0;
2141 /* Don't combine an insn that follows a CC0-setting insn.
2142 An insn that uses CC0 must not be separated from the one that sets it.
2143 We do, however, allow I2 to follow a CC0-setting insn if that insn
2144 is passed as I1; in that case it will be deleted also.
2145 We also allow combining in this case if all the insns are adjacent
2146 because that would leave the two CC0 insns adjacent as well.
2147 It would be more logical to test whether CC0 occurs inside I1 or I2,
2148 but that would be much slower, and this ought to be equivalent. */
2150 if (HAVE_cc0)
2152 p = prev_nonnote_insn (insn);
2153 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2154 && ! all_adjacent)
2155 return 0;
2158 /* If we get here, we have passed all the tests and the combination is
2159 to be allowed. */
2161 *pdest = dest;
2162 *psrc = src;
2164 return 1;
2167 /* LOC is the location within I3 that contains its pattern or the component
2168 of a PARALLEL of the pattern. We validate that it is valid for combining.
2170 One problem is that if I3 modifies its output, as opposed to replacing it
2171 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2172 doing so would produce an insn that is not equivalent to the original insns.
2174 Consider:
2176 (set (reg:DI 101) (reg:DI 100))
2177 (set (subreg:SI (reg:DI 101) 0) <foo>)
2179 This is NOT equivalent to:
2181 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2182 (set (reg:DI 101) (reg:DI 100))])
2184 Not only does this modify 100 (in which case it might still be valid
2185 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2187 We can also run into a problem if I2 sets a register that I1
2188 uses and I1 gets directly substituted into I3 (not via I2). In that
2189 case, we would be getting the wrong value of I2DEST into I3, so we
2190 must reject the combination. This case occurs when I2 and I1 both
2191 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2192 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2193 of a SET must prevent combination from occurring. The same situation
2194 can occur for I0, in which case I0_NOT_IN_SRC is set.
2196 Before doing the above check, we first try to expand a field assignment
2197 into a set of logical operations.
2199 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2200 we place a register that is both set and used within I3. If more than one
2201 such register is detected, we fail.
2203 Return 1 if the combination is valid, zero otherwise. */
2205 static int
2206 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2207 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2209 rtx x = *loc;
2211 if (GET_CODE (x) == SET)
2213 rtx set = x;
2214 rtx dest = SET_DEST (set);
2215 rtx src = SET_SRC (set);
2216 rtx inner_dest = dest;
2217 rtx subdest;
2219 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2220 || GET_CODE (inner_dest) == SUBREG
2221 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2222 inner_dest = XEXP (inner_dest, 0);
2224 /* Check for the case where I3 modifies its output, as discussed
2225 above. We don't want to prevent pseudos from being combined
2226 into the address of a MEM, so only prevent the combination if
2227 i1 or i2 set the same MEM. */
2228 if ((inner_dest != dest &&
2229 (!MEM_P (inner_dest)
2230 || rtx_equal_p (i2dest, inner_dest)
2231 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2232 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2233 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2234 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2235 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2237 /* This is the same test done in can_combine_p except we can't test
2238 all_adjacent; we don't have to, since this instruction will stay
2239 in place, thus we are not considering increasing the lifetime of
2240 INNER_DEST.
2242 Also, if this insn sets a function argument, combining it with
2243 something that might need a spill could clobber a previous
2244 function argument; the all_adjacent test in can_combine_p also
2245 checks this; here, we do a more specific test for this case. */
2247 || (REG_P (inner_dest)
2248 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2249 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2250 GET_MODE (inner_dest)))
2251 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2252 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2253 return 0;
2255 /* If DEST is used in I3, it is being killed in this insn, so
2256 record that for later. We have to consider paradoxical
2257 subregs here, since they kill the whole register, but we
2258 ignore partial subregs, STRICT_LOW_PART, etc.
2259 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2260 STACK_POINTER_REGNUM, since these are always considered to be
2261 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2262 subdest = dest;
2263 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2264 subdest = SUBREG_REG (subdest);
2265 if (pi3dest_killed
2266 && REG_P (subdest)
2267 && reg_referenced_p (subdest, PATTERN (i3))
2268 && REGNO (subdest) != FRAME_POINTER_REGNUM
2269 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2270 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2271 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2272 || (REGNO (subdest) != ARG_POINTER_REGNUM
2273 || ! fixed_regs [REGNO (subdest)]))
2274 && REGNO (subdest) != STACK_POINTER_REGNUM)
2276 if (*pi3dest_killed)
2277 return 0;
2279 *pi3dest_killed = subdest;
2283 else if (GET_CODE (x) == PARALLEL)
2285 int i;
2287 for (i = 0; i < XVECLEN (x, 0); i++)
2288 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2289 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2290 return 0;
2293 return 1;
2296 /* Return 1 if X is an arithmetic expression that contains a multiplication
2297 or division. We don't count multiplications by powers of two here. */
2299 static int
2300 contains_muldiv (rtx x)
2302 switch (GET_CODE (x))
2304 case MOD: case DIV: case UMOD: case UDIV:
2305 return 1;
2307 case MULT:
2308 return ! (CONST_INT_P (XEXP (x, 1))
2309 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2310 default:
2311 if (BINARY_P (x))
2312 return contains_muldiv (XEXP (x, 0))
2313 || contains_muldiv (XEXP (x, 1));
2315 if (UNARY_P (x))
2316 return contains_muldiv (XEXP (x, 0));
2318 return 0;
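/* For example, contains_muldiv returns 1 for
   (plus:SI (mult:SI (reg:SI 100) (const_int 5)) (reg:SI 101))
   but 0 for
   (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101)),
   since a multiplication by the power of two 4 is really just a shift.  */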
2322 /* Determine whether INSN can be used in a combination. Return nonzero if
2323 not. This is used in try_combine to detect early some cases where we
2324 can't perform combinations. */
2326 static int
2327 cant_combine_insn_p (rtx_insn *insn)
2329 rtx set;
2330 rtx src, dest;
2332 /* If this isn't really an insn, we can't do anything.
2333 This can occur when flow deletes an insn that it has merged into an
2334 auto-increment address. */
2335 if (!NONDEBUG_INSN_P (insn))
2336 return 1;
2338 /* Never combine loads and stores involving hard regs that are likely
2339 to be spilled. The register allocator can usually handle such
2340 reg-reg moves by tying. If we allow the combiner to make
2341 substitutions of likely-spilled regs, reload might die.
2342 As an exception, we allow combinations involving fixed regs; these are
2343 not available to the register allocator so there's no risk involved. */
2345 set = single_set (insn);
2346 if (! set)
2347 return 0;
2348 src = SET_SRC (set);
2349 dest = SET_DEST (set);
2350 if (GET_CODE (src) == SUBREG)
2351 src = SUBREG_REG (src);
2352 if (GET_CODE (dest) == SUBREG)
2353 dest = SUBREG_REG (dest);
2354 if (REG_P (src) && REG_P (dest)
2355 && ((HARD_REGISTER_P (src)
2356 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2357 #ifdef LEAF_REGISTERS
2358 && ! LEAF_REGISTERS [REGNO (src)])
2359 #else
2361 #endif
2362 || (HARD_REGISTER_P (dest)
2363 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2364 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2365 return 1;
2367 return 0;
2370 struct likely_spilled_retval_info
2372 unsigned regno, nregs;
2373 unsigned mask;
2376 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2377 hard registers that are known to be written to / clobbered in full. */
2378 static void
2379 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2381 struct likely_spilled_retval_info *const info =
2382 (struct likely_spilled_retval_info *) data;
2383 unsigned regno, nregs;
2384 unsigned new_mask;
2386 if (!REG_P (XEXP (set, 0)))
2387 return;
2388 regno = REGNO (x);
2389 if (regno >= info->regno + info->nregs)
2390 return;
2391 nregs = REG_NREGS (x);
2392 if (regno + nregs <= info->regno)
2393 return;
2394 new_mask = (2U << (nregs - 1)) - 1;
2395 if (regno < info->regno)
2396 new_mask >>= info->regno - regno;
2397 else
2398 new_mask <<= regno - info->regno;
2399 info->mask &= ~new_mask;
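/* A worked example: for a return value in, say, hard regs 10..13,
   info->regno is 10, info->nregs is 4 and the initial mask is 0b1111.
   An intervening insn (between INSN and the closing USE) that fully sets
   regs 11..12 arrives here with regno == 11 and nregs == 2, giving
   new_mask == 0b11 << 1 == 0b0110, so bits 1 and 2 are cleared and only
   regs 10 and 13 remain possibly live.  */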
2402 /* Return nonzero iff part of the return value is live during INSN, and
2403 it is likely spilled. This can happen when more than one insn is needed
2404 to copy the return value, e.g. when we consider combining into the
2405 second copy insn for a complex value. */
2407 static int
2408 likely_spilled_retval_p (rtx_insn *insn)
2410 rtx_insn *use = BB_END (this_basic_block);
2411 rtx reg;
2412 rtx_insn *p;
2413 unsigned regno, nregs;
2414 /* We assume here that no machine mode needs more than
2415 32 hard registers when the value overlaps with a register
2416 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2417 unsigned mask;
2418 struct likely_spilled_retval_info info;
2420 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2421 return 0;
2422 reg = XEXP (PATTERN (use), 0);
2423 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2424 return 0;
2425 regno = REGNO (reg);
2426 nregs = REG_NREGS (reg);
2427 if (nregs == 1)
2428 return 0;
2429 mask = (2U << (nregs - 1)) - 1;
2431 /* Disregard parts of the return value that are set later. */
2432 info.regno = regno;
2433 info.nregs = nregs;
2434 info.mask = mask;
2435 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2436 if (INSN_P (p))
2437 note_stores (p, likely_spilled_retval_1, &info);
2438 mask = info.mask;
2440 /* Check if any of the (probably) live return value registers is
2441 likely spilled. */
2442 nregs --;
2445 if ((mask & 1 << nregs)
2446 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2447 return 1;
2448 } while (nregs--);
2449 return 0;
2452 /* Adjust INSN after we made a change to its destination.
2454 Changing the destination can invalidate notes that say something about
2455 the results of the insn and a LOG_LINK pointing to the insn. */
2457 static void
2458 adjust_for_new_dest (rtx_insn *insn)
2460 /* For notes, be conservative and simply remove them. */
2461 remove_reg_equal_equiv_notes (insn);
2463 /* The new insn will have a destination that was previously the destination
2464 of an insn just above it. Call distribute_links to make a LOG_LINK from
2465 the next use of that destination. */
2467 rtx set = single_set (insn);
2468 gcc_assert (set);
2470 rtx reg = SET_DEST (set);
2472 while (GET_CODE (reg) == ZERO_EXTRACT
2473 || GET_CODE (reg) == STRICT_LOW_PART
2474 || GET_CODE (reg) == SUBREG)
2475 reg = XEXP (reg, 0);
2476 gcc_assert (REG_P (reg));
2478 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2480 df_insn_rescan (insn);
2483 /* Return TRUE if combine can reuse reg X in mode MODE.
2484 ADDED_SETS is nonzero if the original set is still required. */
2485 static bool
2486 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2488 unsigned int regno;
2490 if (!REG_P (x))
2491 return false;
2493 /* Don't change between modes with different underlying register sizes,
2494 since this could lead to invalid subregs. */
2495 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2496 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2497 return false;
2499 regno = REGNO (x);
2500 /* Allow hard registers if the new mode is legal, and occupies no more
2501 registers than the old mode. */
2502 if (regno < FIRST_PSEUDO_REGISTER)
2503 return (targetm.hard_regno_mode_ok (regno, mode)
2504 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2506 /* Or a pseudo that is only used once. */
2507 return (regno < reg_n_sets_max
2508 && REG_N_SETS (regno) == 1
2509 && !added_sets
2510 && !REG_USERVAR_P (x));
2514 /* Check whether X, the destination of a set, refers to part of
2515 the register specified by REG. */
2517 static bool
2518 reg_subword_p (rtx x, rtx reg)
2520 /* Check that reg is an integer mode register. */
2521 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2522 return false;
2524 if (GET_CODE (x) == STRICT_LOW_PART
2525 || GET_CODE (x) == ZERO_EXTRACT)
2526 x = XEXP (x, 0);
2528 return GET_CODE (x) == SUBREG
2529 && SUBREG_REG (x) == reg
2530 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
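/* For example, with REG being (reg:DI 100), reg_subword_p returns true for
   a destination such as
   (strict_low_part (subreg:SI (reg:DI 100) 0))
   or a plain (subreg:SI (reg:DI 100) 0), but false for (reg:DI 100) itself
   or for a subreg of any other register.  */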
2533 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2534 Note that the INSN should be deleted *after* removing dead edges, so
2535 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2536 but not for a (set (pc) (label_ref FOO)). */
2538 static void
2539 update_cfg_for_uncondjump (rtx_insn *insn)
2541 basic_block bb = BLOCK_FOR_INSN (insn);
2542 gcc_assert (BB_END (bb) == insn);
2544 purge_dead_edges (bb);
2546 delete_insn (insn);
2547 if (EDGE_COUNT (bb->succs) == 1)
2549 rtx_insn *insn;
2551 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2553 /* Remove barriers from the footer if there are any. */
2554 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2555 if (BARRIER_P (insn))
2557 if (PREV_INSN (insn))
2558 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2559 else
2560 BB_FOOTER (bb) = NEXT_INSN (insn);
2561 if (NEXT_INSN (insn))
2562 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2564 else if (LABEL_P (insn))
2565 break;
2569 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2570 by an arbitrary number of CLOBBERs. */
2571 static bool
2572 is_parallel_of_n_reg_sets (rtx pat, int n)
2574 if (GET_CODE (pat) != PARALLEL)
2575 return false;
2577 int len = XVECLEN (pat, 0);
2578 if (len < n)
2579 return false;
2581 int i;
2582 for (i = 0; i < n; i++)
2583 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2584 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2585 return false;
2586 for ( ; i < len; i++)
2587 switch (GET_CODE (XVECEXP (pat, 0, i)))
2589 case CLOBBER:
2590 if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2591 return false;
2592 break;
2593 default:
2594 return false;
2596 return true;
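/* For instance, with N == 2 this accepts a pattern such as
   (parallel [(set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
              (set (reg:SI 101) (reg:SI 100))
              (clobber (scratch:SI))])
   (register numbers are only illustrative), but rejects it if either
   destination is not a REG or if anything other than CLOBBERs follows
   the two SETs.  */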
2599 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2600 CLOBBERs), can be split into individual SETs in that order, without
2601 changing semantics. */
2602 static bool
2603 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2605 if (!insn_nothrow_p (insn))
2606 return false;
2608 rtx pat = PATTERN (insn);
2610 int i, j;
2611 for (i = 0; i < n; i++)
2613 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2614 return false;
2616 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2618 for (j = i + 1; j < n; j++)
2619 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2620 return false;
2623 return true;
2626 /* Return whether X is just a single set, with the source
2627 a general_operand. */
2628 static bool
2629 is_just_move (rtx x)
2631 if (INSN_P (x))
2632 x = PATTERN (x);
2634 return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
2637 /* Callback function to count autoincs. */
2639 static int
2640 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2642 (*((int *) arg))++;
2644 return 0;
2647 /* Try to combine the insns I0, I1 and I2 into I3.
2648 Here I0, I1 and I2 appear earlier than I3.
2649 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into I3.
2652 If we are combining more than two insns and the resulting insn is not
2653 recognized, try splitting it into two insns. If that happens, I2 and I3
2654 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2655 Otherwise, I0, I1 and I2 are pseudo-deleted.
2657 Return 0 if the combination does not work. Then nothing is changed.
2658 If we did the combination, return the insn at which combine should
2659 resume scanning.
2661 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2662 new direct jump instruction.
2664 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2665 been I3 passed to an earlier try_combine within the same basic
2666 block. */
2668 static rtx_insn *
2669 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2670 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2672 /* New patterns for I3 and I2, respectively. */
2673 rtx newpat, newi2pat = 0;
2674 rtvec newpat_vec_with_clobbers = 0;
2675 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2676 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2677 dead. */
2678 int added_sets_0, added_sets_1, added_sets_2;
2679 /* Total number of SETs to put into I3. */
2680 int total_sets;
2681 /* Nonzero if I2's or I1's body now appears in I3. */
2682 int i2_is_used = 0, i1_is_used = 0;
2683 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2684 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2685 /* Contains I3 if the destination of I3 is used in its source, which means
2686 that the old life of I3 is being killed. If that usage is placed into
2687 I2 and not in I3, a REG_DEAD note must be made. */
2688 rtx i3dest_killed = 0;
2689 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2690 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2691 /* Copy of SET_SRC of I1 and I0, if needed. */
2692 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2693 /* Set if I2DEST was reused as a scratch register. */
2694 bool i2scratch = false;
2695 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2696 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2697 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2698 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2699 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2700 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2701 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2702 /* Notes that must be added to REG_NOTES in I3 and I2. */
2703 rtx new_i3_notes, new_i2_notes;
2704 /* Notes that we substituted I3 into I2 instead of the normal case. */
2705 int i3_subst_into_i2 = 0;
2706 /* Notes that I1, I2 or I3 is a MULT operation. */
2707 int have_mult = 0;
2708 int swap_i2i3 = 0;
2709 int split_i2i3 = 0;
2710 int changed_i3_dest = 0;
2711 bool i2_was_move = false, i3_was_move = false;
2712 int n_auto_inc = 0;
2714 int maxreg;
2715 rtx_insn *temp_insn;
2716 rtx temp_expr;
2717 struct insn_link *link;
2718 rtx other_pat = 0;
2719 rtx new_other_notes;
2720 int i;
2721 scalar_int_mode dest_mode, temp_mode;
2723 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2724 never be). */
2725 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2726 return 0;
2728 /* Only try four-insn combinations when there's high likelihood of
2729 success. Look for simple insns, such as loads of constants or
2730 binary operations involving a constant. */
2731 if (i0)
2733 int i;
2734 int ngood = 0;
2735 int nshift = 0;
2736 rtx set0, set3;
2738 if (!flag_expensive_optimizations)
2739 return 0;
2741 for (i = 0; i < 4; i++)
2743 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2744 rtx set = single_set (insn);
2745 rtx src;
2746 if (!set)
2747 continue;
2748 src = SET_SRC (set);
2749 if (CONSTANT_P (src))
2751 ngood += 2;
2752 break;
2754 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2755 ngood++;
2756 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2757 || GET_CODE (src) == LSHIFTRT)
2758 nshift++;
2761 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2762 are likely manipulating its value. Ideally we'll be able to combine
2763 all four insns into a bitfield insertion of some kind.
2765 Note the source in I0 might be inside a sign/zero extension and the
2766 memory modes in I0 and I3 might be different. So extract the address
2767 from the destination of I3 and search for it in the source of I0.
2769 In the event that there's a match but the source/dest do not actually
2770 refer to the same memory, the worst that happens is we try some
2771 combinations that we wouldn't have otherwise. */
2772 if ((set0 = single_set (i0))
2773 /* Ensure the source of SET0 is a MEM, possibly buried inside
2774 an extension. */
2775 && (GET_CODE (SET_SRC (set0)) == MEM
2776 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2777 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2778 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2779 && (set3 = single_set (i3))
2780 /* Ensure the destination of SET3 is a MEM. */
2781 && GET_CODE (SET_DEST (set3)) == MEM
2782 /* Would it be better to extract the base address for the MEM
2783 in SET3 and look for that? I don't have cases where it matters
2784 but I could envision such cases. */
2785 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2786 ngood += 2;
2788 if (ngood < 2 && nshift < 2)
2789 return 0;
2792 /* Exit early if one of the insns involved can't be used for
2793 combinations. */
2794 if (CALL_P (i2)
2795 || (i1 && CALL_P (i1))
2796 || (i0 && CALL_P (i0))
2797 || cant_combine_insn_p (i3)
2798 || cant_combine_insn_p (i2)
2799 || (i1 && cant_combine_insn_p (i1))
2800 || (i0 && cant_combine_insn_p (i0))
2801 || likely_spilled_retval_p (i3))
2802 return 0;
2804 combine_attempts++;
2805 undobuf.other_insn = 0;
2807 /* Reset the hard register usage information. */
2808 CLEAR_HARD_REG_SET (newpat_used_regs);
2810 if (dump_file && (dump_flags & TDF_DETAILS))
2812 if (i0)
2813 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2814 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2815 else if (i1)
2816 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2817 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2818 else
2819 fprintf (dump_file, "\nTrying %d -> %d:\n",
2820 INSN_UID (i2), INSN_UID (i3));
2822 if (i0)
2823 dump_insn_slim (dump_file, i0);
2824 if (i1)
2825 dump_insn_slim (dump_file, i1);
2826 dump_insn_slim (dump_file, i2);
2827 dump_insn_slim (dump_file, i3);
2830 /* If multiple insns feed into one of I2 or I3, they can be in any
2831 order. To simplify the code below, reorder them in sequence. */
2832 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2833 std::swap (i0, i2);
2834 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2835 std::swap (i0, i1);
2836 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2837 std::swap (i1, i2);
2839 added_links_insn = 0;
2840 added_notes_insn = 0;
2842 /* First check for one important special case that the code below will
2843 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2844 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2845 we may be able to replace that destination with the destination of I3.
2846 This occurs in the common code where we compute both a quotient and
2847 remainder into a structure, in which case we want to do the computation
2848 directly into the structure to avoid register-register copies.
2850 Note that this case handles both multiple sets in I2 and also cases
2851 where I2 has a number of CLOBBERs inside the PARALLEL.
2853 We make very conservative checks below and only try to handle the
2854 most common cases of this. For example, we only handle the case
2855 where I2 and I3 are adjacent to avoid making difficult register
2856 usage tests. */
2858 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2859 && REG_P (SET_SRC (PATTERN (i3)))
2860 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2861 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2862 && GET_CODE (PATTERN (i2)) == PARALLEL
2863 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2864 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2865 below would need to check what is inside (and reg_overlap_mentioned_p
2866 doesn't support those codes anyway). Don't allow those destinations;
2867 the resulting insn isn't likely to be recognized anyway. */
2868 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2869 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2870 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2871 SET_DEST (PATTERN (i3)))
2872 && next_active_insn (i2) == i3)
2874 rtx p2 = PATTERN (i2);
2876 /* Make sure that the destination of I3,
2877 which we are going to substitute into one output of I2,
2878 is not used within another output of I2. We must avoid making this:
2879 (parallel [(set (mem (reg 69)) ...)
2880 (set (reg 69) ...)])
2881 which is not well-defined as to order of actions.
2882 (Besides, reload can't handle output reloads for this.)
2884 The problem can also happen if the dest of I3 is a memory ref,
2885 if another dest in I2 is an indirect memory ref.
2887 Neither can this PARALLEL be an asm. We do not usually allow
2888 combining that (see can_combine_p), so do not allow it here either. */
2889 bool ok = true;
2890 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2892 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2893 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2894 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2895 SET_DEST (XVECEXP (p2, 0, i))))
2896 ok = false;
2897 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2898 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2899 ok = false;
2902 if (ok)
2903 for (i = 0; i < XVECLEN (p2, 0); i++)
2904 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2905 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2907 combine_merges++;
2909 subst_insn = i3;
2910 subst_low_luid = DF_INSN_LUID (i2);
2912 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2913 i2src = SET_SRC (XVECEXP (p2, 0, i));
2914 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2915 i2dest_killed = dead_or_set_p (i2, i2dest);
2917 /* Replace the dest in I2 with our dest and make the resulting
2918 insn the new pattern for I3. Then skip to where we validate
2919 the pattern. Everything was set up above. */
2920 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2921 newpat = p2;
2922 i3_subst_into_i2 = 1;
2923 goto validate_replacement;
2927 /* If I2 is setting a pseudo to a constant and I3 is setting some
2928 sub-part of it to another constant, merge them by making a new
2929 constant. */
2930 if (i1 == 0
2931 && (temp_expr = single_set (i2)) != 0
2932 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2933 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2934 && GET_CODE (PATTERN (i3)) == SET
2935 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2936 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2938 rtx dest = SET_DEST (PATTERN (i3));
2939 rtx temp_dest = SET_DEST (temp_expr);
2940 int offset = -1;
2941 int width = 0;
2943 if (GET_CODE (dest) == ZERO_EXTRACT)
2945 if (CONST_INT_P (XEXP (dest, 1))
2946 && CONST_INT_P (XEXP (dest, 2))
2947 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2948 &dest_mode))
2950 width = INTVAL (XEXP (dest, 1));
2951 offset = INTVAL (XEXP (dest, 2));
2952 dest = XEXP (dest, 0);
2953 if (BITS_BIG_ENDIAN)
2954 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2957 else
2959 if (GET_CODE (dest) == STRICT_LOW_PART)
2960 dest = XEXP (dest, 0);
2961 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2963 width = GET_MODE_PRECISION (dest_mode);
2964 offset = 0;
2968 if (offset >= 0)
2970 /* If this is the low part, we're done. */
2971 if (subreg_lowpart_p (dest))
2973 /* Handle the case where inner is twice the size of outer. */
2974 else if (GET_MODE_PRECISION (temp_mode)
2975 == 2 * GET_MODE_PRECISION (dest_mode))
2976 offset += GET_MODE_PRECISION (dest_mode);
2977 /* Otherwise give up for now. */
2978 else
2979 offset = -1;
2982 if (offset >= 0)
2984 rtx inner = SET_SRC (PATTERN (i3));
2985 rtx outer = SET_SRC (temp_expr);
2987 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2988 rtx_mode_t (inner, dest_mode),
2989 offset, width);
2991 combine_merges++;
2992 subst_insn = i3;
2993 subst_low_luid = DF_INSN_LUID (i2);
2994 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2995 i2dest = temp_dest;
2996 i2dest_killed = dead_or_set_p (i2, i2dest);
2998 /* Replace the source in I2 with the new constant and make the
2999 resulting insn the new pattern for I3. Then skip to where we
3000 validate the pattern. Everything was set up above. */
3001 SUBST (SET_SRC (temp_expr),
3002 immed_wide_int_const (o, temp_mode));
3004 newpat = PATTERN (i2);
3006 /* The dest of I3 has been replaced with the dest of I2. */
3007 changed_i3_dest = 1;
3008 goto validate_replacement;
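/* A worked example of the constant merge above, assuming a little-endian
   target (so that subreg byte 0 is the lowpart): if I2 is
   (set (reg:SI 100) (const_int 0x12345678)) and I3 is
   (set (subreg:HI (reg:SI 100) 0) (const_int 0x5abc)), the wi::insert call
   builds 0x12345abc and the combined pattern validated as the new I3 is
   simply (set (reg:SI 100) (const_int 0x12345abc)), leaving I2 dead.  */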
3012 /* If we have no I1 and I2 looks like:
3013 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
3014 (set Y OP)])
3015 make up a dummy I1 that is
3016 (set Y OP)
3017 and change I2 to be
3018 (set (reg:CC X) (compare:CC Y (const_int 0)))
3020 (We can ignore any trailing CLOBBERs.)
3022 This undoes a previous combination and allows us to match a branch-and-
3023 decrement insn. */
3025 if (!HAVE_cc0 && i1 == 0
3026 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3027 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
3028 == MODE_CC)
3029 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
3030 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
3031 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
3032 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
3033 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3034 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3036 /* We make I1 with the same INSN_UID as I2. This gives it
3037 the same DF_INSN_LUID for value tracking. Our fake I1 will
3038 never appear in the insn stream so giving it the same INSN_UID
3039 as I2 will not cause a problem. */
3041 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3042 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3043 -1, NULL_RTX);
3044 INSN_UID (i1) = INSN_UID (i2);
3046 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3047 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3048 SET_DEST (PATTERN (i1)));
3049 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3050 SUBST_LINK (LOG_LINKS (i2),
3051 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3054 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3055 make those two SETs separate I1 and I2 insns, and make an I0 that is
3056 the original I1. */
3057 if (!HAVE_cc0 && i0 == 0
3058 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3059 && can_split_parallel_of_n_reg_sets (i2, 2)
3060 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3061 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3062 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3063 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3065 /* If there is no I1, there is no I0 either. */
3066 i0 = i1;
3068 /* We make I1 with the same INSN_UID as I2. This gives it
3069 the same DF_INSN_LUID for value tracking. Our fake I1 will
3070 never appear in the insn stream so giving it the same INSN_UID
3071 as I2 will not cause a problem. */
3073 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3074 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3075 -1, NULL_RTX);
3076 INSN_UID (i1) = INSN_UID (i2);
3078 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3081 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
3082 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3084 if (dump_file && (dump_flags & TDF_DETAILS))
3085 fprintf (dump_file, "Can't combine i2 into i3\n");
3086 undo_all ();
3087 return 0;
3089 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3091 if (dump_file && (dump_flags & TDF_DETAILS))
3092 fprintf (dump_file, "Can't combine i1 into i3\n");
3093 undo_all ();
3094 return 0;
3096 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3098 if (dump_file && (dump_flags & TDF_DETAILS))
3099 fprintf (dump_file, "Can't combine i0 into i3\n");
3100 undo_all ();
3101 return 0;
3104 /* Record whether i2 and i3 are trivial moves. */
3105 i2_was_move = is_just_move (i2);
3106 i3_was_move = is_just_move (i3);
3108 /* Record whether I2DEST is used in I2SRC and similarly for the other
3109 cases. Knowing this will help in register status updating below. */
3110 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3111 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3112 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3113 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3114 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3115 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3116 i2dest_killed = dead_or_set_p (i2, i2dest);
3117 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3118 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3120 /* For the earlier insns, determine which of the subsequent ones they
3121 feed. */
3122 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3123 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3124 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3125 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3126 && reg_overlap_mentioned_p (i0dest, i2src))));
3128 /* Ensure that I3's pattern can be the destination of combines. */
3129 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3130 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3131 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3132 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3133 &i3dest_killed))
3135 undo_all ();
3136 return 0;
3139 /* See if any of the insns is a MULT operation. Unless one is, we will
3140 reject a combination that is, since it must be slower. Be conservative
3141 here. */
3142 if (GET_CODE (i2src) == MULT
3143 || (i1 != 0 && GET_CODE (i1src) == MULT)
3144 || (i0 != 0 && GET_CODE (i0src) == MULT)
3145 || (GET_CODE (PATTERN (i3)) == SET
3146 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3147 have_mult = 1;
3149 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3150 We used to do this EXCEPT in one case: I3 has a post-inc in an
3151 output operand. However, that exception can give rise to insns like
3152 mov r3,(r3)+
3153 which is a famous insn on the PDP-11 where the value of r3 used as the
3154 source was model-dependent. Avoid this sort of thing. */
3156 #if 0
3157 if (!(GET_CODE (PATTERN (i3)) == SET
3158 && REG_P (SET_SRC (PATTERN (i3)))
3159 && MEM_P (SET_DEST (PATTERN (i3)))
3160 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3161 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3162 /* It's not the exception. */
3163 #endif
3164 if (AUTO_INC_DEC)
3166 rtx link;
3167 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3168 if (REG_NOTE_KIND (link) == REG_INC
3169 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3170 || (i1 != 0
3171 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3173 undo_all ();
3174 return 0;
3178 /* See if the SETs in I1 or I2 need to be kept around in the merged
3179 instruction: whenever the value set there is still needed past I3.
3180 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3182 For the SET in I1, we have two cases: if I1 and I2 independently feed
3183 into I3, the set in I1 needs to be kept around unless I1DEST dies
3184 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3185 in I1 needs to be kept around unless I1DEST dies or is set in either
3186 I2 or I3. The same considerations apply to I0. */
3188 added_sets_2 = !dead_or_set_p (i3, i2dest);
3190 if (i1)
3191 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3192 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3193 else
3194 added_sets_1 = 0;
3196 if (i0)
3197 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3198 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3199 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3200 && dead_or_set_p (i2, i0dest)));
3201 else
3202 added_sets_0 = 0;
3204 /* We are about to copy insns for the case where they need to be kept
3205 around. Check that they can be copied in the merged instruction. */
3207 if (targetm.cannot_copy_insn_p
3208 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3209 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3210 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3212 undo_all ();
3213 return 0;
3216 /* Count how many auto_inc expressions there were in the original insns;
3217 we need to have the same number in the resulting patterns. */
3219 if (i0)
3220 for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3221 if (i1)
3222 for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3223 for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3224 for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3226 /* If the set in I2 needs to be kept around, we must make a copy of
3227 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3228 PATTERN (I2), we are only substituting for the original I1DEST, not into
3229 an already-substituted copy. This also prevents making self-referential
3230 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3231 I2DEST. */
3233 if (added_sets_2)
3235 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3236 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3237 else
3238 i2pat = copy_rtx (PATTERN (i2));
3241 if (added_sets_1)
3243 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3244 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3245 else
3246 i1pat = copy_rtx (PATTERN (i1));
3249 if (added_sets_0)
3251 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3252 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3253 else
3254 i0pat = copy_rtx (PATTERN (i0));
3257 combine_merges++;
3259 /* Substitute in the latest insn for the regs set by the earlier ones. */
3261 maxreg = max_reg_num ();
3263 subst_insn = i3;
3265 /* Many machines that don't use CC0 have insns that can both perform an
3266 arithmetic operation and set the condition code. These operations will
3267 be represented as a PARALLEL with the first element of the vector
3268 being a COMPARE of an arithmetic operation with the constant zero.
3269 The second element of the vector will set some pseudo to the result
3270 of the same arithmetic operation. If we simplify the COMPARE, we won't
3271 match such a pattern and so will generate an extra insn. Here we test
3272 for this case, where both the comparison and the operation result are
3273 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3274 I2SRC. Later we will make the PARALLEL that contains I2. */
3276 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3277 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3278 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3279 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3281 rtx newpat_dest;
3282 rtx *cc_use_loc = NULL;
3283 rtx_insn *cc_use_insn = NULL;
3284 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3285 machine_mode compare_mode, orig_compare_mode;
3286 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3287 scalar_int_mode mode;
3289 newpat = PATTERN (i3);
3290 newpat_dest = SET_DEST (newpat);
3291 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3293 if (undobuf.other_insn == 0
3294 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3295 &cc_use_insn)))
3297 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3298 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3299 compare_code = simplify_compare_const (compare_code, mode,
3300 op0, &op1);
3301 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3304 /* Do the rest only if op1 is const0_rtx, which may be the
3305 result of simplification. */
3306 if (op1 == const0_rtx)
3308 /* If a single use of the CC is found, prepare to modify it
3309 when SELECT_CC_MODE returns a new CC-class mode, or when
3310 the above simplify_compare_const() returned a new comparison
3311 operator. undobuf.other_insn is assigned the CC use insn
3312 when modifying it. */
3313 if (cc_use_loc)
3315 #ifdef SELECT_CC_MODE
3316 machine_mode new_mode
3317 = SELECT_CC_MODE (compare_code, op0, op1);
3318 if (new_mode != orig_compare_mode
3319 && can_change_dest_mode (SET_DEST (newpat),
3320 added_sets_2, new_mode))
3322 unsigned int regno = REGNO (newpat_dest);
3323 compare_mode = new_mode;
3324 if (regno < FIRST_PSEUDO_REGISTER)
3325 newpat_dest = gen_rtx_REG (compare_mode, regno);
3326 else
3328 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3329 newpat_dest = regno_reg_rtx[regno];
3332 #endif
3333 /* Cases for modifying the CC-using comparison. */
3334 if (compare_code != orig_compare_code
3335 /* ??? Do we need to verify the zero rtx? */
3336 && XEXP (*cc_use_loc, 1) == const0_rtx)
3338 /* Replace cc_use_loc with entire new RTX. */
3339 SUBST (*cc_use_loc,
3340 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3341 newpat_dest, const0_rtx));
3342 undobuf.other_insn = cc_use_insn;
3344 else if (compare_mode != orig_compare_mode)
3346 /* Just replace the CC reg with a new mode. */
3347 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3348 undobuf.other_insn = cc_use_insn;
3352 /* Now we modify the current newpat:
3353 First, SET_DEST(newpat) is updated if the CC mode has been
3354 altered. For targets without SELECT_CC_MODE, this should be
3355 optimized away. */
3356 if (compare_mode != orig_compare_mode)
3357 SUBST (SET_DEST (newpat), newpat_dest);
3358 /* This is always done to propagate i2src into newpat. */
3359 SUBST (SET_SRC (newpat),
3360 gen_rtx_COMPARE (compare_mode, op0, op1));
3361 /* Create new version of i2pat if needed; the below PARALLEL
3362 creation needs this to work correctly. */
3363 if (! rtx_equal_p (i2src, op0))
3364 i2pat = gen_rtx_SET (i2dest, op0);
3365 i2_is_used = 1;
3369 if (i2_is_used == 0)
3371 /* It is possible that the source of I2 or I1 may be performing
3372 an unneeded operation, such as a ZERO_EXTEND of something
3373 that is known to have the high part zero. Handle that case
3374 by letting subst look at the inner insns.
3376 Another way to do this would be to have a function that tries
3377 to simplify a single insn instead of merging two or more
3378 insns. We don't do this because of the potential of infinite
3379 loops and because of the potential extra memory required.
3380 However, doing it the way we are is a bit of a kludge and
3381 doesn't catch all cases.
3383 But only do this if -fexpensive-optimizations since it slows
3384 things down and doesn't usually win.
3386 This is not done in the COMPARE case above because the
3387 unmodified I2PAT is used in the PARALLEL and so a pattern
3388 with a modified I2SRC would not match. */
3390 if (flag_expensive_optimizations)
3392 /* Pass pc_rtx so no substitutions are done, just
3393 simplifications. */
3394 if (i1)
3396 subst_low_luid = DF_INSN_LUID (i1);
3397 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3400 subst_low_luid = DF_INSN_LUID (i2);
3401 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3404 n_occurrences = 0; /* `subst' counts here */
3405 subst_low_luid = DF_INSN_LUID (i2);
3407 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3408 copy of I2SRC each time we substitute it, in order to avoid creating
3409 self-referential RTL when we will be substituting I1SRC for I1DEST
3410 later. Likewise if I0 feeds into I2, either directly or indirectly
3411 through I1, and I0DEST is in I0SRC. */
3412 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3413 (i1_feeds_i2_n && i1dest_in_i1src)
3414 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3415 && i0dest_in_i0src));
3416 substed_i2 = 1;
3418 /* Record whether I2's body now appears within I3's body. */
3419 i2_is_used = n_occurrences;
3422 /* If we already got a failure, don't try to do more. Otherwise, try to
3423 substitute I1 if we have it. */
3425 if (i1 && GET_CODE (newpat) != CLOBBER)
3427 /* Before we can do this substitution, we must redo the test done
3428 above (see detailed comments there) that ensures I1DEST isn't
3429 mentioned in any SETs in NEWPAT that are field assignments. */
3430 if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3431 0, 0, 0))
3433 undo_all ();
3434 return 0;
3437 n_occurrences = 0;
3438 subst_low_luid = DF_INSN_LUID (i1);
3440 /* If the following substitution will modify I1SRC, make a copy of it
3441 for the case where it is substituted for I1DEST in I2PAT later. */
3442 if (added_sets_2 && i1_feeds_i2_n)
3443 i1src_copy = copy_rtx (i1src);
3445 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3446 copy of I1SRC each time we substitute it, in order to avoid creating
3447 self-referential RTL when we will be substituting I0SRC for I0DEST
3448 later. */
3449 newpat = subst (newpat, i1dest, i1src, 0, 0,
3450 i0_feeds_i1_n && i0dest_in_i0src);
3451 substed_i1 = 1;
3453 /* Record whether I1's body now appears within I3's body. */
3454 i1_is_used = n_occurrences;
3457 /* Likewise for I0 if we have it. */
3459 if (i0 && GET_CODE (newpat) != CLOBBER)
3461 if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3462 0, 0, 0))
3464 undo_all ();
3465 return 0;
3468 /* If the following substitution will modify I0SRC, make a copy of it
3469 for the case where it is substituted for I0DEST in I1PAT later. */
3470 if (added_sets_1 && i0_feeds_i1_n)
3471 i0src_copy = copy_rtx (i0src);
3472 /* And a copy for I0DEST in I2PAT substitution. */
3473 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3474 || (i0_feeds_i2_n)))
3475 i0src_copy2 = copy_rtx (i0src);
3477 n_occurrences = 0;
3478 subst_low_luid = DF_INSN_LUID (i0);
3479 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3480 substed_i0 = 1;
3483 if (n_auto_inc)
3485 int new_n_auto_inc = 0;
3486 for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3488 if (n_auto_inc != new_n_auto_inc)
3490 if (dump_file && (dump_flags & TDF_DETAILS))
3491 fprintf (dump_file, "Number of auto_inc expressions changed\n");
3492 undo_all ();
3493 return 0;
3497 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3498 to count all the ways that I2SRC and I1SRC can be used. */
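	 /* For instance, if I2SRC contains (mem:SI (post_inc:SI (reg:SI 102)))
	    and that expression would now be used twice in NEWPAT, the
	    auto-increment would happen twice, so we must give up.  (Register
	    numbers here are only illustrative.)  */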
3499 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3500 && i2_is_used + added_sets_2 > 1)
3501 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3502 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3503 > 1))
3504 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3505 && (n_occurrences + added_sets_0
3506 + (added_sets_1 && i0_feeds_i1_n)
3507 + (added_sets_2 && i0_feeds_i2_n)
3508 > 1))
3509 /* Fail if we tried to make a new register. */
3510 || max_reg_num () != maxreg
3511 /* Fail if we couldn't do something and have a CLOBBER. */
3512 || GET_CODE (newpat) == CLOBBER
3513 /* Fail if this new pattern is a MULT and we didn't have one before
3514 at the outer level. */
3515 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3516 && ! have_mult))
3518 undo_all ();
3519 return 0;
3522 /* If the actions of the earlier insns must be kept
3523 in addition to substituting them into the latest one,
3524 we must make a new PARALLEL for the latest insn
3525 to hold the additional SETs.  */
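	 /* For example, when I2's destination is still live after I3
	    (ADDED_SETS_2), the result is roughly
	      (parallel [(set <i3 dest> <combined src>)
	                 (set <i2 dest> <i2 src>)])
	    with the extra SETs filled in at the end of the vector below.  */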
3527 if (added_sets_0 || added_sets_1 || added_sets_2)
3529 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3530 combine_extras++;
3532 if (GET_CODE (newpat) == PARALLEL)
3534 rtvec old = XVEC (newpat, 0);
3535 total_sets = XVECLEN (newpat, 0) + extra_sets;
3536 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3537 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3538 sizeof (old->elem[0]) * old->num_elem);
3540 else
3542 rtx old = newpat;
3543 total_sets = 1 + extra_sets;
3544 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3545 XVECEXP (newpat, 0, 0) = old;
3548 if (added_sets_0)
3549 XVECEXP (newpat, 0, --total_sets) = i0pat;
3551 if (added_sets_1)
3553 rtx t = i1pat;
3554 if (i0_feeds_i1_n)
3555 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3557 XVECEXP (newpat, 0, --total_sets) = t;
3559 if (added_sets_2)
3561 rtx t = i2pat;
3562 if (i1_feeds_i2_n)
3563 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3564 i0_feeds_i1_n && i0dest_in_i0src);
3565 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3566 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3568 XVECEXP (newpat, 0, --total_sets) = t;
3572 validate_replacement:
3574 /* Note which hard regs this insn has as inputs. */
3575 mark_used_regs_combine (newpat);
3577 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3578 consider splitting this pattern, we might need these clobbers. */
3579 if (i1 && GET_CODE (newpat) == PARALLEL
3580 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3582 int len = XVECLEN (newpat, 0);
3584 newpat_vec_with_clobbers = rtvec_alloc (len);
3585 for (i = 0; i < len; i++)
3586 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3589 /* We have recognized nothing yet. */
3590 insn_code_number = -1;
3592 /* See if this is a PARALLEL of two SETs where one SET's destination is
3593 a register that is unused and this isn't marked as an instruction that
3594 might trap in an EH region. In that case, we just need the other SET.
3595 We prefer this over the PARALLEL.
3597 This can occur when simplifying a divmod insn. We *must* test for this
3598 case here because the code below that splits two independent SETs doesn't
3599 handle this case correctly when it updates the register status.
3601 It's pointless doing this if we originally had two sets, one from
3602 i3, and one from i2. Combining then splitting the parallel results
3603 in the original i2 again plus an invalid insn (which we delete).
3604 The net effect is only to move instructions around, which makes
3605 debug info less accurate.
3607 If the remaining SET came from I2 its destination should not be used
3608 between I2 and I3. See PR82024. */
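	 /* A typical instance is a divmod, where NEWPAT looks like
	      (parallel [(set (reg:SI 100) (div:SI ...))
	                 (set (reg:SI 101) (mod:SI ...))])
	    and (reg:SI 101) is REG_UNUSED in I3, so only the DIV set is kept.
	    (Register numbers are only illustrative.)  */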
3610 if (!(added_sets_2 && i1 == 0)
3611 && is_parallel_of_n_reg_sets (newpat, 2)
3612 && asm_noperands (newpat) < 0)
3614 rtx set0 = XVECEXP (newpat, 0, 0);
3615 rtx set1 = XVECEXP (newpat, 0, 1);
3616 rtx oldpat = newpat;
3618 if (((REG_P (SET_DEST (set1))
3619 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3620 || (GET_CODE (SET_DEST (set1)) == SUBREG
3621 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3622 && insn_nothrow_p (i3)
3623 && !side_effects_p (SET_SRC (set1)))
3625 newpat = set0;
3626 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3629 else if (((REG_P (SET_DEST (set0))
3630 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3631 || (GET_CODE (SET_DEST (set0)) == SUBREG
3632 && find_reg_note (i3, REG_UNUSED,
3633 SUBREG_REG (SET_DEST (set0)))))
3634 && insn_nothrow_p (i3)
3635 && !side_effects_p (SET_SRC (set0)))
3637 rtx dest = SET_DEST (set1);
3638 if (GET_CODE (dest) == SUBREG)
3639 dest = SUBREG_REG (dest);
3640 if (!reg_used_between_p (dest, i2, i3))
3642 newpat = set1;
3643 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3645 if (insn_code_number >= 0)
3646 changed_i3_dest = 1;
3650 if (insn_code_number < 0)
3651 newpat = oldpat;
3654 /* Is the result of combination a valid instruction? */
3655 if (insn_code_number < 0)
3656 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3658 /* If we were combining three insns and the result is a simple SET
3659 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3660 insns. There are two ways to do this. It can be split using a
3661 machine-specific method (like when you have an addition of a large
3662 constant) or by combine in the function find_split_point. */
3664 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3665 && asm_noperands (newpat) < 0)
3667 rtx parallel, *split;
3668 rtx_insn *m_split_insn;
3670 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3671 use I2DEST as a scratch register will help. In the latter case,
3672 convert I2DEST to the mode of the source of NEWPAT if we can. */
3674 m_split_insn = combine_split_insns (newpat, i3);
3676 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3677 inputs of NEWPAT. */
3679 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3680 possible to try that as a scratch reg. This would require adding
3681 more code to make it work though. */
3683 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3685 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3687 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3688 (temporarily, until we are committed to this instruction
3689 combination) does not work: for example, any call to nonzero_bits
3690 on the register (from a splitter in the MD file, for example)
3691 will get the old information, which is invalid.
3693 Since nowadays we can create registers during combine just fine,
3694 we should just create a new one here, not reuse i2dest. */
3696 /* First try to split using the original register as a
3697 scratch register. */
3698 parallel = gen_rtx_PARALLEL (VOIDmode,
3699 gen_rtvec (2, newpat,
3700 gen_rtx_CLOBBER (VOIDmode,
3701 i2dest)));
3702 m_split_insn = combine_split_insns (parallel, i3);
3704 /* If that didn't work, try changing the mode of I2DEST if
3705 we can. */
3706 if (m_split_insn == 0
3707 && new_mode != GET_MODE (i2dest)
3708 && new_mode != VOIDmode
3709 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3711 machine_mode old_mode = GET_MODE (i2dest);
3712 rtx ni2dest;
3714 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3715 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3716 else
3718 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3719 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3722 parallel = (gen_rtx_PARALLEL
3723 (VOIDmode,
3724 gen_rtvec (2, newpat,
3725 gen_rtx_CLOBBER (VOIDmode,
3726 ni2dest))));
3727 m_split_insn = combine_split_insns (parallel, i3);
3729 if (m_split_insn == 0
3730 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3732 struct undo *buf;
3734 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3735 buf = undobuf.undos;
3736 undobuf.undos = buf->next;
3737 buf->next = undobuf.frees;
3738 undobuf.frees = buf;
3742 i2scratch = m_split_insn != 0;
3745 /* If recog_for_combine has discarded clobbers, try to use them
3746 again for the split. */
3747 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3749 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3750 m_split_insn = combine_split_insns (parallel, i3);
3753 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3755 rtx m_split_pat = PATTERN (m_split_insn);
3756 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3757 if (insn_code_number >= 0)
3758 newpat = m_split_pat;
3760 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3761 && (next_nonnote_nondebug_insn (i2) == i3
3762 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3764 rtx i2set, i3set;
3765 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3766 newi2pat = PATTERN (m_split_insn);
3768 i3set = single_set (NEXT_INSN (m_split_insn));
3769 i2set = single_set (m_split_insn);
3771 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3773 /* If I2 or I3 has multiple SETs, we won't know how to track
3774 register status, so don't use these insns. If I2's destination
3775 is used between I2 and I3, we also can't use these insns. */
3777 if (i2_code_number >= 0 && i2set && i3set
3778 && (next_nonnote_nondebug_insn (i2) == i3
3779 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3780 insn_code_number = recog_for_combine (&newi3pat, i3,
3781 &new_i3_notes);
3782 if (insn_code_number >= 0)
3783 newpat = newi3pat;
3785 /* It is possible that both insns now set the destination of I3.
3786 If so, we must show an extra use of it. */
3788 if (insn_code_number >= 0)
3790 rtx new_i3_dest = SET_DEST (i3set);
3791 rtx new_i2_dest = SET_DEST (i2set);
3793 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3794 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3795 || GET_CODE (new_i3_dest) == SUBREG)
3796 new_i3_dest = XEXP (new_i3_dest, 0);
3798 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3799 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3800 || GET_CODE (new_i2_dest) == SUBREG)
3801 new_i2_dest = XEXP (new_i2_dest, 0);
3803 if (REG_P (new_i3_dest)
3804 && REG_P (new_i2_dest)
3805 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3806 && REGNO (new_i2_dest) < reg_n_sets_max)
3807 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3811 /* If we can split it and use I2DEST, go ahead and see if that
3812 helps things be recognized. Verify that none of the registers
3813 are set between I2 and I3. */
3814 if (insn_code_number < 0
3815 && (split = find_split_point (&newpat, i3, false)) != 0
3816 && (!HAVE_cc0 || REG_P (i2dest))
3817 /* We need I2DEST in the proper mode. If it is a hard register
3818 or the only use of a pseudo, we can change its mode.
3819 Make sure we don't change a hard register to have a mode that
3820 isn't valid for it, or change the number of registers. */
3821 && (GET_MODE (*split) == GET_MODE (i2dest)
3822 || GET_MODE (*split) == VOIDmode
3823 || can_change_dest_mode (i2dest, added_sets_2,
3824 GET_MODE (*split)))
3825 && (next_nonnote_nondebug_insn (i2) == i3
3826 || !modified_between_p (*split, i2, i3))
3827 /* We can't overwrite I2DEST if its value is still used by
3828 NEWPAT. */
3829 && ! reg_referenced_p (i2dest, newpat))
3831 rtx newdest = i2dest;
3832 enum rtx_code split_code = GET_CODE (*split);
3833 machine_mode split_mode = GET_MODE (*split);
3834 bool subst_done = false;
3835 newi2pat = NULL_RTX;
3837 i2scratch = true;
3839 /* *SPLIT may be part of I2SRC, so make sure we have the
3840 original expression around for later debug processing.
3841 We should not need I2SRC any more in other cases. */
3842 if (MAY_HAVE_DEBUG_BIND_INSNS)
3843 i2src = copy_rtx (i2src);
3844 else
3845 i2src = NULL;
3847 /* Get NEWDEST as a register in the proper mode. We have already
3848 validated that we can do this. */
3849 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3851 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3852 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3853 else
3855 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3856 newdest = regno_reg_rtx[REGNO (i2dest)];
3860 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3861 an ASHIFT. This can occur if it was inside a PLUS and hence
3862 appeared to be a memory address. This is a kludge. */
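	  /* For instance, (mult:SI (reg:SI 100) (const_int 8)) is rewritten
	     here as (ashift:SI (reg:SI 100) (const_int 3)).  */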
3863 if (split_code == MULT
3864 && CONST_INT_P (XEXP (*split, 1))
3865 && INTVAL (XEXP (*split, 1)) > 0
3866 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3868 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3869 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3870 XEXP (*split, 0), i_rtx));
3871 /* Update split_code because we may not have a multiply
3872 anymore. */
3873 split_code = GET_CODE (*split);
3876 /* Similarly for (plus (mult FOO (const_int pow2))). */
3877 if (split_code == PLUS
3878 && GET_CODE (XEXP (*split, 0)) == MULT
3879 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3880 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3881 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3883 rtx nsplit = XEXP (*split, 0);
3884 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3885 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3886 XEXP (nsplit, 0),
3887 i_rtx));
3888 /* Update split_code because we may not have a multiply
3889 anymore. */
3890 split_code = GET_CODE (*split);
3893 #ifdef INSN_SCHEDULING
3894 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3895 be written as a ZERO_EXTEND. */
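	  /* E.g. (subreg:SI (mem:QI <addr>) 0) typically becomes
	     (zero_extend:SI (mem:QI <addr>)) below.  */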
3896 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3898 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3899 what it really is. */
3900 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3901 == SIGN_EXTEND)
3902 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3903 SUBREG_REG (*split)));
3904 else
3905 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3906 SUBREG_REG (*split)));
3908 #endif
3910 /* Attempt to split binary operators using arithmetic identities. */
3911 if (BINARY_P (SET_SRC (newpat))
3912 && split_mode == GET_MODE (SET_SRC (newpat))
3913 && ! side_effects_p (SET_SRC (newpat)))
3915 rtx setsrc = SET_SRC (newpat);
3916 machine_mode mode = GET_MODE (setsrc);
3917 enum rtx_code code = GET_CODE (setsrc);
3918 rtx src_op0 = XEXP (setsrc, 0);
3919 rtx src_op1 = XEXP (setsrc, 1);
3921 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
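	      /* E.g. (set (reg 100) (plus (reg 101) (reg 101))) becomes
	         NEWI2PAT = (set NEWDEST (reg 101)) while NEWPAT's source
	         is rewritten as (plus NEWDEST NEWDEST).  */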
3922 if (rtx_equal_p (src_op0, src_op1))
3924 newi2pat = gen_rtx_SET (newdest, src_op0);
3925 SUBST (XEXP (setsrc, 0), newdest);
3926 SUBST (XEXP (setsrc, 1), newdest);
3927 subst_done = true;
3929 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3930 else if ((code == PLUS || code == MULT)
3931 && GET_CODE (src_op0) == code
3932 && GET_CODE (XEXP (src_op0, 0)) == code
3933 && (INTEGRAL_MODE_P (mode)
3934 || (FLOAT_MODE_P (mode)
3935 && flag_unsafe_math_optimizations)))
3937 rtx p = XEXP (XEXP (src_op0, 0), 0);
3938 rtx q = XEXP (XEXP (src_op0, 0), 1);
3939 rtx r = XEXP (src_op0, 1);
3940 rtx s = src_op1;
3942 /* Split both "((X op Y) op X) op Y" and
3943 "((X op Y) op Y) op X" as "T op T" where T is
3944 "X op Y". */
3945 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3946 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3948 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3949 SUBST (XEXP (setsrc, 0), newdest);
3950 SUBST (XEXP (setsrc, 1), newdest);
3951 subst_done = true;
3953 /* Split "((X op X) op Y) op Y" as "T op T" where
3954 T is "X op Y". */
3955 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3957 rtx tmp = simplify_gen_binary (code, mode, p, r);
3958 newi2pat = gen_rtx_SET (newdest, tmp);
3959 SUBST (XEXP (setsrc, 0), newdest);
3960 SUBST (XEXP (setsrc, 1), newdest);
3961 subst_done = true;
3966 if (!subst_done)
3968 newi2pat = gen_rtx_SET (newdest, *split);
3969 SUBST (*split, newdest);
3972 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3974 /* recog_for_combine might have added CLOBBERs to newi2pat.
3975 Make sure NEWPAT does not depend on the clobbered regs. */
3976 if (GET_CODE (newi2pat) == PARALLEL)
3977 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3978 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3980 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3981 if (reg_overlap_mentioned_p (reg, newpat))
3983 undo_all ();
3984 return 0;
3988 /* If the split point was a MULT and we didn't have one before,
3989 don't use one now. */
3990 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3991 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3995 /* Check for a case where we loaded from memory in a narrow mode and
3996 then sign extended it, but we need both registers. In that case,
3997 we have a PARALLEL with both loads from the same memory location.
3998 We can split this into a load from memory followed by a register-register
3999 copy. This saves at least one insn, more if register allocation can
4000 eliminate the copy.
4002 We cannot do this if the destination of the first assignment is a
4003 condition code register or cc0. We eliminate this case by making sure
4004 the SET_DEST and SET_SRC have the same mode.
4006 We cannot do this if the destination of the second assignment is
4007 a register that we have already assumed is zero-extended. Similarly
4008 for a SUBREG of such a register. */
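	 /* Schematically, NEWPAT here is
	      (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI <addr>)))
	                 (set (reg:HI 101) (mem:HI <addr>))])
	    and it is split into the extending load as NEWI2PAT plus a copy
	    of the low part of (reg:SI 100) into (reg:HI 101) as NEWPAT.
	    (Modes and register numbers are only illustrative.)  */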
4010 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
4011 && GET_CODE (newpat) == PARALLEL
4012 && XVECLEN (newpat, 0) == 2
4013 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4014 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
4015 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
4016 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
4017 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4018 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
4019 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
4020 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
4021 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4022 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4023 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
4024 (REG_P (temp_expr)
4025 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4026 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4027 BITS_PER_WORD)
4028 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4029 HOST_BITS_PER_INT)
4030 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4031 != GET_MODE_MASK (word_mode))))
4032 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
4033 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4034 (REG_P (temp_expr)
4035 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4036 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4037 BITS_PER_WORD)
4038 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4039 HOST_BITS_PER_INT)
4040 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4041 != GET_MODE_MASK (word_mode)))))
4042 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4043 SET_SRC (XVECEXP (newpat, 0, 1)))
4044 && ! find_reg_note (i3, REG_UNUSED,
4045 SET_DEST (XVECEXP (newpat, 0, 0))))
4047 rtx ni2dest;
4049 newi2pat = XVECEXP (newpat, 0, 0);
4050 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4051 newpat = XVECEXP (newpat, 0, 1);
4052 SUBST (SET_SRC (newpat),
4053 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4054 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4056 if (i2_code_number >= 0)
4057 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4059 if (insn_code_number >= 0)
4060 swap_i2i3 = 1;
4063 /* Similarly, check for a case where we have a PARALLEL of two independent
4064 SETs but we started with three insns. In this case, we can do the sets
4065 as two separate insns. This case occurs when some SET allows two
4066 other insns to combine, but the destination of that SET is still live.
4068 Also do this if we started with two insns and (at least) one of the
4069 resulting sets is a noop; this noop will be deleted later.
4071 Also do this if we started with two insns neither of which was a simple
4072 move. */
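	 /* For instance, if combining produced
	      (parallel [(set (reg 100) ...) (set (reg 101) ...)])
	    with independent destinations, one SET can become I2 and the
	    other I3 instead of failing the whole combination.  */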
4074 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4075 && GET_CODE (newpat) == PARALLEL
4076 && XVECLEN (newpat, 0) == 2
4077 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4078 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4079 && (i1
4080 || set_noop_p (XVECEXP (newpat, 0, 0))
4081 || set_noop_p (XVECEXP (newpat, 0, 1))
4082 || (!i2_was_move && !i3_was_move))
4083 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4084 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4085 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4086 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4087 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4088 XVECEXP (newpat, 0, 0))
4089 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4090 XVECEXP (newpat, 0, 1))
4091 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4092 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4094 rtx set0 = XVECEXP (newpat, 0, 0);
4095 rtx set1 = XVECEXP (newpat, 0, 1);
4097 /* Normally, it doesn't matter which of the two is done first,
4098 but the one that references cc0 can't be the second, and
4099 one which uses any regs/memory set in between i2 and i3 can't
4100 be first. The PARALLEL might also have been pre-existing in i3,
4101 so we need to make sure that we won't wrongly hoist a SET to i2
4102 that would conflict with a death note present in there, or would
4103 have its dest modified between i2 and i3. */
4104 if (!modified_between_p (SET_SRC (set1), i2, i3)
4105 && !(REG_P (SET_DEST (set1))
4106 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4107 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4108 && find_reg_note (i2, REG_DEAD,
4109 SUBREG_REG (SET_DEST (set1))))
4110 && !modified_between_p (SET_DEST (set1), i2, i3)
4111 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4112 /* If I3 is a jump, ensure that set0 is a jump so that
4113 we do not create invalid RTL. */
4114 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4117 newi2pat = set1;
4118 newpat = set0;
4120 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4121 && !(REG_P (SET_DEST (set0))
4122 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4123 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4124 && find_reg_note (i2, REG_DEAD,
4125 SUBREG_REG (SET_DEST (set0))))
4126 && !modified_between_p (SET_DEST (set0), i2, i3)
4127 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4128 /* If I3 is a jump, ensure that set1 is a jump so that
4129 we do not create invalid RTL. */
4130 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4133 newi2pat = set0;
4134 newpat = set1;
4136 else
4138 undo_all ();
4139 return 0;
4142 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4144 if (i2_code_number >= 0)
4146 /* recog_for_combine might have added CLOBBERs to newi2pat.
4147 Make sure NEWPAT does not depend on the clobbered regs. */
4148 if (GET_CODE (newi2pat) == PARALLEL)
4150 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4151 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4153 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4154 if (reg_overlap_mentioned_p (reg, newpat))
4156 undo_all ();
4157 return 0;
4162 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4164 if (insn_code_number >= 0)
4165 split_i2i3 = 1;
4169 /* If it still isn't recognized, fail and change things back the way they
4170 were. */
4171 if ((insn_code_number < 0
4172 /* Is the result a reasonable ASM_OPERANDS? */
4173 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4175 undo_all ();
4176 return 0;
4179 /* If we had to change another insn, make sure it is valid also. */
4180 if (undobuf.other_insn)
4182 CLEAR_HARD_REG_SET (newpat_used_regs);
4184 other_pat = PATTERN (undobuf.other_insn);
4185 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4186 &new_other_notes);
4188 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4190 undo_all ();
4191 return 0;
4195 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4196 they are adjacent to each other or not. */
4197 if (HAVE_cc0)
4199 rtx_insn *p = prev_nonnote_insn (i3);
4200 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4201 && sets_cc0_p (newi2pat))
4203 undo_all ();
4204 return 0;
4208 /* Only allow this combination if insn_cost reports that the
4209 replacement instructions are cheaper than the originals. */
4210 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4212 undo_all ();
4213 return 0;
4216 if (MAY_HAVE_DEBUG_BIND_INSNS)
4218 struct undo *undo;
4220 for (undo = undobuf.undos; undo; undo = undo->next)
4221 if (undo->kind == UNDO_MODE)
4223 rtx reg = *undo->where.r;
4224 machine_mode new_mode = GET_MODE (reg);
4225 machine_mode old_mode = undo->old_contents.m;
4227 /* Temporarily revert mode back. */
4228 adjust_reg_mode (reg, old_mode);
4230 if (reg == i2dest && i2scratch)
4232 /* If we used i2dest as a scratch register with a
4233 different mode, substitute it for the original
4234 i2src while its original mode is temporarily
4235 restored, and then clear i2scratch so that we don't
4236 do it again later. */
4237 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4238 this_basic_block);
4239 i2scratch = false;
4240 /* Put back the new mode. */
4241 adjust_reg_mode (reg, new_mode);
4243 else
4245 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4246 rtx_insn *first, *last;
4248 if (reg == i2dest)
4250 first = i2;
4251 last = last_combined_insn;
4253 else
4255 first = i3;
4256 last = undobuf.other_insn;
4257 gcc_assert (last);
4258 if (DF_INSN_LUID (last)
4259 < DF_INSN_LUID (last_combined_insn))
4260 last = last_combined_insn;
4263 /* We're dealing with a reg that changed mode but not
4264 meaning, so we want to turn it into a subreg for
4265 the new mode. However, because of REG sharing and
4266 because its mode had already changed, we have to do
4267 it in two steps. First, replace any debug uses of
4268 reg, with its original mode temporarily restored,
4269 with this copy we have created; then, replace the
4270 copy with the SUBREG of the original shared reg,
4271 once again changed to the new mode. */
4272 propagate_for_debug (first, last, reg, tempreg,
4273 this_basic_block);
4274 adjust_reg_mode (reg, new_mode);
4275 propagate_for_debug (first, last, tempreg,
4276 lowpart_subreg (old_mode, reg, new_mode),
4277 this_basic_block);
4282 /* If we will be able to accept this, we have made a
4283 change to the destination of I3. This requires us to
4284 do a few adjustments. */
4286 if (changed_i3_dest)
4288 PATTERN (i3) = newpat;
4289 adjust_for_new_dest (i3);
4292 /* We now know that we can do this combination. Merge the insns and
4293 update the status of registers and LOG_LINKS. */
4295 if (undobuf.other_insn)
4297 rtx note, next;
4299 PATTERN (undobuf.other_insn) = other_pat;
4301 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4302 ensure that they are still valid. Then add any non-duplicate
4303 notes added by recog_for_combine. */
4304 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4306 next = XEXP (note, 1);
4308 if ((REG_NOTE_KIND (note) == REG_DEAD
4309 && !reg_referenced_p (XEXP (note, 0),
4310 PATTERN (undobuf.other_insn)))
4311 ||(REG_NOTE_KIND (note) == REG_UNUSED
4312 && !reg_set_p (XEXP (note, 0),
4313 PATTERN (undobuf.other_insn)))
4314 /* Simply drop the equal note, since it may no longer be valid
4315 for other_insn. It may be possible to record that the CC
4316 register has changed and only discard those notes, but
4317 in practice that is an unnecessary complication and doesn't
4318 give any meaningful improvement.
4320 See PR78559. */
4321 || REG_NOTE_KIND (note) == REG_EQUAL
4322 || REG_NOTE_KIND (note) == REG_EQUIV)
4323 remove_note (undobuf.other_insn, note);
4326 distribute_notes (new_other_notes, undobuf.other_insn,
4327 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4328 NULL_RTX);
4331 if (swap_i2i3)
4333 /* I3 now uses what used to be its destination and which is now
4334 I2's destination. This requires us to do a few adjustments. */
4335 PATTERN (i3) = newpat;
4336 adjust_for_new_dest (i3);
4339 if (swap_i2i3 || split_i2i3)
4341 /* We might need a LOG_LINK from I3 to I2. But then we used to
4342 have one, so we still will.
4344 However, some later insn might be using I2's dest and have
4345 a LOG_LINK pointing at I3. We should change it to point at
4346 I2 instead. */
4348 /* newi2pat is usually a SET here; however, recog_for_combine might
4349 have added some clobbers. */
4350 rtx x = newi2pat;
4351 if (GET_CODE (x) == PARALLEL)
4352 x = XVECEXP (newi2pat, 0, 0);
4354 /* It can only be a SET of a REG or of a SUBREG of a REG. */
4355 unsigned int regno = reg_or_subregno (SET_DEST (x));
4357 bool done = false;
4358 for (rtx_insn *insn = NEXT_INSN (i3);
4359 !done
4360 && insn
4361 && NONDEBUG_INSN_P (insn)
4362 && BLOCK_FOR_INSN (insn) == this_basic_block;
4363 insn = NEXT_INSN (insn))
4365 struct insn_link *link;
4366 FOR_EACH_LOG_LINK (link, insn)
4367 if (link->insn == i3 && link->regno == regno)
4369 link->insn = i2;
4370 done = true;
4371 break;
4377 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4378 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4379 rtx midnotes = 0;
4380 int from_luid;
4381 /* Compute which registers we expect to eliminate. newi2pat may be setting
4382 either i3dest or i2dest, so we must check it. */
4383 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4384 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4385 || !i2dest_killed
4386 ? 0 : i2dest);
4387 /* For i1, we need to compute both local elimination and global
4388 elimination information with respect to newi2pat because i1dest
4389 may be the same as i3dest, in which case newi2pat may be setting
4390 i1dest. Global information is used when distributing REG_DEAD
4391 note for i2 and i3, in which case it does matter if newi2pat sets
4392 i1dest or not.
4394 Local information is used when distributing REG_DEAD note for i1,
4395 in which case it doesn't matter if newi2pat sets i1dest or not.
4396 See PR62151, if we have four insns combination:
4397 i0: r0 <- i0src
4398 i1: r1 <- i1src (using r0)
4399 REG_DEAD (r0)
4400 i2: r0 <- i2src (using r1)
4401 i3: r3 <- i3src (using r0)
4402 ix: using r0
4403 From i1's point of view, r0 is eliminated, no matter if it is set
4404 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4405 should be discarded.
4407 Note local information only affects cases in forms like "I1->I2->I3",
4408 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4409 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4410 i0dest anyway. */
4411 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4412 || !i1dest_killed
4413 ? 0 : i1dest);
4414 rtx elim_i1 = (local_elim_i1 == 0
4415 || (newi2pat && reg_set_p (i1dest, newi2pat))
4416 ? 0 : i1dest);
4417 /* Same case as i1. */
4418 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4419 ? 0 : i0dest);
4420 rtx elim_i0 = (local_elim_i0 == 0
4421 || (newi2pat && reg_set_p (i0dest, newi2pat))
4422 ? 0 : i0dest);
4424 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4425 clear them. */
4426 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4427 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4428 if (i1)
4429 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4430 if (i0)
4431 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4433 /* Ensure that we do not have something that should not be shared but
4434 occurs multiple times in the new insns. Check this by first
4435 resetting all the `used' flags and then copying anything that is shared.
4437 reset_used_flags (i3notes);
4438 reset_used_flags (i2notes);
4439 reset_used_flags (i1notes);
4440 reset_used_flags (i0notes);
4441 reset_used_flags (newpat);
4442 reset_used_flags (newi2pat);
4443 if (undobuf.other_insn)
4444 reset_used_flags (PATTERN (undobuf.other_insn));
4446 i3notes = copy_rtx_if_shared (i3notes);
4447 i2notes = copy_rtx_if_shared (i2notes);
4448 i1notes = copy_rtx_if_shared (i1notes);
4449 i0notes = copy_rtx_if_shared (i0notes);
4450 newpat = copy_rtx_if_shared (newpat);
4451 newi2pat = copy_rtx_if_shared (newi2pat);
4452 if (undobuf.other_insn)
4453 reset_used_flags (PATTERN (undobuf.other_insn));
4455 INSN_CODE (i3) = insn_code_number;
4456 PATTERN (i3) = newpat;
4458 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4460 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4461 link = XEXP (link, 1))
4463 if (substed_i2)
4465 /* I2SRC must still be meaningful at this point. Some
4466 splitting operations can invalidate I2SRC, but those
4467 operations do not apply to calls. */
4468 gcc_assert (i2src);
4469 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4470 i2dest, i2src);
4472 if (substed_i1)
4473 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4474 i1dest, i1src);
4475 if (substed_i0)
4476 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4477 i0dest, i0src);
4481 if (undobuf.other_insn)
4482 INSN_CODE (undobuf.other_insn) = other_code_number;
4484 /* We had one special case above where I2 had more than one set and
4485 we replaced a destination of one of those sets with the destination
4486 of I3. In that case, we have to update LOG_LINKS of insns later
4487 in this basic block. Note that this (expensive) case is rare.
4489 Also, in this case, we must pretend that all REG_NOTEs for I2
4490 actually came from I3, so that REG_UNUSED notes from I2 will be
4491 properly handled. */
4493 if (i3_subst_into_i2)
4495 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4496 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4497 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4498 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4499 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4500 && ! find_reg_note (i2, REG_UNUSED,
4501 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4502 for (temp_insn = NEXT_INSN (i2);
4503 temp_insn
4504 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4505 || BB_HEAD (this_basic_block) != temp_insn);
4506 temp_insn = NEXT_INSN (temp_insn))
4507 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4508 FOR_EACH_LOG_LINK (link, temp_insn)
4509 if (link->insn == i2)
4510 link->insn = i3;
4512 if (i3notes)
4514 rtx link = i3notes;
4515 while (XEXP (link, 1))
4516 link = XEXP (link, 1);
4517 XEXP (link, 1) = i2notes;
4519 else
4520 i3notes = i2notes;
4521 i2notes = 0;
4524 LOG_LINKS (i3) = NULL;
4525 REG_NOTES (i3) = 0;
4526 LOG_LINKS (i2) = NULL;
4527 REG_NOTES (i2) = 0;
4529 if (newi2pat)
4531 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4532 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4533 this_basic_block);
4534 INSN_CODE (i2) = i2_code_number;
4535 PATTERN (i2) = newi2pat;
4537 else
4539 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4540 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4541 this_basic_block);
4542 SET_INSN_DELETED (i2);
4545 if (i1)
4547 LOG_LINKS (i1) = NULL;
4548 REG_NOTES (i1) = 0;
4549 if (MAY_HAVE_DEBUG_BIND_INSNS)
4550 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4551 this_basic_block);
4552 SET_INSN_DELETED (i1);
4555 if (i0)
4557 LOG_LINKS (i0) = NULL;
4558 REG_NOTES (i0) = 0;
4559 if (MAY_HAVE_DEBUG_BIND_INSNS)
4560 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4561 this_basic_block);
4562 SET_INSN_DELETED (i0);
4565 /* Get death notes for everything that is now used in either I3 or
4566 I2 and used to die in a previous insn. If we built two new
4567 patterns, move from I1 to I2 then I2 to I3 so that we get the
4568 proper movement on registers that I2 modifies. */
4570 if (i0)
4571 from_luid = DF_INSN_LUID (i0);
4572 else if (i1)
4573 from_luid = DF_INSN_LUID (i1);
4574 else
4575 from_luid = DF_INSN_LUID (i2);
4576 if (newi2pat)
4577 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4578 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4580 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4581 if (i3notes)
4582 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4583 elim_i2, elim_i1, elim_i0);
4584 if (i2notes)
4585 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4586 elim_i2, elim_i1, elim_i0);
4587 if (i1notes)
4588 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4589 elim_i2, local_elim_i1, local_elim_i0);
4590 if (i0notes)
4591 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4592 elim_i2, elim_i1, local_elim_i0);
4593 if (midnotes)
4594 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4595 elim_i2, elim_i1, elim_i0);
4597 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4598 know these are REG_UNUSED and want them to go to the desired insn,
4599 so we always pass it as i3. */
4601 if (newi2pat && new_i2_notes)
4602 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4603 NULL_RTX);
4605 if (new_i3_notes)
4606 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4607 NULL_RTX);
4609 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4610 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4611 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4612 in that case, it might delete I2. Similarly for I2 and I1.
4613 Show an additional death due to the REG_DEAD note we make here. If
4614 we discard it in distribute_notes, we will decrement it again. */
4616 if (i3dest_killed)
4618 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4619 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4620 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4621 elim_i1, elim_i0);
4622 else
4623 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4624 elim_i2, elim_i1, elim_i0);
4627 if (i2dest_in_i2src)
4629 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4630 if (newi2pat && reg_set_p (i2dest, newi2pat))
4631 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4632 NULL_RTX, NULL_RTX);
4633 else
4634 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4635 NULL_RTX, NULL_RTX, NULL_RTX);
4638 if (i1dest_in_i1src)
4640 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4641 if (newi2pat && reg_set_p (i1dest, newi2pat))
4642 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4643 NULL_RTX, NULL_RTX);
4644 else
4645 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4646 NULL_RTX, NULL_RTX, NULL_RTX);
4649 if (i0dest_in_i0src)
4651 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4652 if (newi2pat && reg_set_p (i0dest, newi2pat))
4653 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4654 NULL_RTX, NULL_RTX);
4655 else
4656 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4657 NULL_RTX, NULL_RTX, NULL_RTX);
4660 distribute_links (i3links);
4661 distribute_links (i2links);
4662 distribute_links (i1links);
4663 distribute_links (i0links);
4665 if (REG_P (i2dest))
4667 struct insn_link *link;
4668 rtx_insn *i2_insn = 0;
4669 rtx i2_val = 0, set;
4671 /* The insn that used to set this register doesn't exist, and
4672 this life of the register may not exist either. See if one of
4673 I3's links points to an insn that sets I2DEST. If it does,
4674 that is now the last known value for I2DEST. If we don't update
4675 this and I2 set the register to a value that depended on its old
4676 contents, we will get confused. If this insn is used, things
4677 will be set correctly in combine_instructions. */
4678 FOR_EACH_LOG_LINK (link, i3)
4679 if ((set = single_set (link->insn)) != 0
4680 && rtx_equal_p (i2dest, SET_DEST (set)))
4681 i2_insn = link->insn, i2_val = SET_SRC (set);
4683 record_value_for_reg (i2dest, i2_insn, i2_val);
4685 /* If the reg formerly set in I2 died only once and that was in I3,
4686 zero its use count so it won't make `reload' do any work. */
4687 if (! added_sets_2
4688 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4689 && ! i2dest_in_i2src
4690 && REGNO (i2dest) < reg_n_sets_max)
4691 INC_REG_N_SETS (REGNO (i2dest), -1);
4694 if (i1 && REG_P (i1dest))
4696 struct insn_link *link;
4697 rtx_insn *i1_insn = 0;
4698 rtx i1_val = 0, set;
4700 FOR_EACH_LOG_LINK (link, i3)
4701 if ((set = single_set (link->insn)) != 0
4702 && rtx_equal_p (i1dest, SET_DEST (set)))
4703 i1_insn = link->insn, i1_val = SET_SRC (set);
4705 record_value_for_reg (i1dest, i1_insn, i1_val);
4707 if (! added_sets_1
4708 && ! i1dest_in_i1src
4709 && REGNO (i1dest) < reg_n_sets_max)
4710 INC_REG_N_SETS (REGNO (i1dest), -1);
4713 if (i0 && REG_P (i0dest))
4715 struct insn_link *link;
4716 rtx_insn *i0_insn = 0;
4717 rtx i0_val = 0, set;
4719 FOR_EACH_LOG_LINK (link, i3)
4720 if ((set = single_set (link->insn)) != 0
4721 && rtx_equal_p (i0dest, SET_DEST (set)))
4722 i0_insn = link->insn, i0_val = SET_SRC (set);
4724 record_value_for_reg (i0dest, i0_insn, i0_val);
4726 if (! added_sets_0
4727 && ! i0dest_in_i0src
4728 && REGNO (i0dest) < reg_n_sets_max)
4729 INC_REG_N_SETS (REGNO (i0dest), -1);
4732 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4733 been made to this insn. The order is important, because newi2pat
4734 can affect nonzero_bits of newpat. */
4735 if (newi2pat)
4736 note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4737 note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4740 if (undobuf.other_insn != NULL_RTX)
4742 if (dump_file)
4744 fprintf (dump_file, "modifying other_insn ");
4745 dump_insn_slim (dump_file, undobuf.other_insn);
4747 df_insn_rescan (undobuf.other_insn);
4750 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4752 if (dump_file)
4754 fprintf (dump_file, "modifying insn i0 ");
4755 dump_insn_slim (dump_file, i0);
4757 df_insn_rescan (i0);
4760 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4762 if (dump_file)
4764 fprintf (dump_file, "modifying insn i1 ");
4765 dump_insn_slim (dump_file, i1);
4767 df_insn_rescan (i1);
4770 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4772 if (dump_file)
4774 fprintf (dump_file, "modifying insn i2 ");
4775 dump_insn_slim (dump_file, i2);
4777 df_insn_rescan (i2);
4780 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4782 if (dump_file)
4784 fprintf (dump_file, "modifying insn i3 ");
4785 dump_insn_slim (dump_file, i3);
4787 df_insn_rescan (i3);
4790 /* Set new_direct_jump_p if a new return or simple jump instruction
4791 has been created. Adjust the CFG accordingly. */
4792 if (returnjump_p (i3) || any_uncondjump_p (i3))
4794 *new_direct_jump_p = 1;
4795 mark_jump_label (PATTERN (i3), i3, 0);
4796 update_cfg_for_uncondjump (i3);
4799 if (undobuf.other_insn != NULL_RTX
4800 && (returnjump_p (undobuf.other_insn)
4801 || any_uncondjump_p (undobuf.other_insn)))
4803 *new_direct_jump_p = 1;
4804 update_cfg_for_uncondjump (undobuf.other_insn);
4807 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4808 && XEXP (PATTERN (i3), 0) == const1_rtx)
4810 basic_block bb = BLOCK_FOR_INSN (i3);
4811 gcc_assert (bb);
4812 remove_edge (split_block (bb, i3));
4813 emit_barrier_after_bb (bb);
4814 *new_direct_jump_p = 1;
4817 if (undobuf.other_insn
4818 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4819 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4821 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4822 gcc_assert (bb);
4823 remove_edge (split_block (bb, undobuf.other_insn));
4824 emit_barrier_after_bb (bb);
4825 *new_direct_jump_p = 1;
4828 /* A noop might also need cleaning up of the CFG, if it comes from the
4829 simplification of a jump. */
4830 if (JUMP_P (i3)
4831 && GET_CODE (newpat) == SET
4832 && SET_SRC (newpat) == pc_rtx
4833 && SET_DEST (newpat) == pc_rtx)
4835 *new_direct_jump_p = 1;
4836 update_cfg_for_uncondjump (i3);
4839 if (undobuf.other_insn != NULL_RTX
4840 && JUMP_P (undobuf.other_insn)
4841 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4842 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4843 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4845 *new_direct_jump_p = 1;
4846 update_cfg_for_uncondjump (undobuf.other_insn);
4849 combine_successes++;
4850 undo_commit ();
4852 rtx_insn *ret = newi2pat ? i2 : i3;
4853 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4854 ret = added_links_insn;
4855 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4856 ret = added_notes_insn;
4858 return ret;
4861 /* Get a marker for undoing to the current state. */
4863 static void *
4864 get_undo_marker (void)
4866 return undobuf.undos;
4869 /* Undo the modifications up to the marker. */
4871 static void
4872 undo_to_marker (void *marker)
4874 struct undo *undo, *next;
4876 for (undo = undobuf.undos; undo != marker; undo = next)
4878 gcc_assert (undo);
4880 next = undo->next;
4881 switch (undo->kind)
4883 case UNDO_RTX:
4884 *undo->where.r = undo->old_contents.r;
4885 break;
4886 case UNDO_INT:
4887 *undo->where.i = undo->old_contents.i;
4888 break;
4889 case UNDO_MODE:
4890 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4891 break;
4892 case UNDO_LINKS:
4893 *undo->where.l = undo->old_contents.l;
4894 break;
4895 default:
4896 gcc_unreachable ();
4899 undo->next = undobuf.frees;
4900 undobuf.frees = undo;
4903 undobuf.undos = (struct undo *) marker;
4906 /* Undo all the modifications recorded in undobuf. */
4908 static void
4909 undo_all (void)
4911 undo_to_marker (0);
4914 /* We've committed to accepting the changes we made. Move all
4915 of the undos to the free list. */
4917 static void
4918 undo_commit (void)
4920 struct undo *undo, *next;
4922 for (undo = undobuf.undos; undo; undo = next)
4924 next = undo->next;
4925 undo->next = undobuf.frees;
4926 undobuf.frees = undo;
4928 undobuf.undos = 0;
4931 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4932 where we have an arithmetic expression and return that point. LOC will
4933 be inside INSN.
4935 try_combine will call this function to see if an insn can be split into
4936 two insns. */
4938 static rtx *
4939 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4941 rtx x = *loc;
4942 enum rtx_code code = GET_CODE (x);
4943 rtx *split;
4944 unsigned HOST_WIDE_INT len = 0;
4945 HOST_WIDE_INT pos = 0;
4946 int unsignedp = 0;
4947 rtx inner = NULL_RTX;
4948 scalar_int_mode mode, inner_mode;
4950 /* First special-case some codes. */
4951 switch (code)
4953 case SUBREG:
4954 #ifdef INSN_SCHEDULING
4955 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4956 point. */
4957 if (MEM_P (SUBREG_REG (x)))
4958 return loc;
4959 #endif
4960 return find_split_point (&SUBREG_REG (x), insn, false);
4962 case MEM:
4963 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4964 using LO_SUM and HIGH. */
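	 /* E.g. (mem (symbol_ref "x")) becomes
	    (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))
	    and the HIGH term is returned as the split point.  */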
4965 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4966 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4968 machine_mode address_mode = get_address_mode (x);
4970 SUBST (XEXP (x, 0),
4971 gen_rtx_LO_SUM (address_mode,
4972 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4973 XEXP (x, 0)));
4974 return &XEXP (XEXP (x, 0), 0);
4977 /* If we have a PLUS whose second operand is a constant and the
4978 address is not valid, perhaps we can split it up using
4979 the machine-specific way to split large constants. We use
4980 the first pseudo-reg (one of the virtual regs) as a placeholder;
4981 it will not remain in the result. */
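	 /* For example, on a target where (plus (reg) (const_int 0x12345))
	    is not a valid address, the backend splitter may produce a
	    high-part set followed by a low-part add, and we then look for a
	    split point between the two.  (The constant is only an example.)  */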
4982 if (GET_CODE (XEXP (x, 0)) == PLUS
4983 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4984 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4985 MEM_ADDR_SPACE (x)))
4987 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4988 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4989 subst_insn);
4991 /* This should have produced two insns, each of which sets our
4992 placeholder. If the source of the second is a valid address,
4993 we can put both sources together and make a split point
4994 in the middle. */
4996 if (seq
4997 && NEXT_INSN (seq) != NULL_RTX
4998 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4999 && NONJUMP_INSN_P (seq)
5000 && GET_CODE (PATTERN (seq)) == SET
5001 && SET_DEST (PATTERN (seq)) == reg
5002 && ! reg_mentioned_p (reg,
5003 SET_SRC (PATTERN (seq)))
5004 && NONJUMP_INSN_P (NEXT_INSN (seq))
5005 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
5006 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
5007 && memory_address_addr_space_p
5008 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
5009 MEM_ADDR_SPACE (x)))
5011 rtx src1 = SET_SRC (PATTERN (seq));
5012 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
5014 /* Replace the placeholder in SRC2 with SRC1. If we can
5015 find where in SRC2 it was placed, that can become our
5016 split point and we can replace this address with SRC2.
5017 Just try two obvious places. */
5019 src2 = replace_rtx (src2, reg, src1);
5020 split = 0;
5021 if (XEXP (src2, 0) == src1)
5022 split = &XEXP (src2, 0);
5023 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
5024 && XEXP (XEXP (src2, 0), 0) == src1)
5025 split = &XEXP (XEXP (src2, 0), 0);
5027 if (split)
5029 SUBST (XEXP (x, 0), src2);
5030 return split;
5034 /* If that didn't work and we have a nested plus, like:
5035 ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
5036 is a valid address, try to split (REG1 * CONST1).
5037 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5038 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5039 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5040 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5041 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5042 0), 0)))))
5044 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5045 XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5046 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5047 MEM_ADDR_SPACE (x)))
5049 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5050 return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5052 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5054 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5055 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5056 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5057 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5058 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5059 0), 1)))))
5061 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5062 XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5063 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5064 MEM_ADDR_SPACE (x)))
5066 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5067 return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5069 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5072 /* If that didn't work, perhaps the first operand is complex and
5073 needs to be computed separately, so make a split point there.
5074 This will occur on machines that just support REG + CONST
5075 and have a constant moved through some previous computation. */
5076 if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5077 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5078 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5079 return &XEXP (XEXP (x, 0), 0);
5082 /* If we have a PLUS whose first operand is complex, try computing it
5083 separately by making a split there. */
5084 if (GET_CODE (XEXP (x, 0)) == PLUS
5085 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5086 MEM_ADDR_SPACE (x))
5087 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5088 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5089 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5090 return &XEXP (XEXP (x, 0), 0);
5091 break;
5093 case SET:
5094 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5095 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5096 we need to put the operand into a register. So split at that
5097 point. */
5099 if (SET_DEST (x) == cc0_rtx
5100 && GET_CODE (SET_SRC (x)) != COMPARE
5101 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5102 && !OBJECT_P (SET_SRC (x))
5103 && ! (GET_CODE (SET_SRC (x)) == SUBREG
5104 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5105 return &SET_SRC (x);
5107 /* See if we can split SET_SRC as it stands. */
5108 split = find_split_point (&SET_SRC (x), insn, true);
5109 if (split && split != &SET_SRC (x))
5110 return split;
5112 /* See if we can split SET_DEST as it stands. */
5113 split = find_split_point (&SET_DEST (x), insn, false);
5114 if (split && split != &SET_DEST (x))
5115 return split;
5117 /* See if this is a bitfield assignment with everything constant. If
5118 so, this is an IOR of an AND, so split it into that. */
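	 /* I.e. the store becomes roughly
	      DEST = (DEST & ~(MASK << POS)) | (SRC << POS)
	    or simply DEST | (SRC << POS) when SRC covers the whole field.  */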
5119 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5120 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5121 &inner_mode)
5122 && HWI_COMPUTABLE_MODE_P (inner_mode)
5123 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5124 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5125 && CONST_INT_P (SET_SRC (x))
5126 && ((INTVAL (XEXP (SET_DEST (x), 1))
5127 + INTVAL (XEXP (SET_DEST (x), 2)))
5128 <= GET_MODE_PRECISION (inner_mode))
5129 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5131 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5132 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5133 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
5134 rtx dest = XEXP (SET_DEST (x), 0);
5135 unsigned HOST_WIDE_INT mask
5136 = (HOST_WIDE_INT_1U << len) - 1;
5137 rtx or_mask;
5139 if (BITS_BIG_ENDIAN)
5140 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5142 or_mask = gen_int_mode (src << pos, inner_mode);
5143 if (src == mask)
5144 SUBST (SET_SRC (x),
5145 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5146 else
5148 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5149 SUBST (SET_SRC (x),
5150 simplify_gen_binary (IOR, inner_mode,
5151 simplify_gen_binary (AND, inner_mode,
5152 dest, negmask),
5153 or_mask));
5156 SUBST (SET_DEST (x), dest);
5158 split = find_split_point (&SET_SRC (x), insn, true);
5159 if (split && split != &SET_SRC (x))
5160 return split;
5163 /* Otherwise, see if this is an operation that we can split into two.
5164 If so, try to split that. */
5165 code = GET_CODE (SET_SRC (x));
5167 switch (code)
5169 case AND:
5170 /* If we are AND'ing with a large constant that is only a single
5171 bit and the result is only being used in a context where we
5172 need to know if it is zero or nonzero, replace it with a bit
5173 extraction. This will avoid the large constant, which might
5174 have taken more than one insn to make. If the constant were
5175 not a valid argument to the AND but took only one insn to make,
5176 this is no worse, but if it took more than one insn, it will
5177 be better. */
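/* For instance, (set D (and R (const_int 256))) whose only use of D is
   (ne D (const_int 0)) has its SET_SRC replaced by
   (zero_extract R (const_int 1) (const_int 8)), and we then look for a
   split point in the rewritten SET. */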
5179 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5180 && REG_P (XEXP (SET_SRC (x), 0))
5181 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5182 && REG_P (SET_DEST (x))
5183 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5184 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5185 && XEXP (*split, 0) == SET_DEST (x)
5186 && XEXP (*split, 1) == const0_rtx)
5188 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5189 XEXP (SET_SRC (x), 0),
5190 pos, NULL_RTX, 1, 1, 0, 0);
5191 if (extraction != 0)
5193 SUBST (SET_SRC (x), extraction);
5194 return find_split_point (loc, insn, false);
5197 break;
5199 case NE:
5200 /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit of X
5201 is known to be on, this can be converted into a NEG of a shift. */
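/* For instance, if bit 3 is the only bit of X that may be nonzero,
   (ne X (const_int 0)) becomes (neg (lshiftrt X (const_int 3))),
   which evaluates to 0 or -1 as STORE_FLAG_VALUE requires. */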
5202 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5203 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5204 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5205 GET_MODE (XEXP (SET_SRC (x),
5206 0))))) >= 1))
5208 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5209 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5210 SUBST (SET_SRC (x),
5211 gen_rtx_NEG (mode,
5212 gen_rtx_LSHIFTRT (mode,
5213 XEXP (SET_SRC (x), 0),
5214 pos_rtx)));
5216 split = find_split_point (&SET_SRC (x), insn, true);
5217 if (split && split != &SET_SRC (x))
5218 return split;
5220 break;
5222 case SIGN_EXTEND:
5223 inner = XEXP (SET_SRC (x), 0);
5225 /* We can't optimize if either mode is a partial integer
5226 mode as we don't know how many bits are significant
5227 in those modes. */
5228 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5229 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5230 break;
5232 pos = 0;
5233 len = GET_MODE_PRECISION (inner_mode);
5234 unsignedp = 0;
5235 break;
5237 case SIGN_EXTRACT:
5238 case ZERO_EXTRACT:
5239 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5240 &inner_mode)
5241 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5242 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5244 inner = XEXP (SET_SRC (x), 0);
5245 len = INTVAL (XEXP (SET_SRC (x), 1));
5246 pos = INTVAL (XEXP (SET_SRC (x), 2));
5248 if (BITS_BIG_ENDIAN)
5249 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5250 unsignedp = (code == ZERO_EXTRACT);
5252 break;
5254 default:
5255 break;
5258 if (len
5259 && known_subrange_p (pos, len,
5260 0, GET_MODE_PRECISION (GET_MODE (inner)))
5261 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5263 /* For unsigned, we have a choice of a shift followed by an
5264 AND or two shifts. Use two shifts for field sizes where the
5265 constant might be too large. We assume here that we can
5266 always at least get 8-bit constants in an AND insn, which is
5267 true for every current RISC. */
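/* For instance (!BITS_BIG_ENDIAN, SImode): an unsigned 8-bit field at
   bit 4 of INNER becomes
   (and (lshiftrt INNER (const_int 4)) (const_int 255)), while the
   signed form becomes
   (ashiftrt (ashift INNER (const_int 20)) (const_int 24)). */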
5269 if (unsignedp && len <= 8)
5271 unsigned HOST_WIDE_INT mask
5272 = (HOST_WIDE_INT_1U << len) - 1;
5273 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5274 SUBST (SET_SRC (x),
5275 gen_rtx_AND (mode,
5276 gen_rtx_LSHIFTRT
5277 (mode, gen_lowpart (mode, inner), pos_rtx),
5278 gen_int_mode (mask, mode)));
5280 split = find_split_point (&SET_SRC (x), insn, true);
5281 if (split && split != &SET_SRC (x))
5282 return split;
5284 else
5286 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5287 int right_bits = GET_MODE_PRECISION (mode) - len;
5288 SUBST (SET_SRC (x),
5289 gen_rtx_fmt_ee
5290 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5291 gen_rtx_ASHIFT (mode,
5292 gen_lowpart (mode, inner),
5293 gen_int_shift_amount (mode, left_bits)),
5294 gen_int_shift_amount (mode, right_bits)));
5296 split = find_split_point (&SET_SRC (x), insn, true);
5297 if (split && split != &SET_SRC (x))
5298 return split;
5302 /* See if this is a simple operation with a constant as the second
5303 operand. It might be that this constant is out of range and hence
5304 could be used as a split point. */
5305 if (BINARY_P (SET_SRC (x))
5306 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5307 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5308 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5309 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5310 return &XEXP (SET_SRC (x), 1);
5312 /* Finally, see if this is a simple operation with its first operand
5313 not in a register. The operation might require this operand in a
5314 register, so return it as a split point. We can always do this
5315 because if the first operand were another operation, we would have
5316 already found it as a split point. */
5317 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5318 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5319 return &XEXP (SET_SRC (x), 0);
5321 return 0;
5323 case AND:
5324 case IOR:
5325 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5326 it is better to write this as (not (ior A B)) so we can split it.
5327 Similarly for IOR. */
5328 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5330 SUBST (*loc,
5331 gen_rtx_NOT (GET_MODE (x),
5332 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5333 GET_MODE (x),
5334 XEXP (XEXP (x, 0), 0),
5335 XEXP (XEXP (x, 1), 0))));
5336 return find_split_point (loc, insn, set_src);
5339 /* Many RISC machines have a large set of logical insns. If the
5340 second operand is a NOT, put it first so we will try to split the
5341 other operand first. */
5342 if (GET_CODE (XEXP (x, 1)) == NOT)
5344 rtx tem = XEXP (x, 0);
5345 SUBST (XEXP (x, 0), XEXP (x, 1));
5346 SUBST (XEXP (x, 1), tem);
5348 break;
5350 case PLUS:
5351 case MINUS:
5352 /* Canonicalization can produce (minus A (mult B C)), where C is a
5353 constant. It may be better to try splitting (plus (mult B -C) A)
5354 instead if this isn't a multiply by a power of two. */
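/* For instance, (minus A (mult B (const_int 3))) is rewritten as
   (plus (mult B (const_int -3)) A) before looking for a split point. */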
5355 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5356 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5357 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5359 machine_mode mode = GET_MODE (x);
5360 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5361 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5362 SUBST (*loc, gen_rtx_PLUS (mode,
5363 gen_rtx_MULT (mode,
5364 XEXP (XEXP (x, 1), 0),
5365 gen_int_mode (other_int,
5366 mode)),
5367 XEXP (x, 0)));
5368 return find_split_point (loc, insn, set_src);
5371 /* Split at a multiply-accumulate instruction. However if this is
5372 the SET_SRC, we likely do not have such an instruction and it's
5373 worthless to try this split. */
5374 if (!set_src
5375 && (GET_CODE (XEXP (x, 0)) == MULT
5376 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5377 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5378 return loc;
5380 default:
5381 break;
5384 /* Otherwise, select our actions depending on our rtx class. */
5385 switch (GET_RTX_CLASS (code))
5387 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5388 case RTX_TERNARY:
5389 split = find_split_point (&XEXP (x, 2), insn, false);
5390 if (split)
5391 return split;
5392 /* fall through */
5393 case RTX_BIN_ARITH:
5394 case RTX_COMM_ARITH:
5395 case RTX_COMPARE:
5396 case RTX_COMM_COMPARE:
5397 split = find_split_point (&XEXP (x, 1), insn, false);
5398 if (split)
5399 return split;
5400 /* fall through */
5401 case RTX_UNARY:
5402 /* Some machines have (and (shift ...) ...) insns. If X is not
5403 an AND, but XEXP (X, 0) is, use it as our split point. */
5404 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5405 return &XEXP (x, 0);
5407 split = find_split_point (&XEXP (x, 0), insn, false);
5408 if (split)
5409 return split;
5410 return loc;
5412 default:
5413 /* Otherwise, we don't have a split point. */
5414 return 0;
5418 /* Throughout X, replace FROM with TO, and return the result.
5419 The result is TO if X is FROM;
5420 otherwise the result is X, but its contents may have been modified.
5421 If they were modified, a record was made in undobuf so that
5422 undo_all will (among other things) return X to its original state.
5424 If the number of changes necessary is too much to record to undo,
5425 the excess changes are not made, so the result is invalid.
5426 The changes already made can still be undone.
5427 undobuf.num_undo is incremented for such changes, so by testing that,
5428 the caller can tell whether the result is valid.
5430 `n_occurrences' is incremented each time FROM is replaced.
5432 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5434 IN_COND is nonzero if we are at the top level of a condition.
5436 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5437 by copying if `n_occurrences' is nonzero. */
5439 static rtx
5440 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5442 enum rtx_code code = GET_CODE (x);
5443 machine_mode op0_mode = VOIDmode;
5444 const char *fmt;
5445 int len, i;
5446 rtx new_rtx;
5448 /* Two expressions are equal if they are identical copies of a shared
5449 RTX or if they are both registers with the same register number
5450 and mode. */
5452 #define COMBINE_RTX_EQUAL_P(X,Y) \
5453 ((X) == (Y) \
5454 || (REG_P (X) && REG_P (Y) \
5455 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5457 /* Do not substitute into clobbers of regs -- this will never result in
5458 valid RTL. */
5459 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5460 return x;
5462 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5464 n_occurrences++;
5465 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5468 /* If X and FROM are the same register but different modes, they
5469 will not have been seen as equal above. However, the log links code
5470 will make a LOG_LINKS entry for that case. If we do nothing, we
5471 will try to rerecognize our original insn and, when it succeeds,
5472 we will delete the feeding insn, which is incorrect.
5474 So force this insn not to match in this (rare) case. */
5475 if (! in_dest && code == REG && REG_P (from)
5476 && reg_overlap_mentioned_p (x, from))
5477 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5479 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5480 of which may contain things that can be combined. */
5481 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5482 return x;
5484 /* It is possible to have a subexpression appear twice in the insn.
5485 Suppose that FROM is a register that appears within TO.
5486 Then, after that subexpression has been scanned once by `subst',
5487 the second time it is scanned, TO may be found. If we were
5488 to scan TO here, we would find FROM within it and create a
5489 self-referential rtl structure which is completely wrong. */
5490 if (COMBINE_RTX_EQUAL_P (x, to))
5491 return to;
5493 /* Parallel asm_operands need special attention because all of the
5494 inputs are shared across the arms. Furthermore, unsharing the
5495 rtl results in recognition failures. Failure to handle this case
5496 specially can result in circular rtl.
5498 Solve this by doing a normal pass across the first entry of the
5499 parallel, and only processing the SET_DESTs of the subsequent
5500 entries. Ug. */
5502 if (code == PARALLEL
5503 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5504 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5506 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5508 /* If this substitution failed, this whole thing fails. */
5509 if (GET_CODE (new_rtx) == CLOBBER
5510 && XEXP (new_rtx, 0) == const0_rtx)
5511 return new_rtx;
5513 SUBST (XVECEXP (x, 0, 0), new_rtx);
5515 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5517 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5519 if (!REG_P (dest)
5520 && GET_CODE (dest) != CC0
5521 && GET_CODE (dest) != PC)
5523 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5525 /* If this substitution failed, this whole thing fails. */
5526 if (GET_CODE (new_rtx) == CLOBBER
5527 && XEXP (new_rtx, 0) == const0_rtx)
5528 return new_rtx;
5530 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5534 else
5536 len = GET_RTX_LENGTH (code);
5537 fmt = GET_RTX_FORMAT (code);
5539 /* We don't need to process a SET_DEST that is a register, CC0,
5540 or PC, so set up to skip this common case. All other cases
5541 where we want to suppress replacing something inside a
5542 SET_SRC are handled via the IN_DEST operand. */
5543 if (code == SET
5544 && (REG_P (SET_DEST (x))
5545 || GET_CODE (SET_DEST (x)) == CC0
5546 || GET_CODE (SET_DEST (x)) == PC))
5547 fmt = "ie";
5549 /* Trying to simplify the operands of a widening MULT is not likely
5550 to create RTL matching a machine insn. */
5551 if (code == MULT
5552 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5553 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5554 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5555 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5556 && REG_P (XEXP (XEXP (x, 0), 0))
5557 && REG_P (XEXP (XEXP (x, 1), 0))
5558 && from == to)
5559 return x;
5562 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5563 constant. */
5564 if (fmt[0] == 'e')
5565 op0_mode = GET_MODE (XEXP (x, 0));
5567 for (i = 0; i < len; i++)
5569 if (fmt[i] == 'E')
5571 int j;
5572 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5574 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5576 new_rtx = (unique_copy && n_occurrences
5577 ? copy_rtx (to) : to);
5578 n_occurrences++;
5580 else
5582 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5583 unique_copy);
5585 /* If this substitution failed, this whole thing
5586 fails. */
5587 if (GET_CODE (new_rtx) == CLOBBER
5588 && XEXP (new_rtx, 0) == const0_rtx)
5589 return new_rtx;
5592 SUBST (XVECEXP (x, i, j), new_rtx);
5595 else if (fmt[i] == 'e')
5597 /* If this is a register being set, ignore it. */
5598 new_rtx = XEXP (x, i);
5599 if (in_dest
5600 && i == 0
5601 && (((code == SUBREG || code == ZERO_EXTRACT)
5602 && REG_P (new_rtx))
5603 || code == STRICT_LOW_PART))
5606 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5608 /* In general, don't install a subreg involving two
5609 modes not tieable. It can worsen register
5610 allocation, and can even make invalid reload
5611 insns, since the reg inside may need to be copied
5612 from in the outside mode, and that may be invalid
5613 if it is an fp reg copied in integer mode.
5615 We allow two exceptions to this: It is valid if
5616 it is inside another SUBREG and the mode of that
5617 SUBREG and the mode of the inside of TO is
5618 tieable and it is valid if X is a SET that copies
5619 FROM to CC0. */
5621 if (GET_CODE (to) == SUBREG
5622 && !targetm.modes_tieable_p (GET_MODE (to),
5623 GET_MODE (SUBREG_REG (to)))
5624 && ! (code == SUBREG
5625 && (targetm.modes_tieable_p
5626 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5627 && (!HAVE_cc0
5628 || (! (code == SET
5629 && i == 1
5630 && XEXP (x, 0) == cc0_rtx))))
5631 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5633 if (code == SUBREG
5634 && REG_P (to)
5635 && REGNO (to) < FIRST_PSEUDO_REGISTER
5636 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5637 SUBREG_BYTE (x),
5638 GET_MODE (x)) < 0)
5639 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5641 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5642 n_occurrences++;
5644 else
5645 /* If we are in a SET_DEST, suppress most cases unless we
5646 have gone inside a MEM, in which case we want to
5647 simplify the address. We assume here that things that
5648 are actually part of the destination have their inner
5649 parts in the first expression. This is true for SUBREG,
5650 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5651 things aside from REG and MEM that should appear in a
5652 SET_DEST. */
5653 new_rtx = subst (XEXP (x, i), from, to,
5654 (((in_dest
5655 && (code == SUBREG || code == STRICT_LOW_PART
5656 || code == ZERO_EXTRACT))
5657 || code == SET)
5658 && i == 0),
5659 code == IF_THEN_ELSE && i == 0,
5660 unique_copy);
5662 /* If we found that we will have to reject this combination,
5663 indicate that by returning the CLOBBER ourselves, rather than
5664 an expression containing it. This will speed things up as
5665 well as prevent accidents where two CLOBBERs are considered
5666 to be equal, thus producing an incorrect simplification. */
5668 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5669 return new_rtx;
5671 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5673 machine_mode mode = GET_MODE (x);
5675 x = simplify_subreg (GET_MODE (x), new_rtx,
5676 GET_MODE (SUBREG_REG (x)),
5677 SUBREG_BYTE (x));
5678 if (! x)
5679 x = gen_rtx_CLOBBER (mode, const0_rtx);
5681 else if (CONST_SCALAR_INT_P (new_rtx)
5682 && (GET_CODE (x) == ZERO_EXTEND
5683 || GET_CODE (x) == SIGN_EXTEND
5684 || GET_CODE (x) == FLOAT
5685 || GET_CODE (x) == UNSIGNED_FLOAT))
5687 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5688 new_rtx,
5689 GET_MODE (XEXP (x, 0)));
5690 if (!x)
5691 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5693 else
5694 SUBST (XEXP (x, i), new_rtx);
5699 /* Check if we are loading something from the constant pool via float
5700 extension; in this case we would undo compress_float_constant
5701 optimization and degrade the constant load to an immediate value. */
5702 if (GET_CODE (x) == FLOAT_EXTEND
5703 && MEM_P (XEXP (x, 0))
5704 && MEM_READONLY_P (XEXP (x, 0)))
5706 rtx tmp = avoid_constant_pool_reference (x);
5707 if (x != tmp)
5708 return x;
5711 /* Try to simplify X. If the simplification changed the code, it is likely
5712 that further simplification will help, so loop, but limit the number
5713 of repetitions that will be performed. */
5715 for (i = 0; i < 4; i++)
5717 /* If X is sufficiently simple, don't bother trying to do anything
5718 with it. */
5719 if (code != CONST_INT && code != REG && code != CLOBBER)
5720 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5722 if (GET_CODE (x) == code)
5723 break;
5725 code = GET_CODE (x);
5727 /* We no longer know the original mode of operand 0 since we
5728 have changed the form of X. */
5729 op0_mode = VOIDmode;
5732 return x;
5735 /* If X is a commutative operation whose operands are not in the canonical
5736 order, use substitutions to swap them. */
5738 static void
5739 maybe_swap_commutative_operands (rtx x)
5741 if (COMMUTATIVE_ARITH_P (x)
5742 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5744 rtx temp = XEXP (x, 0);
5745 SUBST (XEXP (x, 0), XEXP (x, 1));
5746 SUBST (XEXP (x, 1), temp);
5750 /* Simplify X, a piece of RTL. We just operate on the expression at the
5751 outer level; call `subst' to simplify recursively. Return the new
5752 expression.
5754 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5755 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5756 of a condition. */
5758 static rtx
5759 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5760 int in_cond)
5762 enum rtx_code code = GET_CODE (x);
5763 machine_mode mode = GET_MODE (x);
5764 scalar_int_mode int_mode;
5765 rtx temp;
5766 int i;
5768 /* If this is a commutative operation, put a constant last and a complex
5769 expression first. We don't need to do this for comparisons here. */
5770 maybe_swap_commutative_operands (x);
5772 /* Try to fold this expression in case we have constants that weren't
5773 present before. */
5774 temp = 0;
5775 switch (GET_RTX_CLASS (code))
5777 case RTX_UNARY:
5778 if (op0_mode == VOIDmode)
5779 op0_mode = GET_MODE (XEXP (x, 0));
5780 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5781 break;
5782 case RTX_COMPARE:
5783 case RTX_COMM_COMPARE:
5785 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5786 if (cmp_mode == VOIDmode)
5788 cmp_mode = GET_MODE (XEXP (x, 1));
5789 if (cmp_mode == VOIDmode)
5790 cmp_mode = op0_mode;
5792 temp = simplify_relational_operation (code, mode, cmp_mode,
5793 XEXP (x, 0), XEXP (x, 1));
5795 break;
5796 case RTX_COMM_ARITH:
5797 case RTX_BIN_ARITH:
5798 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5799 break;
5800 case RTX_BITFIELD_OPS:
5801 case RTX_TERNARY:
5802 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5803 XEXP (x, 1), XEXP (x, 2));
5804 break;
5805 default:
5806 break;
5809 if (temp)
5811 x = temp;
5812 code = GET_CODE (temp);
5813 op0_mode = VOIDmode;
5814 mode = GET_MODE (temp);
5817 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5818 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5819 things. Check for cases where both arms are testing the same
5820 condition.
5822 Don't do anything if all operands are very simple. */
5824 if ((BINARY_P (x)
5825 && ((!OBJECT_P (XEXP (x, 0))
5826 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5827 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5828 || (!OBJECT_P (XEXP (x, 1))
5829 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5830 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5831 || (UNARY_P (x)
5832 && (!OBJECT_P (XEXP (x, 0))
5833 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5834 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5836 rtx cond, true_rtx, false_rtx;
5838 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5839 if (cond != 0
5840 /* If everything is a comparison, what we have is highly unlikely
5841 to be simpler, so don't use it. */
5842 && ! (COMPARISON_P (x)
5843 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5844 /* Similarly, if we end up with one of the expressions the same
5845 as the original, it is certainly not simpler. */
5846 && ! rtx_equal_p (x, true_rtx)
5847 && ! rtx_equal_p (x, false_rtx))
5849 rtx cop1 = const0_rtx;
5850 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5852 if (cond_code == NE && COMPARISON_P (cond))
5853 return x;
5855 /* Simplify the alternative arms; this may collapse the true and
5856 false arms to store-flag values. Be careful to use copy_rtx
5857 here since true_rtx or false_rtx might share RTL with x as a
5858 result of the if_then_else_cond call above. */
5859 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5860 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5862 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5863 is unlikely to be simpler. */
5864 if (general_operand (true_rtx, VOIDmode)
5865 && general_operand (false_rtx, VOIDmode))
5867 enum rtx_code reversed;
5869 /* Restarting if we generate a store-flag expression will cause
5870 us to loop. Just drop through in this case. */
5872 /* If the result values are STORE_FLAG_VALUE and zero, we can
5873 just make the comparison operation. */
5874 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5875 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5876 cond, cop1);
5877 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5878 && ((reversed = reversed_comparison_code_parts
5879 (cond_code, cond, cop1, NULL))
5880 != UNKNOWN))
5881 x = simplify_gen_relational (reversed, mode, VOIDmode,
5882 cond, cop1);
5884 /* Likewise, we can make the negate of a comparison operation
5885 if the result values are - STORE_FLAG_VALUE and zero. */
5886 else if (CONST_INT_P (true_rtx)
5887 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5888 && false_rtx == const0_rtx)
5889 x = simplify_gen_unary (NEG, mode,
5890 simplify_gen_relational (cond_code,
5891 mode, VOIDmode,
5892 cond, cop1),
5893 mode);
5894 else if (CONST_INT_P (false_rtx)
5895 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5896 && true_rtx == const0_rtx
5897 && ((reversed = reversed_comparison_code_parts
5898 (cond_code, cond, cop1, NULL))
5899 != UNKNOWN))
5900 x = simplify_gen_unary (NEG, mode,
5901 simplify_gen_relational (reversed,
5902 mode, VOIDmode,
5903 cond, cop1),
5904 mode);
5906 code = GET_CODE (x);
5907 op0_mode = VOIDmode;
5912 /* First see if we can apply the inverse distributive law. */
5913 if (code == PLUS || code == MINUS
5914 || code == AND || code == IOR || code == XOR)
5916 x = apply_distributive_law (x);
5917 code = GET_CODE (x);
5918 op0_mode = VOIDmode;
5921 /* If CODE is an associative operation not otherwise handled, see if we
5922 can associate some operands. This can win if they are constants or
5923 if they are logically related (i.e. (a & b) & a). */
5924 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5925 || code == AND || code == IOR || code == XOR
5926 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5927 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5928 || (flag_associative_math && FLOAT_MODE_P (mode))))
5930 if (GET_CODE (XEXP (x, 0)) == code)
5932 rtx other = XEXP (XEXP (x, 0), 0);
5933 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5934 rtx inner_op1 = XEXP (x, 1);
5935 rtx inner;
5937 /* Make sure we pass the constant operand if any as the second
5938 one if this is a commutative operation. */
5939 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5940 std::swap (inner_op0, inner_op1);
5941 inner = simplify_binary_operation (code == MINUS ? PLUS
5942 : code == DIV ? MULT
5943 : code,
5944 mode, inner_op0, inner_op1);
5946 /* For commutative operations, try the other pair if that one
5947 didn't simplify. */
5948 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5950 other = XEXP (XEXP (x, 0), 1);
5951 inner = simplify_binary_operation (code, mode,
5952 XEXP (XEXP (x, 0), 0),
5953 XEXP (x, 1));
5956 if (inner)
5957 return simplify_gen_binary (code, mode, other, inner);
5961 /* A little bit of algebraic simplification here. */
5962 switch (code)
5964 case MEM:
5965 /* Ensure that our address has any ASHIFTs converted to MULT in case
5966 address-recognizing predicates are called later. */
5967 temp = make_compound_operation (XEXP (x, 0), MEM);
5968 SUBST (XEXP (x, 0), temp);
5969 break;
5971 case SUBREG:
5972 if (op0_mode == VOIDmode)
5973 op0_mode = GET_MODE (SUBREG_REG (x));
5975 /* See if this can be moved to simplify_subreg. */
5976 if (CONSTANT_P (SUBREG_REG (x))
5977 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5978 /* Don't call gen_lowpart if the inner mode
5979 is VOIDmode and we cannot simplify it, as SUBREG without
5980 inner mode is invalid. */
5981 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5982 || gen_lowpart_common (mode, SUBREG_REG (x))))
5983 return gen_lowpart (mode, SUBREG_REG (x));
5985 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5986 break;
5988 rtx temp;
5989 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5990 SUBREG_BYTE (x));
5991 if (temp)
5992 return temp;
5994 /* If op is known to have all lower bits zero, the result is zero. */
5995 scalar_int_mode int_mode, int_op0_mode;
5996 if (!in_dest
5997 && is_a <scalar_int_mode> (mode, &int_mode)
5998 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5999 && (GET_MODE_PRECISION (int_mode)
6000 < GET_MODE_PRECISION (int_op0_mode))
6001 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
6002 SUBREG_BYTE (x))
6003 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
6004 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
6005 & GET_MODE_MASK (int_mode)) == 0)
6006 && !side_effects_p (SUBREG_REG (x)))
6007 return CONST0_RTX (int_mode);
6010 /* Don't change the mode of the MEM if that would change the meaning
6011 of the address. */
6012 if (MEM_P (SUBREG_REG (x))
6013 && (MEM_VOLATILE_P (SUBREG_REG (x))
6014 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
6015 MEM_ADDR_SPACE (SUBREG_REG (x)))))
6016 return gen_rtx_CLOBBER (mode, const0_rtx);
6018 /* Note that we cannot do any narrowing for non-constants since
6019 we might have been counting on using the fact that some bits were
6020 zero. We now do this in the SET. */
6022 break;
6024 case NEG:
6025 temp = expand_compound_operation (XEXP (x, 0));
6027 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
6028 replaced by (lshiftrt X C). This will convert
6029 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
6031 if (GET_CODE (temp) == ASHIFTRT
6032 && CONST_INT_P (XEXP (temp, 1))
6033 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
6034 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
6035 INTVAL (XEXP (temp, 1)));
6037 /* If X has only a single bit that might be nonzero, say, bit I, convert
6038 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
6039 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
6040 (sign_extract X 1 Y). But only do this if TEMP isn't a register
6041 or a SUBREG of one since we'd be making the expression more
6042 complex if it was just a register. */
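/* For instance, in SImode, if only the low bit of TEMP may be nonzero,
   (neg TEMP) becomes
   (ashiftrt (ashift TEMP (const_int 31)) (const_int 31)). */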
6044 if (!REG_P (temp)
6045 && ! (GET_CODE (temp) == SUBREG
6046 && REG_P (SUBREG_REG (temp)))
6047 && is_a <scalar_int_mode> (mode, &int_mode)
6048 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
6050 rtx temp1 = simplify_shift_const
6051 (NULL_RTX, ASHIFTRT, int_mode,
6052 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
6053 GET_MODE_PRECISION (int_mode) - 1 - i),
6054 GET_MODE_PRECISION (int_mode) - 1 - i);
6056 /* If all we did was surround TEMP with the two shifts, we
6057 haven't improved anything, so don't use it. Otherwise,
6058 we are better off with TEMP1. */
6059 if (GET_CODE (temp1) != ASHIFTRT
6060 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
6061 || XEXP (XEXP (temp1, 0), 0) != temp)
6062 return temp1;
6064 break;
6066 case TRUNCATE:
6067 /* We can't handle truncation to a partial integer mode here
6068 because we don't know the real bitsize of the partial
6069 integer mode. */
6070 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
6071 break;
6073 if (HWI_COMPUTABLE_MODE_P (mode))
6074 SUBST (XEXP (x, 0),
6075 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6076 GET_MODE_MASK (mode), 0));
6078 /* We can truncate a constant value and return it. */
6080 poly_int64 c;
6081 if (poly_int_rtx_p (XEXP (x, 0), &c))
6082 return gen_int_mode (c, mode);
6085 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
6086 whose value is a comparison can be replaced with a subreg if
6087 STORE_FLAG_VALUE permits. */
6088 if (HWI_COMPUTABLE_MODE_P (mode)
6089 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6090 && (temp = get_last_value (XEXP (x, 0)))
6091 && COMPARISON_P (temp))
6092 return gen_lowpart (mode, XEXP (x, 0));
6093 break;
6095 case CONST:
6096 /* (const (const X)) can become (const X). Do it this way rather than
6097 returning the inner CONST since CONST can be shared with a
6098 REG_EQUAL note. */
6099 if (GET_CODE (XEXP (x, 0)) == CONST)
6100 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6101 break;
6103 case LO_SUM:
6104 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
6105 can add in an offset. find_split_point will split this address up
6106 again if it doesn't match. */
6107 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6108 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6109 return XEXP (x, 1);
6110 break;
6112 case PLUS:
6113 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6114 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6115 bit-field and can be replaced by either a sign_extend or a
6116 sign_extract. The `and' may be a zero_extend and the two
6117 <c>, -<c> constants may be reversed. */
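/* For instance, in SImode,
   (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128))
   sign-extends the low 8 bits of X and is rewritten as
   (ashiftrt (ashift X (const_int 24)) (const_int 24)). */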
6118 if (GET_CODE (XEXP (x, 0)) == XOR
6119 && is_a <scalar_int_mode> (mode, &int_mode)
6120 && CONST_INT_P (XEXP (x, 1))
6121 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6122 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6123 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6124 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6125 && HWI_COMPUTABLE_MODE_P (int_mode)
6126 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6127 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6128 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6129 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6130 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6131 && known_eq ((GET_MODE_PRECISION
6132 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6133 (unsigned int) i + 1))))
6134 return simplify_shift_const
6135 (NULL_RTX, ASHIFTRT, int_mode,
6136 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6137 XEXP (XEXP (XEXP (x, 0), 0), 0),
6138 GET_MODE_PRECISION (int_mode) - (i + 1)),
6139 GET_MODE_PRECISION (int_mode) - (i + 1));
6141 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6142 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6143 the bitsize of the mode - 1. This allows simplification of
6144 "a = (b & 8) == 0;" */
6145 if (XEXP (x, 1) == constm1_rtx
6146 && !REG_P (XEXP (x, 0))
6147 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6148 && REG_P (SUBREG_REG (XEXP (x, 0))))
6149 && is_a <scalar_int_mode> (mode, &int_mode)
6150 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6151 return simplify_shift_const
6152 (NULL_RTX, ASHIFTRT, int_mode,
6153 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6154 gen_rtx_XOR (int_mode, XEXP (x, 0),
6155 const1_rtx),
6156 GET_MODE_PRECISION (int_mode) - 1),
6157 GET_MODE_PRECISION (int_mode) - 1);
6159 /* If we are adding two things that have no bits in common, convert
6160 the addition into an IOR. This will often be further simplified,
6161 for example in cases like ((a & 1) + (a & 2)), which can
6162 become a & 3. */
6164 if (HWI_COMPUTABLE_MODE_P (mode)
6165 && (nonzero_bits (XEXP (x, 0), mode)
6166 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6168 /* Try to simplify the expression further. */
6169 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6170 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6172 /* If we could, great. If not, do not go ahead with the IOR
6173 replacement, since PLUS appears in many special purpose
6174 address arithmetic instructions. */
6175 if (GET_CODE (temp) != CLOBBER
6176 && (GET_CODE (temp) != IOR
6177 || ((XEXP (temp, 0) != XEXP (x, 0)
6178 || XEXP (temp, 1) != XEXP (x, 1))
6179 && (XEXP (temp, 0) != XEXP (x, 1)
6180 || XEXP (temp, 1) != XEXP (x, 0)))))
6181 return temp;
6184 /* Canonicalize x + x into x << 1. */
6185 if (GET_MODE_CLASS (mode) == MODE_INT
6186 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6187 && !side_effects_p (XEXP (x, 0)))
6188 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6190 break;
6192 case MINUS:
6193 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6194 (and <foo> (const_int pow2-1)) */
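/* For instance, (minus X (and X (const_int -8))) becomes
   (and X (const_int 7)), i.e. the low three bits of X. */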
6195 if (is_a <scalar_int_mode> (mode, &int_mode)
6196 && GET_CODE (XEXP (x, 1)) == AND
6197 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6198 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6199 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6200 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6201 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6202 break;
6204 case MULT:
6205 /* If we have (mult (plus A B) C), apply the distributive law and then
6206 the inverse distributive law to see if things simplify. This
6207 occurs mostly in addresses, often when unrolling loops. */
6209 if (GET_CODE (XEXP (x, 0)) == PLUS)
6211 rtx result = distribute_and_simplify_rtx (x, 0);
6212 if (result)
6213 return result;
6216 /* Try to simplify a*(b/c) as (a*b)/c. */
6217 if (FLOAT_MODE_P (mode) && flag_associative_math
6218 && GET_CODE (XEXP (x, 0)) == DIV)
6220 rtx tem = simplify_binary_operation (MULT, mode,
6221 XEXP (XEXP (x, 0), 0),
6222 XEXP (x, 1));
6223 if (tem)
6224 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6226 break;
6228 case UDIV:
6229 /* If this is a divide by a power of two, treat it as a shift if
6230 its first operand is a shift. */
6231 if (is_a <scalar_int_mode> (mode, &int_mode)
6232 && CONST_INT_P (XEXP (x, 1))
6233 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6234 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6235 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6236 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6237 || GET_CODE (XEXP (x, 0)) == ROTATE
6238 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6239 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6240 XEXP (x, 0), i);
6241 break;
6243 case EQ: case NE:
6244 case GT: case GTU: case GE: case GEU:
6245 case LT: case LTU: case LE: case LEU:
6246 case UNEQ: case LTGT:
6247 case UNGT: case UNGE:
6248 case UNLT: case UNLE:
6249 case UNORDERED: case ORDERED:
6250 /* If the first operand is a condition code, we can't do anything
6251 with it. */
6252 if (GET_CODE (XEXP (x, 0)) == COMPARE
6253 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6254 && ! CC0_P (XEXP (x, 0))))
6256 rtx op0 = XEXP (x, 0);
6257 rtx op1 = XEXP (x, 1);
6258 enum rtx_code new_code;
6260 if (GET_CODE (op0) == COMPARE)
6261 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6263 /* Simplify our comparison, if possible. */
6264 new_code = simplify_comparison (code, &op0, &op1);
6266 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6267 if only the low-order bit is possibly nonzero in X (such as when
6268 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6269 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6270 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6271 (plus X 1).
6273 Remove any ZERO_EXTRACT we made when thinking this was a
6274 comparison. It may now be simpler to use, e.g., an AND. If a
6275 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6276 the call to make_compound_operation in the SET case.
6278 Don't apply these optimizations if the caller would
6279 prefer a comparison rather than a value.
6280 E.g., for the condition in an IF_THEN_ELSE most targets need
6281 an explicit comparison. */
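/* For instance, with STORE_FLAG_VALUE == 1 and X known to be 0 or 1,
   (ne X (const_int 0)) simplifies to X itself and (eq X (const_int 0))
   to (xor X (const_int 1)); if X is known to be 0 or -1 they become
   (neg X) and (plus X (const_int 1)) respectively. */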
6283 if (in_cond)
6286 else if (STORE_FLAG_VALUE == 1
6287 && new_code == NE
6288 && is_int_mode (mode, &int_mode)
6289 && op1 == const0_rtx
6290 && int_mode == GET_MODE (op0)
6291 && nonzero_bits (op0, int_mode) == 1)
6292 return gen_lowpart (int_mode,
6293 expand_compound_operation (op0));
6295 else if (STORE_FLAG_VALUE == 1
6296 && new_code == NE
6297 && is_int_mode (mode, &int_mode)
6298 && op1 == const0_rtx
6299 && int_mode == GET_MODE (op0)
6300 && (num_sign_bit_copies (op0, int_mode)
6301 == GET_MODE_PRECISION (int_mode)))
6303 op0 = expand_compound_operation (op0);
6304 return simplify_gen_unary (NEG, int_mode,
6305 gen_lowpart (int_mode, op0),
6306 int_mode);
6309 else if (STORE_FLAG_VALUE == 1
6310 && new_code == EQ
6311 && is_int_mode (mode, &int_mode)
6312 && op1 == const0_rtx
6313 && int_mode == GET_MODE (op0)
6314 && nonzero_bits (op0, int_mode) == 1)
6316 op0 = expand_compound_operation (op0);
6317 return simplify_gen_binary (XOR, int_mode,
6318 gen_lowpart (int_mode, op0),
6319 const1_rtx);
6322 else if (STORE_FLAG_VALUE == 1
6323 && new_code == EQ
6324 && is_int_mode (mode, &int_mode)
6325 && op1 == const0_rtx
6326 && int_mode == GET_MODE (op0)
6327 && (num_sign_bit_copies (op0, int_mode)
6328 == GET_MODE_PRECISION (int_mode)))
6330 op0 = expand_compound_operation (op0);
6331 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6334 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6335 those above. */
6336 if (in_cond)
6339 else if (STORE_FLAG_VALUE == -1
6340 && new_code == NE
6341 && is_int_mode (mode, &int_mode)
6342 && op1 == const0_rtx
6343 && int_mode == GET_MODE (op0)
6344 && (num_sign_bit_copies (op0, int_mode)
6345 == GET_MODE_PRECISION (int_mode)))
6346 return gen_lowpart (int_mode, expand_compound_operation (op0));
6348 else if (STORE_FLAG_VALUE == -1
6349 && new_code == NE
6350 && is_int_mode (mode, &int_mode)
6351 && op1 == const0_rtx
6352 && int_mode == GET_MODE (op0)
6353 && nonzero_bits (op0, int_mode) == 1)
6355 op0 = expand_compound_operation (op0);
6356 return simplify_gen_unary (NEG, int_mode,
6357 gen_lowpart (int_mode, op0),
6358 int_mode);
6361 else if (STORE_FLAG_VALUE == -1
6362 && new_code == EQ
6363 && is_int_mode (mode, &int_mode)
6364 && op1 == const0_rtx
6365 && int_mode == GET_MODE (op0)
6366 && (num_sign_bit_copies (op0, int_mode)
6367 == GET_MODE_PRECISION (int_mode)))
6369 op0 = expand_compound_operation (op0);
6370 return simplify_gen_unary (NOT, int_mode,
6371 gen_lowpart (int_mode, op0),
6372 int_mode);
6375 /* If X is 0/1, (eq X 0) is X-1. */
6376 else if (STORE_FLAG_VALUE == -1
6377 && new_code == EQ
6378 && is_int_mode (mode, &int_mode)
6379 && op1 == const0_rtx
6380 && int_mode == GET_MODE (op0)
6381 && nonzero_bits (op0, int_mode) == 1)
6383 op0 = expand_compound_operation (op0);
6384 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6387 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6388 one bit that might be nonzero, we can convert (ne x 0) to
6389 (ashift x c) where C puts the bit in the sign bit. Remove any
6390 AND with STORE_FLAG_VALUE when we are done, since we are only
6391 going to test the sign bit. */
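/* For instance, in SImode with STORE_FLAG_VALUE equal to the sign bit,
   if only bit 5 of X may be nonzero, (ne X (const_int 0)) becomes
   (ashift X (const_int 26)), moving that bit into the sign position. */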
6392 if (new_code == NE
6393 && is_int_mode (mode, &int_mode)
6394 && HWI_COMPUTABLE_MODE_P (int_mode)
6395 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6396 && op1 == const0_rtx
6397 && int_mode == GET_MODE (op0)
6398 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6400 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6401 expand_compound_operation (op0),
6402 GET_MODE_PRECISION (int_mode) - 1 - i);
6403 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6404 return XEXP (x, 0);
6405 else
6406 return x;
6409 /* If the code changed, return a whole new comparison.
6410 We also need to avoid using SUBST in cases where
6411 simplify_comparison has widened a comparison with a CONST_INT,
6412 since in that case the wider CONST_INT may fail the sanity
6413 checks in do_SUBST. */
6414 if (new_code != code
6415 || (CONST_INT_P (op1)
6416 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6417 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6418 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6420 /* Otherwise, keep this operation, but maybe change its operands.
6421 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6422 SUBST (XEXP (x, 0), op0);
6423 SUBST (XEXP (x, 1), op1);
6425 break;
6427 case IF_THEN_ELSE:
6428 return simplify_if_then_else (x);
6430 case ZERO_EXTRACT:
6431 case SIGN_EXTRACT:
6432 case ZERO_EXTEND:
6433 case SIGN_EXTEND:
6434 /* If we are processing SET_DEST, we are done. */
6435 if (in_dest)
6436 return x;
6438 return expand_compound_operation (x);
6440 case SET:
6441 return simplify_set (x);
6443 case AND:
6444 case IOR:
6445 return simplify_logical (x);
6447 case ASHIFT:
6448 case LSHIFTRT:
6449 case ASHIFTRT:
6450 case ROTATE:
6451 case ROTATERT:
6452 /* If this is a shift by a constant amount, simplify it. */
6453 if (CONST_INT_P (XEXP (x, 1)))
6454 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6455 INTVAL (XEXP (x, 1)));
6457 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6458 SUBST (XEXP (x, 1),
6459 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6460 (HOST_WIDE_INT_1U
6461 << exact_log2 (GET_MODE_UNIT_BITSIZE
6462 (GET_MODE (x))))
6463 - 1,
6464 0));
6465 break;
6467 default:
6468 break;
6471 return x;
6474 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6476 static rtx
6477 simplify_if_then_else (rtx x)
6479 machine_mode mode = GET_MODE (x);
6480 rtx cond = XEXP (x, 0);
6481 rtx true_rtx = XEXP (x, 1);
6482 rtx false_rtx = XEXP (x, 2);
6483 enum rtx_code true_code = GET_CODE (cond);
6484 int comparison_p = COMPARISON_P (cond);
6485 rtx temp;
6486 int i;
6487 enum rtx_code false_code;
6488 rtx reversed;
6489 scalar_int_mode int_mode, inner_mode;
6491 /* Simplify storing of the truth value. */
6492 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6493 return simplify_gen_relational (true_code, mode, VOIDmode,
6494 XEXP (cond, 0), XEXP (cond, 1));
6496 /* Also when the truth value has to be reversed. */
6497 if (comparison_p
6498 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6499 && (reversed = reversed_comparison (cond, mode)))
6500 return reversed;
6502 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6503 in it is being compared against certain values. Get the true and false
6504 comparisons and see if that says anything about the value of each arm. */
6506 if (comparison_p
6507 && ((false_code = reversed_comparison_code (cond, NULL))
6508 != UNKNOWN)
6509 && REG_P (XEXP (cond, 0)))
6511 HOST_WIDE_INT nzb;
6512 rtx from = XEXP (cond, 0);
6513 rtx true_val = XEXP (cond, 1);
6514 rtx false_val = true_val;
6515 int swapped = 0;
6517 /* If FALSE_CODE is EQ, swap the codes and arms. */
6519 if (false_code == EQ)
6521 swapped = 1, true_code = EQ, false_code = NE;
6522 std::swap (true_rtx, false_rtx);
6525 scalar_int_mode from_mode;
6526 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6528 /* If we are comparing against zero and the expression being
6529 tested has only a single bit that might be nonzero, that is
6530 its value when it is not equal to zero. Similarly if it is
6531 known to be -1 or 0. */
6532 if (true_code == EQ
6533 && true_val == const0_rtx
6534 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6536 false_code = EQ;
6537 false_val = gen_int_mode (nzb, from_mode);
6539 else if (true_code == EQ
6540 && true_val == const0_rtx
6541 && (num_sign_bit_copies (from, from_mode)
6542 == GET_MODE_PRECISION (from_mode)))
6544 false_code = EQ;
6545 false_val = constm1_rtx;
6549 /* Now simplify an arm if we know the value of the register in the
6550 branch and it is used in the arm. Be careful due to the potential
6551 of locally-shared RTL. */
6553 if (reg_mentioned_p (from, true_rtx))
6554 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6555 from, true_val),
6556 pc_rtx, pc_rtx, 0, 0, 0);
6557 if (reg_mentioned_p (from, false_rtx))
6558 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6559 from, false_val),
6560 pc_rtx, pc_rtx, 0, 0, 0);
6562 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6563 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6565 true_rtx = XEXP (x, 1);
6566 false_rtx = XEXP (x, 2);
6567 true_code = GET_CODE (cond);
6570 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6571 reversed, do so to avoid needing two sets of patterns for
6572 subtract-and-branch insns. Similarly if we have a constant in the true
6573 arm, the false arm is the same as the first operand of the comparison, or
6574 the false arm is more complicated than the true arm. */
6576 if (comparison_p
6577 && reversed_comparison_code (cond, NULL) != UNKNOWN
6578 && (true_rtx == pc_rtx
6579 || (CONSTANT_P (true_rtx)
6580 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6581 || true_rtx == const0_rtx
6582 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6583 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6584 && !OBJECT_P (false_rtx))
6585 || reg_mentioned_p (true_rtx, false_rtx)
6586 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6588 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6589 SUBST (XEXP (x, 1), false_rtx);
6590 SUBST (XEXP (x, 2), true_rtx);
6592 std::swap (true_rtx, false_rtx);
6593 cond = XEXP (x, 0);
6595 /* It is possible that the conditional has been simplified out. */
6596 true_code = GET_CODE (cond);
6597 comparison_p = COMPARISON_P (cond);
6600 /* If the two arms are identical, we don't need the comparison. */
6602 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6603 return true_rtx;
6605 /* Convert a == b ? b : a to "a". */
6606 if (true_code == EQ && ! side_effects_p (cond)
6607 && !HONOR_NANS (mode)
6608 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6609 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6610 return false_rtx;
6611 else if (true_code == NE && ! side_effects_p (cond)
6612 && !HONOR_NANS (mode)
6613 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6614 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6615 return true_rtx;
6617 /* Look for cases where we have (abs x) or (neg (abs X)). */
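/* For instance, (if_then_else (ge X (const_int 0)) X (neg X)) becomes
   (abs X), and the LT/LE forms become (neg (abs X)). */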
6619 if (GET_MODE_CLASS (mode) == MODE_INT
6620 && comparison_p
6621 && XEXP (cond, 1) == const0_rtx
6622 && GET_CODE (false_rtx) == NEG
6623 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6624 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6625 && ! side_effects_p (true_rtx))
6626 switch (true_code)
6628 case GT:
6629 case GE:
6630 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6631 case LT:
6632 case LE:
6633 return
6634 simplify_gen_unary (NEG, mode,
6635 simplify_gen_unary (ABS, mode, true_rtx, mode),
6636 mode);
6637 default:
6638 break;
6641 /* Look for MIN or MAX. */
6643 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6644 && comparison_p
6645 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6646 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6647 && ! side_effects_p (cond))
6648 switch (true_code)
6650 case GE:
6651 case GT:
6652 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6653 case LE:
6654 case LT:
6655 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6656 case GEU:
6657 case GTU:
6658 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6659 case LEU:
6660 case LTU:
6661 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6662 default:
6663 break;
6666 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6667 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6668 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6669 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6670 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6671 neither 1 nor -1, but it isn't worth checking for. */
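/* For instance, with STORE_FLAG_VALUE == 1,
   (if_then_else (eq A B) (plus Z (const_int 4)) Z) can be rewritten as
   (plus Z (mult (eq A B) (const_int 4))). */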
6673 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6674 && comparison_p
6675 && is_int_mode (mode, &int_mode)
6676 && ! side_effects_p (x))
6678 rtx t = make_compound_operation (true_rtx, SET);
6679 rtx f = make_compound_operation (false_rtx, SET);
6680 rtx cond_op0 = XEXP (cond, 0);
6681 rtx cond_op1 = XEXP (cond, 1);
6682 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6683 scalar_int_mode m = int_mode;
6684 rtx z = 0, c1 = NULL_RTX;
6686 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6687 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6688 || GET_CODE (t) == ASHIFT
6689 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6690 && rtx_equal_p (XEXP (t, 0), f))
6691 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6693 /* If an identity-zero op is commutative, check whether there
6694 would be a match if we swapped the operands. */
6695 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6696 || GET_CODE (t) == XOR)
6697 && rtx_equal_p (XEXP (t, 1), f))
6698 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6699 else if (GET_CODE (t) == SIGN_EXTEND
6700 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6701 && (GET_CODE (XEXP (t, 0)) == PLUS
6702 || GET_CODE (XEXP (t, 0)) == MINUS
6703 || GET_CODE (XEXP (t, 0)) == IOR
6704 || GET_CODE (XEXP (t, 0)) == XOR
6705 || GET_CODE (XEXP (t, 0)) == ASHIFT
6706 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6707 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6708 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6709 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6710 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6711 && (num_sign_bit_copies (f, GET_MODE (f))
6712 > (unsigned int)
6713 (GET_MODE_PRECISION (int_mode)
6714 - GET_MODE_PRECISION (inner_mode))))
6716 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6717 extend_op = SIGN_EXTEND;
6718 m = inner_mode;
6720 else if (GET_CODE (t) == SIGN_EXTEND
6721 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6722 && (GET_CODE (XEXP (t, 0)) == PLUS
6723 || GET_CODE (XEXP (t, 0)) == IOR
6724 || GET_CODE (XEXP (t, 0)) == XOR)
6725 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6726 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6727 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6728 && (num_sign_bit_copies (f, GET_MODE (f))
6729 > (unsigned int)
6730 (GET_MODE_PRECISION (int_mode)
6731 - GET_MODE_PRECISION (inner_mode))))
6733 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6734 extend_op = SIGN_EXTEND;
6735 m = inner_mode;
6737 else if (GET_CODE (t) == ZERO_EXTEND
6738 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6739 && (GET_CODE (XEXP (t, 0)) == PLUS
6740 || GET_CODE (XEXP (t, 0)) == MINUS
6741 || GET_CODE (XEXP (t, 0)) == IOR
6742 || GET_CODE (XEXP (t, 0)) == XOR
6743 || GET_CODE (XEXP (t, 0)) == ASHIFT
6744 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6745 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6746 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6747 && HWI_COMPUTABLE_MODE_P (int_mode)
6748 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6749 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6750 && ((nonzero_bits (f, GET_MODE (f))
6751 & ~GET_MODE_MASK (inner_mode))
6752 == 0))
6754 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6755 extend_op = ZERO_EXTEND;
6756 m = inner_mode;
6758 else if (GET_CODE (t) == ZERO_EXTEND
6759 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6760 && (GET_CODE (XEXP (t, 0)) == PLUS
6761 || GET_CODE (XEXP (t, 0)) == IOR
6762 || GET_CODE (XEXP (t, 0)) == XOR)
6763 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6764 && HWI_COMPUTABLE_MODE_P (int_mode)
6765 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6766 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6767 && ((nonzero_bits (f, GET_MODE (f))
6768 & ~GET_MODE_MASK (inner_mode))
6769 == 0))
6771 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6772 extend_op = ZERO_EXTEND;
6773 m = inner_mode;
6776 if (z)
6778 machine_mode cm = m;
6779 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6780 && GET_MODE (c1) != VOIDmode)
6781 cm = GET_MODE (c1);
6782 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6783 cond_op0, cond_op1),
6784 pc_rtx, pc_rtx, 0, 0, 0);
6785 temp = simplify_gen_binary (MULT, cm, temp,
6786 simplify_gen_binary (MULT, cm, c1,
6787 const_true_rtx));
6788 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6789 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6791 if (extend_op != UNKNOWN)
6792 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6794 return temp;
6798 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6799 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6800 negation of a single bit, we can convert this operation to a shift. We
6801 can actually do this more generally, but it doesn't seem worth it. */
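/* For instance, if A is known to be 0 or 1,
   (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
   becomes (ashift A (const_int 3)). */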
6803 if (true_code == NE
6804 && is_a <scalar_int_mode> (mode, &int_mode)
6805 && XEXP (cond, 1) == const0_rtx
6806 && false_rtx == const0_rtx
6807 && CONST_INT_P (true_rtx)
6808 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6809 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6810 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6811 == GET_MODE_PRECISION (int_mode))
6812 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6813 return
6814 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6815 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6817 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6818 non-zero bit in A is C1. */
6819 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6820 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6821 && is_a <scalar_int_mode> (mode, &int_mode)
6822 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6823 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6824 == nonzero_bits (XEXP (cond, 0), inner_mode)
6825 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6827 rtx val = XEXP (cond, 0);
6828 if (inner_mode == int_mode)
6829 return val;
6830 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6831 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6834 return x;
6837 /* Simplify X, a SET expression. Return the new expression. */
6839 static rtx
6840 simplify_set (rtx x)
6842 rtx src = SET_SRC (x);
6843 rtx dest = SET_DEST (x);
6844 machine_mode mode
6845 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6846 rtx_insn *other_insn;
6847 rtx *cc_use;
6848 scalar_int_mode int_mode;
6850 /* (set (pc) (return)) gets written as (return). */
6851 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6852 return src;
6854 /* Now that we know for sure which bits of SRC we are using, see if we can
6855 simplify the expression for the object knowing that we only need the
6856 low-order bits. */
6858 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6860 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6861 SUBST (SET_SRC (x), src);
6864 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6865 the comparison result and try to simplify it unless we already have used
6866 undobuf.other_insn. */
6867 if ((GET_MODE_CLASS (mode) == MODE_CC
6868 || GET_CODE (src) == COMPARE
6869 || CC0_P (dest))
6870 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6871 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6872 && COMPARISON_P (*cc_use)
6873 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6875 enum rtx_code old_code = GET_CODE (*cc_use);
6876 enum rtx_code new_code;
6877 rtx op0, op1, tmp;
6878 int other_changed = 0;
6879 rtx inner_compare = NULL_RTX;
6880 machine_mode compare_mode = GET_MODE (dest);
6882 if (GET_CODE (src) == COMPARE)
6884 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6885 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6887 inner_compare = op0;
6888 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6891 else
6892 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6894 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6895 op0, op1);
6896 if (!tmp)
6897 new_code = old_code;
6898 else if (!CONSTANT_P (tmp))
6900 new_code = GET_CODE (tmp);
6901 op0 = XEXP (tmp, 0);
6902 op1 = XEXP (tmp, 1);
6904 else
6906 rtx pat = PATTERN (other_insn);
6907 undobuf.other_insn = other_insn;
6908 SUBST (*cc_use, tmp);
6910 /* Attempt to simplify CC user. */
6911 if (GET_CODE (pat) == SET)
6913 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6914 if (new_rtx != NULL_RTX)
6915 SUBST (SET_SRC (pat), new_rtx);
6918 /* Convert X into a no-op move. */
6919 SUBST (SET_DEST (x), pc_rtx);
6920 SUBST (SET_SRC (x), pc_rtx);
6921 return x;
6924 /* Simplify our comparison, if possible. */
6925 new_code = simplify_comparison (new_code, &op0, &op1);
6927 #ifdef SELECT_CC_MODE
6928 /* If this machine has CC modes other than CCmode, check to see if we
6929 need to use a different CC mode here. */
6930 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6931 compare_mode = GET_MODE (op0);
6932 else if (inner_compare
6933 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6934 && new_code == old_code
6935 && op0 == XEXP (inner_compare, 0)
6936 && op1 == XEXP (inner_compare, 1))
6937 compare_mode = GET_MODE (inner_compare);
6938 else
6939 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6941 /* If the mode changed, we have to change SET_DEST, the mode in the
6942 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6943 a hard register, just build new versions with the proper mode. If it
6944 is a pseudo, we lose unless it is the only time we set the pseudo, in
6945 which case we can safely change its mode. */
6946 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6948 if (can_change_dest_mode (dest, 0, compare_mode))
6950 unsigned int regno = REGNO (dest);
6951 rtx new_dest;
6953 if (regno < FIRST_PSEUDO_REGISTER)
6954 new_dest = gen_rtx_REG (compare_mode, regno);
6955 else
6957 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6958 new_dest = regno_reg_rtx[regno];
6961 SUBST (SET_DEST (x), new_dest);
6962 SUBST (XEXP (*cc_use, 0), new_dest);
6963 other_changed = 1;
6965 dest = new_dest;
6968 #endif /* SELECT_CC_MODE */
6970 /* If the code changed, we have to build a new comparison in
6971 undobuf.other_insn. */
6972 if (new_code != old_code)
6974 int other_changed_previously = other_changed;
6975 unsigned HOST_WIDE_INT mask;
6976 rtx old_cc_use = *cc_use;
6978 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6979 dest, const0_rtx));
6980 other_changed = 1;
6982 /* If the only change we made was to change an EQ into an NE or
6983 vice versa, OP0 has only one bit that might be nonzero, and OP1
6984 is zero, check if changing the user of the condition code will
6985 produce a valid insn. If it won't, we can keep the original code
6986 in that insn by surrounding our operation with an XOR. */
6988 if (((old_code == NE && new_code == EQ)
6989 || (old_code == EQ && new_code == NE))
6990 && ! other_changed_previously && op1 == const0_rtx
6991 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6992 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6994 rtx pat = PATTERN (other_insn), note = 0;
6996 if ((recog_for_combine (&pat, other_insn, &note) < 0
6997 && ! check_asm_operands (pat)))
6999 *cc_use = old_cc_use;
7000 other_changed = 0;
7002 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
7003 gen_int_mode (mask,
7004 GET_MODE (op0)));
7009 if (other_changed)
7010 undobuf.other_insn = other_insn;
7012 /* Don't generate a compare of a CC with 0, just use that CC. */
7013 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
7015 SUBST (SET_SRC (x), op0);
7016 src = SET_SRC (x);
7018 /* Otherwise, if we didn't previously have the same COMPARE we
7019 want, create it from scratch. */
7020 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
7021 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
7023 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
7024 src = SET_SRC (x);
7027 else
7029 /* Get SET_SRC in a form where we have placed back any
7030 compound expressions. Then do the checks below. */
7031 src = make_compound_operation (src, SET);
7032 SUBST (SET_SRC (x), src);
7035 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
7036 and X being a REG or (subreg (reg)), we may be able to convert this to
7037 (set (subreg:m2 x) (op)).
7039 We can always do this if M1 is narrower than M2 because that means that
7040 we only care about the low bits of the result.
7042 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
7043 perform a narrower operation than requested since the high-order bits will
7044 be undefined. On machines where it is defined, this transformation is safe
7045 as long as M1 and M2 have the same number of words. */
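/* As an example, on a 64-bit word target a set such as
   (set (reg:SI X) (subreg:SI (plus:DI A B) 0)) can be rewritten as a
   DImode set of the lowpart of X, leaving just the DImode addition to
   be recognized.  */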
7047 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
7048 && !OBJECT_P (SUBREG_REG (src))
7049 && (known_equal_after_align_up
7050 (GET_MODE_SIZE (GET_MODE (src)),
7051 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
7052 UNITS_PER_WORD))
7053 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
7054 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
7055 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
7056 GET_MODE (SUBREG_REG (src)),
7057 GET_MODE (src)))
7058 && (REG_P (dest)
7059 || (GET_CODE (dest) == SUBREG
7060 && REG_P (SUBREG_REG (dest)))))
7062 SUBST (SET_DEST (x),
7063 gen_lowpart (GET_MODE (SUBREG_REG (src)),
7064 dest));
7065 SUBST (SET_SRC (x), SUBREG_REG (src));
7067 src = SET_SRC (x), dest = SET_DEST (x);
7070 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
7071 in SRC. */
7072 if (dest == cc0_rtx
7073 && partial_subreg_p (src)
7074 && subreg_lowpart_p (src))
7076 rtx inner = SUBREG_REG (src);
7077 machine_mode inner_mode = GET_MODE (inner);
7079 /* Here we make sure that we don't have a sign bit on. */
7080 if (val_signbit_known_clear_p (GET_MODE (src),
7081 nonzero_bits (inner, inner_mode)))
7083 SUBST (SET_SRC (x), inner);
7084 src = SET_SRC (x);
7088 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7089 would require a paradoxical subreg. Replace the subreg with a
7090 zero_extend to avoid the reload that would otherwise be required.
7091 Don't do this unless we have a scalar integer mode, otherwise the
7092 transformation is incorrect. */
7094 enum rtx_code extend_op;
7095 if (paradoxical_subreg_p (src)
7096 && MEM_P (SUBREG_REG (src))
7097 && SCALAR_INT_MODE_P (GET_MODE (src))
7098 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7100 SUBST (SET_SRC (x),
7101 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7103 src = SET_SRC (x);
7106 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7107 are comparing an item known to be 0 or -1 against 0, use a logical
7108 operation instead. Check for one of the arms being an IOR of the other
7109 arm with some value. We compute three terms to be IOR'ed together. In
7110 practice, at most two will be nonzero. Then we do the IOR's. */
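/* For example, with A known to be 0 or -1, a source such as
   (if_then_else (ne A 0) (ior Y C) Y) becomes (ior Y (and A C)):
   TERM1 is Y, TERM2 is (and A C) and TERM3 is (and (not A) 0), i.e. zero.  */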
7112 if (GET_CODE (dest) != PC
7113 && GET_CODE (src) == IF_THEN_ELSE
7114 && is_int_mode (GET_MODE (src), &int_mode)
7115 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7116 && XEXP (XEXP (src, 0), 1) == const0_rtx
7117 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7118 && (!HAVE_conditional_move
7119 || ! can_conditionally_move_p (int_mode))
7120 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7121 == GET_MODE_PRECISION (int_mode))
7122 && ! side_effects_p (src))
7124 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7125 ? XEXP (src, 1) : XEXP (src, 2));
7126 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7127 ? XEXP (src, 2) : XEXP (src, 1));
7128 rtx term1 = const0_rtx, term2, term3;
7130 if (GET_CODE (true_rtx) == IOR
7131 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7132 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7133 else if (GET_CODE (true_rtx) == IOR
7134 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7135 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7136 else if (GET_CODE (false_rtx) == IOR
7137 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7138 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7139 else if (GET_CODE (false_rtx) == IOR
7140 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7141 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7143 term2 = simplify_gen_binary (AND, int_mode,
7144 XEXP (XEXP (src, 0), 0), true_rtx);
7145 term3 = simplify_gen_binary (AND, int_mode,
7146 simplify_gen_unary (NOT, int_mode,
7147 XEXP (XEXP (src, 0), 0),
7148 int_mode),
7149 false_rtx);
7151 SUBST (SET_SRC (x),
7152 simplify_gen_binary (IOR, int_mode,
7153 simplify_gen_binary (IOR, int_mode,
7154 term1, term2),
7155 term3));
7157 src = SET_SRC (x);
7160 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7161 whole thing fail. */
7162 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7163 return src;
7164 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7165 return dest;
7166 else
7167 /* Convert this into a field assignment operation, if possible. */
7168 return make_field_assignment (x);
7171 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7172 result. */
7174 static rtx
7175 simplify_logical (rtx x)
7177 rtx op0 = XEXP (x, 0);
7178 rtx op1 = XEXP (x, 1);
7179 scalar_int_mode mode;
7181 switch (GET_CODE (x))
7183 case AND:
7184 /* We can call simplify_and_const_int only if we don't lose
7185 any (sign) bits when converting INTVAL (op1) to
7186 "unsigned HOST_WIDE_INT". */
7187 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7188 && CONST_INT_P (op1)
7189 && (HWI_COMPUTABLE_MODE_P (mode)
7190 || INTVAL (op1) > 0))
7192 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7193 if (GET_CODE (x) != AND)
7194 return x;
7196 op0 = XEXP (x, 0);
7197 op1 = XEXP (x, 1);
7200 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7201 apply the distributive law and then the inverse distributive
7202 law to see if things simplify. */
7203 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7205 rtx result = distribute_and_simplify_rtx (x, 0);
7206 if (result)
7207 return result;
7209 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7211 rtx result = distribute_and_simplify_rtx (x, 1);
7212 if (result)
7213 return result;
7215 break;
7217 case IOR:
7218 /* If we have (ior (and A B) C), apply the distributive law and then
7219 the inverse distributive law to see if things simplify. */
7221 if (GET_CODE (op0) == AND)
7223 rtx result = distribute_and_simplify_rtx (x, 0);
7224 if (result)
7225 return result;
7228 if (GET_CODE (op1) == AND)
7230 rtx result = distribute_and_simplify_rtx (x, 1);
7231 if (result)
7232 return result;
7234 break;
7236 default:
7237 gcc_unreachable ();
7240 return x;
7243 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7244 operations" because they can be replaced with two more basic operations.
7245 ZERO_EXTEND is also considered "compound" because it can be replaced with
7246 an AND operation, which is simpler, though only one operation.
7248 The function expand_compound_operation is called with an rtx expression
7249 and will convert it to the appropriate shifts and AND operations,
7250 simplifying at each stage.
7252 The function make_compound_operation is called to convert an expression
7253 consisting of shifts and ANDs into the equivalent compound expression.
7254 It is the inverse of this function, loosely speaking. */
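/* For instance, expand_compound_operation turns
   (zero_extract:SI X (const_int 8) (const_int 8)) into a pair of shifts
   (ultimately an AND of a shift), while make_compound_operation
   recognizes that shift-and-mask form and rebuilds the extraction.  */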
7256 static rtx
7257 expand_compound_operation (rtx x)
7259 unsigned HOST_WIDE_INT pos = 0, len;
7260 int unsignedp = 0;
7261 unsigned int modewidth;
7262 rtx tem;
7263 scalar_int_mode inner_mode;
7265 switch (GET_CODE (x))
7267 case ZERO_EXTEND:
7268 unsignedp = 1;
7269 /* FALLTHRU */
7270 case SIGN_EXTEND:
7271 /* We can't necessarily use a const_int for a multiword mode;
7272 it depends on implicitly extending the value.
7273 Since we don't know the right way to extend it,
7274 we can't tell whether the implicit way is right.
7276 Even for a mode that is no wider than a const_int,
7277 we can't win, because we need to sign extend one of its bits through
7278 the rest of it, and we don't know which bit. */
7279 if (CONST_INT_P (XEXP (x, 0)))
7280 return x;
7282 /* Reject modes that aren't scalar integers because turning vector
7283 or complex modes into shifts causes problems. */
7284 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7285 return x;
7287 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7288 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7289 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7290 reloaded. If not for that, MEM's would very rarely be safe.
7292 Reject modes bigger than a word, because we might not be able
7293 to reference a two-register group starting with an arbitrary register
7294 (and currently gen_lowpart might crash for a SUBREG). */
7296 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7297 return x;
7299 len = GET_MODE_PRECISION (inner_mode);
7300 /* If the inner object has VOIDmode (the only way this can happen
7301 is if it is an ASM_OPERANDS), we can't do anything since we don't
7302 know how much masking to do. */
7303 if (len == 0)
7304 return x;
7306 break;
7308 case ZERO_EXTRACT:
7309 unsignedp = 1;
7311 /* fall through */
7313 case SIGN_EXTRACT:
7314 /* If the operand is a CLOBBER, just return it. */
7315 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7316 return XEXP (x, 0);
7318 if (!CONST_INT_P (XEXP (x, 1))
7319 || !CONST_INT_P (XEXP (x, 2)))
7320 return x;
7322 /* Reject modes that aren't scalar integers because turning vector
7323 or complex modes into shifts causes problems. */
7324 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7325 return x;
7327 len = INTVAL (XEXP (x, 1));
7328 pos = INTVAL (XEXP (x, 2));
7330 /* This should stay within the object being extracted, fail otherwise. */
7331 if (len + pos > GET_MODE_PRECISION (inner_mode))
7332 return x;
7334 if (BITS_BIG_ENDIAN)
7335 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7337 break;
7339 default:
7340 return x;
7343 /* We've rejected non-scalar operations by now. */
7344 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7346 /* Convert sign extension to zero extension, if we know that the high
7347 bit is not set, as this is easier to optimize. It will be converted
7348 back to a cheaper alternative in make_extraction. */
7349 if (GET_CODE (x) == SIGN_EXTEND
7350 && HWI_COMPUTABLE_MODE_P (mode)
7351 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7352 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7353 == 0))
7355 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7356 rtx temp2 = expand_compound_operation (temp);
7358 /* Make sure this is a profitable operation. */
7359 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7360 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7361 return temp2;
7362 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7363 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7364 return temp;
7365 else
7366 return x;
7369 /* We can optimize some special cases of ZERO_EXTEND. */
7370 if (GET_CODE (x) == ZERO_EXTEND)
7372 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7373 know that the last value didn't have any inappropriate bits
7374 set. */
7375 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7376 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7377 && HWI_COMPUTABLE_MODE_P (mode)
7378 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7379 & ~GET_MODE_MASK (inner_mode)) == 0)
7380 return XEXP (XEXP (x, 0), 0);
7382 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7383 if (GET_CODE (XEXP (x, 0)) == SUBREG
7384 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7385 && subreg_lowpart_p (XEXP (x, 0))
7386 && HWI_COMPUTABLE_MODE_P (mode)
7387 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7388 & ~GET_MODE_MASK (inner_mode)) == 0)
7389 return SUBREG_REG (XEXP (x, 0));
7391 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7392 is a comparison and STORE_FLAG_VALUE permits. This is like
7393 the first case, but it works even when MODE is larger
7394 than HOST_WIDE_INT. */
7395 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7396 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7397 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7398 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7399 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7400 return XEXP (XEXP (x, 0), 0);
7402 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7403 if (GET_CODE (XEXP (x, 0)) == SUBREG
7404 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7405 && subreg_lowpart_p (XEXP (x, 0))
7406 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7407 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7408 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7409 return SUBREG_REG (XEXP (x, 0));
7413 /* If we reach here, we want to return a pair of shifts. The inner
7414 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7415 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7416 logical depending on the value of UNSIGNEDP.
7418 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7419 converted into an AND of a shift.
7421 We must check for the case where the left shift would have a negative
7422 count. This can happen in a case like (x >> 31) & 255 on machines
7423 that can't shift by a constant. On those machines, we would first
7424 combine the shift with the AND to produce a variable-position
7425 extraction. Then the constant of 31 would be substituted in
7426 to produce such a position. */
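/* For a 32-bit MODE, (sign_extract:SI X (const_int 8) (const_int 4))
   therefore becomes (ashiftrt:SI (ashift:SI X (const_int 20))
   (const_int 24)).  */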
7428 modewidth = GET_MODE_PRECISION (mode);
7429 if (modewidth >= pos + len)
7431 tem = gen_lowpart (mode, XEXP (x, 0));
7432 if (!tem || GET_CODE (tem) == CLOBBER)
7433 return x;
7434 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7435 tem, modewidth - pos - len);
7436 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7437 mode, tem, modewidth - len);
7439 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7440 tem = simplify_and_const_int (NULL_RTX, mode,
7441 simplify_shift_const (NULL_RTX, LSHIFTRT,
7442 mode, XEXP (x, 0),
7443 pos),
7444 (HOST_WIDE_INT_1U << len) - 1);
7445 else
7446 /* Any other cases we can't handle. */
7447 return x;
7449 /* If we couldn't do this for some reason, return the original
7450 expression. */
7451 if (GET_CODE (tem) == CLOBBER)
7452 return x;
7454 return tem;
7457 /* X is a SET which contains an assignment of one object into
7458 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7459 or certain SUBREGS). If possible, convert it into a series of
7460 logical operations.
7462 We half-heartedly support variable positions, but do not at all
7463 support variable lengths. */
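/* For example, ignoring BITS_BIG_ENDIAN adjustments,
   (set (zero_extract:SI X (const_int 8) (const_int 4)) Y) is expanded to
   the equivalent of X = (X & ~(0xff << 4)) | ((Y & 0xff) << 4).  */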
7465 static const_rtx
7466 expand_field_assignment (const_rtx x)
7468 rtx inner;
7469 rtx pos; /* Always counts from low bit. */
7470 int len, inner_len;
7471 rtx mask, cleared, masked;
7472 scalar_int_mode compute_mode;
7474 /* Loop until we find something we can't simplify. */
7475 while (1)
7477 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7478 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7480 rtx x0 = XEXP (SET_DEST (x), 0);
7481 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7482 break;
7483 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7484 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7485 MAX_MODE_INT);
7487 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7488 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7490 inner = XEXP (SET_DEST (x), 0);
7491 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7492 break;
7494 len = INTVAL (XEXP (SET_DEST (x), 1));
7495 pos = XEXP (SET_DEST (x), 2);
7497 /* A constant position should stay within the width of INNER. */
7498 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7499 break;
7501 if (BITS_BIG_ENDIAN)
7503 if (CONST_INT_P (pos))
7504 pos = GEN_INT (inner_len - len - INTVAL (pos));
7505 else if (GET_CODE (pos) == MINUS
7506 && CONST_INT_P (XEXP (pos, 1))
7507 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7508 /* If position is ADJUST - X, new position is X. */
7509 pos = XEXP (pos, 0);
7510 else
7511 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7512 gen_int_mode (inner_len - len,
7513 GET_MODE (pos)),
7514 pos);
7518 /* If the destination is a subreg that overwrites the whole of the inner
7519 register, we can move the subreg to the source. */
7520 else if (GET_CODE (SET_DEST (x)) == SUBREG
7521 /* We need SUBREGs to compute nonzero_bits properly. */
7522 && nonzero_sign_valid
7523 && !read_modify_subreg_p (SET_DEST (x)))
7525 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7526 gen_lowpart
7527 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7528 SET_SRC (x)));
7529 continue;
7531 else
7532 break;
7534 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7535 inner = SUBREG_REG (inner);
7537 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7538 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7540 /* Don't do anything for vector or complex integral types. */
7541 if (! FLOAT_MODE_P (GET_MODE (inner)))
7542 break;
7544 /* Try to find an integral mode to pun with. */
7545 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7546 .exists (&compute_mode))
7547 break;
7549 inner = gen_lowpart (compute_mode, inner);
7552 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7553 if (len >= HOST_BITS_PER_WIDE_INT)
7554 break;
7556 /* Don't try to compute in too wide unsupported modes. */
7557 if (!targetm.scalar_mode_supported_p (compute_mode))
7558 break;
7560 /* Now compute the equivalent expression. Make a copy of INNER
7561 for the SET_DEST in case it is a MEM into which we will substitute;
7562 we don't want shared RTL in that case. */
7563 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7564 compute_mode);
7565 cleared = simplify_gen_binary (AND, compute_mode,
7566 simplify_gen_unary (NOT, compute_mode,
7567 simplify_gen_binary (ASHIFT,
7568 compute_mode,
7569 mask, pos),
7570 compute_mode),
7571 inner);
7572 masked = simplify_gen_binary (ASHIFT, compute_mode,
7573 simplify_gen_binary (
7574 AND, compute_mode,
7575 gen_lowpart (compute_mode, SET_SRC (x)),
7576 mask),
7577 pos);
7579 x = gen_rtx_SET (copy_rtx (inner),
7580 simplify_gen_binary (IOR, compute_mode,
7581 cleared, masked));
7584 return x;
7587 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7588 it is an RTX that represents the (variable) starting position; otherwise,
7589 POS is the (constant) starting bit position. Both are counted from the LSB.
7591 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7593 IN_DEST is nonzero if this is a reference in the destination of a SET.
7594 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7595 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7596 be used.
7598 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7599 ZERO_EXTRACT should be built even for bits starting at bit 0.
7601 MODE is the desired mode of the result (if IN_DEST == 0).
7603 The result is an RTX for the extraction or NULL_RTX if the target
7604 can't handle it. */
7606 static rtx
7607 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7608 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7609 int in_dest, int in_compare)
7611 /* This mode describes the size of the storage area
7612 to fetch the overall value from. Within that, we
7613 ignore the POS lowest bits, etc. */
7614 machine_mode is_mode = GET_MODE (inner);
7615 machine_mode inner_mode;
7616 scalar_int_mode wanted_inner_mode;
7617 scalar_int_mode wanted_inner_reg_mode = word_mode;
7618 scalar_int_mode pos_mode = word_mode;
7619 machine_mode extraction_mode = word_mode;
7620 rtx new_rtx = 0;
7621 rtx orig_pos_rtx = pos_rtx;
7622 HOST_WIDE_INT orig_pos;
7624 if (pos_rtx && CONST_INT_P (pos_rtx))
7625 pos = INTVAL (pos_rtx), pos_rtx = 0;
7627 if (GET_CODE (inner) == SUBREG
7628 && subreg_lowpart_p (inner)
7629 && (paradoxical_subreg_p (inner)
7630 /* If trying or potentially trying to extract
7631 bits outside of is_mode, don't look through
7632 non-paradoxical SUBREGs. See PR82192. */
7633 || (pos_rtx == NULL_RTX
7634 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7636 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7637 consider just the QI as the memory to extract from.
7638 The subreg adds or removes high bits; its mode is
7639 irrelevant to the meaning of this extraction,
7640 since POS and LEN count from the lsb. */
7641 if (MEM_P (SUBREG_REG (inner)))
7642 is_mode = GET_MODE (SUBREG_REG (inner));
7643 inner = SUBREG_REG (inner);
7645 else if (GET_CODE (inner) == ASHIFT
7646 && CONST_INT_P (XEXP (inner, 1))
7647 && pos_rtx == 0 && pos == 0
7648 && len > UINTVAL (XEXP (inner, 1)))
7650 /* We're extracting the least significant bits of an rtx
7651 (ashift X (const_int C)), where LEN > C. Extract the
7652 least significant (LEN - C) bits of X, giving an rtx
7653 whose mode is MODE, then shift it left C times. */
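/* For instance, extracting the low 8 bits of (ashift X (const_int 3))
   is done by extracting the low 5 bits of X and shifting that result
   left by 3.  */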
7654 new_rtx = make_extraction (mode, XEXP (inner, 0),
7655 0, 0, len - INTVAL (XEXP (inner, 1)),
7656 unsignedp, in_dest, in_compare);
7657 if (new_rtx != 0)
7658 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7660 else if (GET_CODE (inner) == TRUNCATE
7661 /* If trying or potentially trying to extract
7662 bits outside of is_mode, don't look through
7663 TRUNCATE. See PR82192. */
7664 && pos_rtx == NULL_RTX
7665 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7666 inner = XEXP (inner, 0);
7668 inner_mode = GET_MODE (inner);
7670 /* See if this can be done without an extraction. We never can if the
7671 width of the field is not the same as that of some integer mode. For
7672 registers, we can only avoid the extraction if the position is at the
7673 low-order bit and this is either not in the destination or we have the
7674 appropriate STRICT_LOW_PART operation available.
7676 For MEM, we can avoid an extract if the field starts on an appropriate
7677 boundary and we can change the mode of the memory reference. */
7679 scalar_int_mode tmode;
7680 if (int_mode_for_size (len, 1).exists (&tmode)
7681 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7682 && !MEM_P (inner)
7683 && (pos == 0 || REG_P (inner))
7684 && (inner_mode == tmode
7685 || !REG_P (inner)
7686 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7687 || reg_truncated_to_mode (tmode, inner))
7688 && (! in_dest
7689 || (REG_P (inner)
7690 && have_insn_for (STRICT_LOW_PART, tmode))))
7691 || (MEM_P (inner) && pos_rtx == 0
7692 && (pos
7693 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7694 : BITS_PER_UNIT)) == 0
7695 /* We can't do this if we are widening INNER_MODE (it
7696 may not be aligned, for one thing). */
7697 && !paradoxical_subreg_p (tmode, inner_mode)
7698 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7699 && (inner_mode == tmode
7700 || (! mode_dependent_address_p (XEXP (inner, 0),
7701 MEM_ADDR_SPACE (inner))
7702 && ! MEM_VOLATILE_P (inner))))))
7704 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7705 field. If the original and current mode are the same, we need not
7706 adjust the offset. Otherwise, we do if bytes big endian.
7708 If INNER is not a MEM, get a piece consisting of just the field
7709 of interest (in this case POS % BITS_PER_WORD must be 0). */
7711 if (MEM_P (inner))
7713 poly_int64 offset;
7715 /* POS counts from lsb, but make OFFSET count in memory order. */
7716 if (BYTES_BIG_ENDIAN)
7717 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7718 - len - pos);
7719 else
7720 offset = pos / BITS_PER_UNIT;
7722 new_rtx = adjust_address_nv (inner, tmode, offset);
7724 else if (REG_P (inner))
7726 if (tmode != inner_mode)
7728 /* We can't call gen_lowpart in a DEST since we
7729 always want a SUBREG (see below) and it would sometimes
7730 return a new hard register. */
7731 if (pos || in_dest)
7733 poly_uint64 offset
7734 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7736 /* Avoid creating invalid subregs, for example when
7737 simplifying (x>>32)&255. */
7738 if (!validate_subreg (tmode, inner_mode, inner, offset))
7739 return NULL_RTX;
7741 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7743 else
7744 new_rtx = gen_lowpart (tmode, inner);
7746 else
7747 new_rtx = inner;
7749 else
7750 new_rtx = force_to_mode (inner, tmode,
7751 len >= HOST_BITS_PER_WIDE_INT
7752 ? HOST_WIDE_INT_M1U
7753 : (HOST_WIDE_INT_1U << len) - 1, 0);
7755 /* If this extraction is going into the destination of a SET,
7756 make a STRICT_LOW_PART unless we made a MEM. */
7758 if (in_dest)
7759 return (MEM_P (new_rtx) ? new_rtx
7760 : (GET_CODE (new_rtx) != SUBREG
7761 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7762 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7764 if (mode == tmode)
7765 return new_rtx;
7767 if (CONST_SCALAR_INT_P (new_rtx))
7768 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7769 mode, new_rtx, tmode);
7771 /* If we know that no extraneous bits are set, and that the high
7772 bit is not set, convert the extraction to the cheaper of
7773 sign and zero extension, which are equivalent in these cases. */
7774 if (flag_expensive_optimizations
7775 && (HWI_COMPUTABLE_MODE_P (tmode)
7776 && ((nonzero_bits (new_rtx, tmode)
7777 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7778 == 0)))
7780 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7781 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7783 /* Prefer ZERO_EXTENSION, since it gives more information to
7784 backends. */
7785 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7786 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7787 return temp;
7788 return temp1;
7791 /* Otherwise, sign- or zero-extend unless we already are in the
7792 proper mode. */
7794 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7795 mode, new_rtx));
7798 /* Unless this is a COMPARE or we have a funny memory reference,
7799 don't do anything with zero-extending field extracts starting at
7800 the low-order bit since they are simple AND operations. */
7801 if (pos_rtx == 0 && pos == 0 && ! in_dest
7802 && ! in_compare && unsignedp)
7803 return 0;
7805 /* If INNER is a MEM, reject this if we would be spanning bytes or
7806 if the position is not a constant and the length is not 1. In all
7807 other cases, we would only be going outside our object in cases when
7808 an original shift would have been undefined. */
7809 if (MEM_P (inner)
7810 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7811 || (pos_rtx != 0 && len != 1)))
7812 return 0;
7814 enum extraction_pattern pattern = (in_dest ? EP_insv
7815 : unsignedp ? EP_extzv : EP_extv);
7817 /* If INNER is not from memory, we want it to have the mode of a register
7818 extraction pattern's structure operand, or word_mode if there is no
7819 such pattern. The same applies to extraction_mode and pos_mode
7820 and their respective operands.
7822 For memory, assume that the desired extraction_mode and pos_mode
7823 are the same as for a register operation, since at present we don't
7824 have named patterns for aligned memory structures. */
7825 class extraction_insn insn;
7826 unsigned int inner_size;
7827 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7828 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7830 wanted_inner_reg_mode = insn.struct_mode.require ();
7831 pos_mode = insn.pos_mode;
7832 extraction_mode = insn.field_mode;
7835 /* Never narrow an object, since that might not be safe. */
7837 if (mode != VOIDmode
7838 && partial_subreg_p (extraction_mode, mode))
7839 extraction_mode = mode;
7841 /* Punt if len is too large for extraction_mode. */
7842 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7843 return NULL_RTX;
7845 if (!MEM_P (inner))
7846 wanted_inner_mode = wanted_inner_reg_mode;
7847 else
7849 /* Be careful not to go beyond the extracted object and maintain the
7850 natural alignment of the memory. */
7851 wanted_inner_mode = smallest_int_mode_for_size (len);
7852 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7853 > GET_MODE_BITSIZE (wanted_inner_mode))
7854 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7857 orig_pos = pos;
7859 if (BITS_BIG_ENDIAN)
7861 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7862 BITS_BIG_ENDIAN style. If position is constant, compute new
7863 position. Otherwise, build subtraction.
7864 Note that POS is relative to the mode of the original argument.
7865 If it's a MEM we need to recompute POS relative to that.
7866 However, if we're extracting from (or inserting into) a register,
7867 we want to recompute POS relative to wanted_inner_mode. */
7868 int width;
7869 if (!MEM_P (inner))
7870 width = GET_MODE_BITSIZE (wanted_inner_mode);
7871 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7872 return NULL_RTX;
7874 if (pos_rtx == 0)
7875 pos = width - len - pos;
7876 else
7877 pos_rtx
7878 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7879 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7880 pos_rtx);
7881 /* POS may be less than 0 now, but we check for that below.
7882 Note that it can only be less than 0 if !MEM_P (inner). */
7885 /* If INNER has a wider mode, and this is a constant extraction, try to
7886 make it smaller and adjust the byte to point to the byte containing
7887 the value. */
7888 if (wanted_inner_mode != VOIDmode
7889 && inner_mode != wanted_inner_mode
7890 && ! pos_rtx
7891 && partial_subreg_p (wanted_inner_mode, is_mode)
7892 && MEM_P (inner)
7893 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7894 && ! MEM_VOLATILE_P (inner))
7896 poly_int64 offset = 0;
7898 /* The computations below will be correct if the machine is big
7899 endian in both bits and bytes or little endian in bits and bytes.
7900 If it is mixed, we must adjust. */
7902 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7903 adjust OFFSET to compensate. */
7904 if (BYTES_BIG_ENDIAN
7905 && paradoxical_subreg_p (is_mode, inner_mode))
7906 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7908 /* We can now move to the desired byte. */
7909 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7910 * GET_MODE_SIZE (wanted_inner_mode);
7911 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7913 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7914 && is_mode != wanted_inner_mode)
7915 offset = (GET_MODE_SIZE (is_mode)
7916 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7918 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7921 /* If INNER is not memory, get it into the proper mode. If we are changing
7922 its mode, POS must be a constant and smaller than the size of the new
7923 mode. */
7924 else if (!MEM_P (inner))
7926 /* On the LHS, don't create paradoxical subregs implicitly truncating
7927 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7928 if (in_dest
7929 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7930 wanted_inner_mode))
7931 return NULL_RTX;
7933 if (GET_MODE (inner) != wanted_inner_mode
7934 && (pos_rtx != 0
7935 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7936 return NULL_RTX;
7938 if (orig_pos < 0)
7939 return NULL_RTX;
7941 inner = force_to_mode (inner, wanted_inner_mode,
7942 pos_rtx
7943 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7944 ? HOST_WIDE_INT_M1U
7945 : (((HOST_WIDE_INT_1U << len) - 1)
7946 << orig_pos),
7950 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7951 have to zero extend. Otherwise, we can just use a SUBREG.
7953 We dealt with constant rtxes earlier, so pos_rtx cannot
7954 have VOIDmode at this point. */
7955 if (pos_rtx != 0
7956 && (GET_MODE_SIZE (pos_mode)
7957 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7959 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7960 GET_MODE (pos_rtx));
7962 /* If we know that no extraneous bits are set, and that the high
7963 bit is not set, convert the extraction to the cheaper one - either
7964 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7965 cases. */
7966 if (flag_expensive_optimizations
7967 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7968 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7969 & ~(((unsigned HOST_WIDE_INT)
7970 GET_MODE_MASK (GET_MODE (pos_rtx)))
7971 >> 1))
7972 == 0)))
7974 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7975 GET_MODE (pos_rtx));
7977 /* Prefer ZERO_EXTENSION, since it gives more information to
7978 backends. */
7979 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7980 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7981 temp = temp1;
7983 pos_rtx = temp;
7986 /* Make POS_RTX unless we already have it and it is correct. If we don't
7987 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7988 be a CONST_INT. */
7989 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7990 pos_rtx = orig_pos_rtx;
7992 else if (pos_rtx == 0)
7993 pos_rtx = GEN_INT (pos);
7995 /* Make the required operation. See if we can use existing rtx. */
7996 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7997 extraction_mode, inner, GEN_INT (len), pos_rtx);
7998 if (! in_dest)
7999 new_rtx = gen_lowpart (mode, new_rtx);
8001 return new_rtx;
8004 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
8005 can be commuted with any other operations in X. Return X without
8006 that shift if so. */
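/* For example, with COUNT == 2, (plus (ashift X (const_int 2))
   (const_int 12)) can drop the shift and becomes (plus X (const_int 3)).  */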
8008 static rtx
8009 extract_left_shift (scalar_int_mode mode, rtx x, int count)
8011 enum rtx_code code = GET_CODE (x);
8012 rtx tem;
8014 switch (code)
8016 case ASHIFT:
8017 /* This is the shift itself. If it is wide enough, we will return
8018 either the value being shifted if the shift count is equal to
8019 COUNT or a shift for the difference. */
8020 if (CONST_INT_P (XEXP (x, 1))
8021 && INTVAL (XEXP (x, 1)) >= count)
8022 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
8023 INTVAL (XEXP (x, 1)) - count);
8024 break;
8026 case NEG: case NOT:
8027 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8028 return simplify_gen_unary (code, mode, tem, mode);
8030 break;
8032 case PLUS: case IOR: case XOR: case AND:
8033 /* If we can safely shift this constant and we find the inner shift,
8034 make a new operation. */
8035 if (CONST_INT_P (XEXP (x, 1))
8036 && (UINTVAL (XEXP (x, 1))
8037 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
8038 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8040 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
8041 return simplify_gen_binary (code, mode, tem,
8042 gen_int_mode (val, mode));
8044 break;
8046 default:
8047 break;
8050 return 0;
8053 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
8054 level of the expression and MODE is its mode. IN_CODE is as for
8055 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
8056 that should be used when recursing on operands of *X_PTR.
8058 There are two possible actions:
8060 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
8061 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
8063 - Return a new rtx, which the caller returns directly. */
8065 static rtx
8066 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
8067 enum rtx_code in_code,
8068 enum rtx_code *next_code_ptr)
8070 rtx x = *x_ptr;
8071 enum rtx_code next_code = *next_code_ptr;
8072 enum rtx_code code = GET_CODE (x);
8073 int mode_width = GET_MODE_PRECISION (mode);
8074 rtx rhs, lhs;
8075 rtx new_rtx = 0;
8076 int i;
8077 rtx tem;
8078 scalar_int_mode inner_mode;
8079 bool equality_comparison = false;
8081 if (in_code == EQ)
8083 equality_comparison = true;
8084 in_code = COMPARE;
8087 /* Process depending on the code of this operation. If NEW is set
8088 nonzero, it will be returned. */
8090 switch (code)
8092 case ASHIFT:
8093 /* Convert shifts by constants into multiplications if inside
8094 an address. */
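/* E.g. (ashift X (const_int 2)) appearing inside an address is
   rewritten as (mult X (const_int 4)).  */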
8095 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8096 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8097 && INTVAL (XEXP (x, 1)) >= 0)
8099 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8100 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8102 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8103 if (GET_CODE (new_rtx) == NEG)
8105 new_rtx = XEXP (new_rtx, 0);
8106 multval = -multval;
8108 multval = trunc_int_for_mode (multval, mode);
8109 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8111 break;
8113 case PLUS:
8114 lhs = XEXP (x, 0);
8115 rhs = XEXP (x, 1);
8116 lhs = make_compound_operation (lhs, next_code);
8117 rhs = make_compound_operation (rhs, next_code);
8118 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8120 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8121 XEXP (lhs, 1));
8122 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8124 else if (GET_CODE (lhs) == MULT
8125 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8127 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8128 simplify_gen_unary (NEG, mode,
8129 XEXP (lhs, 1),
8130 mode));
8131 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8133 else
8135 SUBST (XEXP (x, 0), lhs);
8136 SUBST (XEXP (x, 1), rhs);
8138 maybe_swap_commutative_operands (x);
8139 return x;
8141 case MINUS:
8142 lhs = XEXP (x, 0);
8143 rhs = XEXP (x, 1);
8144 lhs = make_compound_operation (lhs, next_code);
8145 rhs = make_compound_operation (rhs, next_code);
8146 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8148 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8149 XEXP (rhs, 1));
8150 return simplify_gen_binary (PLUS, mode, tem, lhs);
8152 else if (GET_CODE (rhs) == MULT
8153 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8155 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8156 simplify_gen_unary (NEG, mode,
8157 XEXP (rhs, 1),
8158 mode));
8159 return simplify_gen_binary (PLUS, mode, tem, lhs);
8161 else
8163 SUBST (XEXP (x, 0), lhs);
8164 SUBST (XEXP (x, 1), rhs);
8165 return x;
8168 case AND:
8169 /* If the second operand is not a constant, we can't do anything
8170 with it. */
8171 if (!CONST_INT_P (XEXP (x, 1)))
8172 break;
8174 /* If the constant is a power of two minus one and the first operand
8175 is a logical right shift, make an extraction. */
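/* E.g. (and (lshiftrt X (const_int 8)) (const_int 255)) is turned into
   an 8-bit ZERO_EXTRACT starting at bit 8 of X.  */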
8176 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8177 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8179 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8180 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8181 i, 1, 0, in_code == COMPARE);
8184 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8185 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8186 && subreg_lowpart_p (XEXP (x, 0))
8187 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8188 &inner_mode)
8189 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8190 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8192 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8193 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8194 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8195 XEXP (inner_x0, 1),
8196 i, 1, 0, in_code == COMPARE);
8198 /* If we narrowed the mode when dropping the subreg, then we lose. */
8199 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8200 new_rtx = NULL;
8202 /* If that didn't give anything, see if the AND simplifies on
8203 its own. */
8204 if (!new_rtx && i >= 0)
8206 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8207 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8208 0, in_code == COMPARE);
8211 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8212 else if ((GET_CODE (XEXP (x, 0)) == XOR
8213 || GET_CODE (XEXP (x, 0)) == IOR)
8214 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8215 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8216 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8218 /* Apply the distributive law, and then try to make extractions. */
8219 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8220 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8221 XEXP (x, 1)),
8222 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8223 XEXP (x, 1)));
8224 new_rtx = make_compound_operation (new_rtx, in_code);
8227 /* If we have (and (rotate X C) M) and C is larger than the number
8228 of bits in M, this is an extraction. */
8230 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8231 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8232 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8233 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8235 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8236 new_rtx = make_extraction (mode, new_rtx,
8237 (GET_MODE_PRECISION (mode)
8238 - INTVAL (XEXP (XEXP (x, 0), 1))),
8239 NULL_RTX, i, 1, 0, in_code == COMPARE);
8242 /* On machines without logical shifts, if the operand of the AND is
8243 a logical shift and our mask turns off all the propagated sign
8244 bits, we can replace the logical shift with an arithmetic shift. */
8245 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8246 && !have_insn_for (LSHIFTRT, mode)
8247 && have_insn_for (ASHIFTRT, mode)
8248 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8249 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8250 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8251 && mode_width <= HOST_BITS_PER_WIDE_INT)
8253 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8255 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8256 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8257 SUBST (XEXP (x, 0),
8258 gen_rtx_ASHIFTRT (mode,
8259 make_compound_operation (XEXP (XEXP (x,
8262 next_code),
8263 XEXP (XEXP (x, 0), 1)));
8266 /* If the constant is one less than a power of two, this might be
8267 representable by an extraction even if no shift is present.
8268 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8269 we are in a COMPARE. */
8270 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8271 new_rtx = make_extraction (mode,
8272 make_compound_operation (XEXP (x, 0),
8273 next_code),
8274 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8276 /* If we are in a comparison and this is an AND with a power of two,
8277 convert this into the appropriate bit extract. */
8278 else if (in_code == COMPARE
8279 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8280 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8281 new_rtx = make_extraction (mode,
8282 make_compound_operation (XEXP (x, 0),
8283 next_code),
8284 i, NULL_RTX, 1, 1, 0, 1);
8286 /* If one operand is a paradoxical subreg of a register or memory and
8287 the constant (limited to the smaller mode) has only zero bits where
8288 the sub expression has known zero bits, this can be expressed as
8289 a zero_extend. */
8290 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8292 rtx sub;
8294 sub = XEXP (XEXP (x, 0), 0);
8295 machine_mode sub_mode = GET_MODE (sub);
8296 int sub_width;
8297 if ((REG_P (sub) || MEM_P (sub))
8298 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8299 && sub_width < mode_width)
8301 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8302 unsigned HOST_WIDE_INT mask;
8304 /* The original AND constant with all the known zero bits set. */
8305 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8306 if ((mask & mode_mask) == mode_mask)
8308 new_rtx = make_compound_operation (sub, next_code);
8309 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8310 1, 0, in_code == COMPARE);
8315 break;
8317 case LSHIFTRT:
8318 /* If the sign bit is known to be zero, replace this with an
8319 arithmetic shift. */
8320 if (have_insn_for (ASHIFTRT, mode)
8321 && ! have_insn_for (LSHIFTRT, mode)
8322 && mode_width <= HOST_BITS_PER_WIDE_INT
8323 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8325 new_rtx = gen_rtx_ASHIFTRT (mode,
8326 make_compound_operation (XEXP (x, 0),
8327 next_code),
8328 XEXP (x, 1));
8329 break;
8332 /* fall through */
8334 case ASHIFTRT:
8335 lhs = XEXP (x, 0);
8336 rhs = XEXP (x, 1);
8338 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8339 this is a SIGN_EXTRACT. */
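/* For SImode, (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
   is an 8-bit SIGN_EXTRACT of the low bits of X, in effect a QImode
   sign extension.  */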
8340 if (CONST_INT_P (rhs)
8341 && GET_CODE (lhs) == ASHIFT
8342 && CONST_INT_P (XEXP (lhs, 1))
8343 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8344 && INTVAL (XEXP (lhs, 1)) >= 0
8345 && INTVAL (rhs) < mode_width)
8347 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8348 new_rtx = make_extraction (mode, new_rtx,
8349 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8350 NULL_RTX, mode_width - INTVAL (rhs),
8351 code == LSHIFTRT, 0, in_code == COMPARE);
8352 break;
8355 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8356 If so, try to merge the shifts into a SIGN_EXTEND. We could
8357 also do this for some cases of SIGN_EXTRACT, but it doesn't
8358 seem worth the effort; the case checked for occurs on Alpha. */
8360 if (!OBJECT_P (lhs)
8361 && ! (GET_CODE (lhs) == SUBREG
8362 && (OBJECT_P (SUBREG_REG (lhs))))
8363 && CONST_INT_P (rhs)
8364 && INTVAL (rhs) >= 0
8365 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8366 && INTVAL (rhs) < mode_width
8367 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8368 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8369 next_code),
8370 0, NULL_RTX, mode_width - INTVAL (rhs),
8371 code == LSHIFTRT, 0, in_code == COMPARE);
8373 break;
8375 case SUBREG:
8376 /* Call ourselves recursively on the inner expression. If we are
8377 narrowing the object and it has a different RTL code from
8378 what it originally did, do this SUBREG as a force_to_mode. */
8380 rtx inner = SUBREG_REG (x), simplified;
8381 enum rtx_code subreg_code = in_code;
8383 /* If the SUBREG is masking of a logical right shift,
8384 make an extraction. */
8385 if (GET_CODE (inner) == LSHIFTRT
8386 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8387 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8388 && CONST_INT_P (XEXP (inner, 1))
8389 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8390 && subreg_lowpart_p (x))
8392 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8393 int width = GET_MODE_PRECISION (inner_mode)
8394 - INTVAL (XEXP (inner, 1));
8395 if (width > mode_width)
8396 width = mode_width;
8397 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8398 width, 1, 0, in_code == COMPARE);
8399 break;
8402 /* If in_code is COMPARE, it isn't always safe to pass it through
8403 to the recursive make_compound_operation call. */
8404 if (subreg_code == COMPARE
8405 && (!subreg_lowpart_p (x)
8406 || GET_CODE (inner) == SUBREG
8407 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8408 is (const_int 0), rather than
8409 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8410 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8411 for non-equality comparisons against 0 is not equivalent
8412 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8413 || (GET_CODE (inner) == AND
8414 && CONST_INT_P (XEXP (inner, 1))
8415 && partial_subreg_p (x)
8416 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8417 >= GET_MODE_BITSIZE (mode) - 1)))
8418 subreg_code = SET;
8420 tem = make_compound_operation (inner, subreg_code);
8422 simplified
8423 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8424 if (simplified)
8425 tem = simplified;
8427 if (GET_CODE (tem) != GET_CODE (inner)
8428 && partial_subreg_p (x)
8429 && subreg_lowpart_p (x))
8431 rtx newer
8432 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8434 /* If we have something other than a SUBREG, we might have
8435 done an expansion, so rerun ourselves. */
8436 if (GET_CODE (newer) != SUBREG)
8437 newer = make_compound_operation (newer, in_code);
8439 /* force_to_mode can expand compounds. If it just re-expanded
8440 the compound, use gen_lowpart to convert to the desired
8441 mode. */
8442 if (rtx_equal_p (newer, x)
8443 /* Likewise if it re-expanded the compound only partially.
8444 This happens for SUBREG of ZERO_EXTRACT if they extract
8445 the same number of bits. */
8446 || (GET_CODE (newer) == SUBREG
8447 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8448 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8449 && GET_CODE (inner) == AND
8450 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8451 return gen_lowpart (GET_MODE (x), tem);
8453 return newer;
8456 if (simplified)
8457 return tem;
8459 break;
8461 default:
8462 break;
8465 if (new_rtx)
8466 *x_ptr = gen_lowpart (mode, new_rtx);
8467 *next_code_ptr = next_code;
8468 return NULL_RTX;
8471 /* Look at the expression rooted at X. Look for expressions
8472 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8473 Form these expressions.
8475 Return the new rtx, usually just X.
8477 Also, for machines like the VAX that don't have logical shift insns,
8478 try to convert logical to arithmetic shift operations in cases where
8479 they are equivalent. This undoes the canonicalizations to logical
8480 shifts done elsewhere.
8482 We try, as much as possible, to re-use rtl expressions to save memory.
8484 IN_CODE says what kind of expression we are processing. Normally, it is
8485 SET. In a memory address it is MEM. When processing the arguments of
8486 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8487 precisely it is an equality comparison against zero. */
8490 make_compound_operation (rtx x, enum rtx_code in_code)
8492 enum rtx_code code = GET_CODE (x);
8493 const char *fmt;
8494 int i, j;
8495 enum rtx_code next_code;
8496 rtx new_rtx, tem;
8498 /* Select the code to be used in recursive calls. Once we are inside an
8499 address, we stay there. If we have a comparison, set to COMPARE,
8500 but once inside, go back to our default of SET. */
8502 next_code = (code == MEM ? MEM
8503 : ((code == COMPARE || COMPARISON_P (x))
8504 && XEXP (x, 1) == const0_rtx) ? COMPARE
8505 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8507 scalar_int_mode mode;
8508 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8510 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8511 &next_code);
8512 if (new_rtx)
8513 return new_rtx;
8514 code = GET_CODE (x);
8517 /* Now recursively process each operand of this operation. We need to
8518 handle ZERO_EXTEND specially so that we don't lose track of the
8519 inner mode. */
8520 if (code == ZERO_EXTEND)
8522 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8523 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8524 new_rtx, GET_MODE (XEXP (x, 0)));
8525 if (tem)
8526 return tem;
8527 SUBST (XEXP (x, 0), new_rtx);
8528 return x;
8531 fmt = GET_RTX_FORMAT (code);
8532 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8533 if (fmt[i] == 'e')
8535 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8536 SUBST (XEXP (x, i), new_rtx);
8538 else if (fmt[i] == 'E')
8539 for (j = 0; j < XVECLEN (x, i); j++)
8541 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8542 SUBST (XVECEXP (x, i, j), new_rtx);
8545 maybe_swap_commutative_operands (x);
8546 return x;
8549 /* Given M see if it is a value that would select a field of bits
8550 within an item, but not the entire word. Return -1 if not.
8551 Otherwise, return the starting position of the field, where 0 is the
8552 low-order bit.
8554 *PLEN is set to the length of the field. */
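/* For example, M == 0x70 (binary 0111 0000) selects a three-bit field
   starting at bit 4, so we return 4 and set *PLEN to 3; M == 0x50 has a
   hole in it, so we return -1.  */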
8556 static int
8557 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8559 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8560 int pos = m ? ctz_hwi (m) : -1;
8561 int len = 0;
8563 if (pos >= 0)
8564 /* Now shift off the low-order zero bits and see if we have a
8565 power of two minus 1. */
8566 len = exact_log2 ((m >> pos) + 1);
8568 if (len <= 0)
8569 pos = -1;
8571 *plen = len;
8572 return pos;
8575 /* If X refers to a register that equals REG in value, replace these
8576 references with REG. */
8577 static rtx
8578 canon_reg_for_combine (rtx x, rtx reg)
8580 rtx op0, op1, op2;
8581 const char *fmt;
8582 int i;
8583 bool copied;
8585 enum rtx_code code = GET_CODE (x);
8586 switch (GET_RTX_CLASS (code))
8588 case RTX_UNARY:
8589 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8590 if (op0 != XEXP (x, 0))
8591 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8592 GET_MODE (reg));
8593 break;
8595 case RTX_BIN_ARITH:
8596 case RTX_COMM_ARITH:
8597 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8598 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8599 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8600 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8601 break;
8603 case RTX_COMPARE:
8604 case RTX_COMM_COMPARE:
8605 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8606 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8607 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8608 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8609 GET_MODE (op0), op0, op1);
8610 break;
8612 case RTX_TERNARY:
8613 case RTX_BITFIELD_OPS:
8614 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8615 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8616 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8617 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8618 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8619 GET_MODE (op0), op0, op1, op2);
8620 /* FALLTHRU */
8622 case RTX_OBJ:
8623 if (REG_P (x))
8625 if (rtx_equal_p (get_last_value (reg), x)
8626 || rtx_equal_p (reg, get_last_value (x)))
8627 return reg;
8628 else
8629 break;
8632 /* fall through */
8634 default:
8635 fmt = GET_RTX_FORMAT (code);
8636 copied = false;
8637 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8638 if (fmt[i] == 'e')
8640 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8641 if (op != XEXP (x, i))
8643 if (!copied)
8645 copied = true;
8646 x = copy_rtx (x);
8648 XEXP (x, i) = op;
8651 else if (fmt[i] == 'E')
8653 int j;
8654 for (j = 0; j < XVECLEN (x, i); j++)
8656 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8657 if (op != XVECEXP (x, i, j))
8659 if (!copied)
8661 copied = true;
8662 x = copy_rtx (x);
8664 XVECEXP (x, i, j) = op;
8669 break;
8672 return x;
8675 /* Return X converted to MODE. If the value is already truncated to
8676 MODE we can just return a subreg even though in the general case we
8677 would need an explicit truncation. */
8679 static rtx
8680 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8682 if (!CONST_INT_P (x)
8683 && partial_subreg_p (mode, GET_MODE (x))
8684 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8685 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8687 /* Bit-cast X into an integer mode. */
8688 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8689 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8690 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8691 x, GET_MODE (x));
8694 return gen_lowpart (mode, x);
8697 /* See if X can be simplified knowing that we will only refer to it in
8698 MODE and will only refer to those bits that are nonzero in MASK.
8699 If other bits are being computed or if masking operations are done
8700 that select a superset of the bits in MASK, they can sometimes be
8701 ignored.
8703 Return a possibly simplified expression, but always convert X to
8704 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8706 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8707 are all off in X. This is used when X will be complemented, by either
8708 NOT, NEG, or XOR. */
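/* As a simple illustration, forcing (and Y (const_int 255)) to SImode
   with MASK == 0xff can drop the AND entirely and return just Y, since
   none of the bits the AND clears are ever looked at.  */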
8710 static rtx
8711 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8712 int just_select)
8714 enum rtx_code code = GET_CODE (x);
8715 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8716 machine_mode op_mode;
8717 unsigned HOST_WIDE_INT nonzero;
8719 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8720 code below will do the wrong thing since the mode of such an
8721 expression is VOIDmode.
8723 Also do nothing if X is a CLOBBER; this can happen if X was
8724 the return value from a call to gen_lowpart. */
8725 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8726 return x;
8728 /* We want to perform the operation in its present mode unless we know
8729 that the operation is valid in MODE, in which case we do the operation
8730 in MODE. */
8731 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8732 && have_insn_for (code, mode))
8733 ? mode : GET_MODE (x));
8735 /* It is not valid to do a right-shift in a narrower mode
8736 than the one it came in with. */
8737 if ((code == LSHIFTRT || code == ASHIFTRT)
8738 && partial_subreg_p (mode, GET_MODE (x)))
8739 op_mode = GET_MODE (x);
8741 /* Truncate MASK to fit OP_MODE. */
8742 if (op_mode)
8743 mask &= GET_MODE_MASK (op_mode);
8745 /* Determine what bits of X are guaranteed to be (non)zero. */
8746 nonzero = nonzero_bits (x, mode);
8748 /* If none of the bits in X are needed, return a zero. */
8749 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8750 x = const0_rtx;
8752 /* If X is a CONST_INT, return a new one. Do this here since the
8753 test below will fail. */
8754 if (CONST_INT_P (x))
8756 if (SCALAR_INT_MODE_P (mode))
8757 return gen_int_mode (INTVAL (x) & mask, mode);
8758 else
8760 x = GEN_INT (INTVAL (x) & mask);
8761 return gen_lowpart_common (mode, x);
8765 /* If X is narrower than MODE and we want all the bits in X's mode, just
8766 get X in the proper mode. */
8767 if (paradoxical_subreg_p (mode, GET_MODE (x))
8768 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8769 return gen_lowpart (mode, x);
8771 /* We can ignore the effect of a SUBREG if it narrows the mode or
8772 if the constant masks to zero all the bits the mode doesn't have. */
8773 if (GET_CODE (x) == SUBREG
8774 && subreg_lowpart_p (x)
8775 && (partial_subreg_p (x)
8776 || (mask
8777 & GET_MODE_MASK (GET_MODE (x))
8778 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8779 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8781 scalar_int_mode int_mode, xmode;
8782 if (is_a <scalar_int_mode> (mode, &int_mode)
8783 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8784 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8785 integer too. */
8786 return force_int_to_mode (x, int_mode, xmode,
8787 as_a <scalar_int_mode> (op_mode),
8788 mask, just_select);
8790 return gen_lowpart_or_truncate (mode, x);
8793 /* Subroutine of force_to_mode that handles cases in which both X and
8794 the result are scalar integers. MODE is the mode of the result,
8795 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8796 is preferred for simplified versions of X. The other arguments
8797 are as for force_to_mode. */
8799 static rtx
8800 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8801 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8802 int just_select)
8804 enum rtx_code code = GET_CODE (x);
8805 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8806 unsigned HOST_WIDE_INT fuller_mask;
8807 rtx op0, op1, temp;
8808 poly_int64 const_op0;
8810 /* When we have an arithmetic operation, or a shift whose count we
8811 do not know, we need to assume that all bits up to the highest-order
8812 bit in MASK will be needed. This is how we form such a mask. */
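/* For instance, a MASK of 0x14 (bits 2 and 4) yields a FULLER_MASK of
   0x1f, since carries out of bits 0-3 can affect bits 2 and 4.  */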
8813 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8814 fuller_mask = HOST_WIDE_INT_M1U;
8815 else
8816 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8817 - 1);
8819 switch (code)
8821 case CLOBBER:
8822 /* If X is a (clobber (const_int)), return it since we know we are
8823 generating something that won't match. */
8824 return x;
8826 case SIGN_EXTEND:
8827 case ZERO_EXTEND:
8828 case ZERO_EXTRACT:
8829 case SIGN_EXTRACT:
8830 x = expand_compound_operation (x);
8831 if (GET_CODE (x) != code)
8832 return force_to_mode (x, mode, mask, next_select);
8833 break;
8835 case TRUNCATE:
8836 /* Similarly for a truncate. */
8837 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8839 case AND:
8840 /* If this is an AND with a constant, convert it into an AND
8841 whose constant is the AND of that constant with MASK. If it
8842 remains an AND of MASK, delete it since it is redundant. */
8844 if (CONST_INT_P (XEXP (x, 1)))
8846 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8847 mask & INTVAL (XEXP (x, 1)));
8848 xmode = op_mode;
8850 /* If X is still an AND, see if it is an AND with a mask that
8851 is just some low-order bits. If so, and it is MASK, we don't
8852 need it. */
8854 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8855 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8856 x = XEXP (x, 0);
8858 /* If it remains an AND, try making another AND with the bits
8859 in the mode mask that aren't in MASK turned on. If the
8860 constant in the AND is wide enough, this might make a
8861 cheaper constant. */
8863 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8864 && GET_MODE_MASK (xmode) != mask
8865 && HWI_COMPUTABLE_MODE_P (xmode))
8867 unsigned HOST_WIDE_INT cval
8868 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8869 rtx y;
8871 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8872 gen_int_mode (cval, xmode));
8873 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8874 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8875 x = y;
8878 break;
8881 goto binop;
8883 case PLUS:
8884 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8885 low-order bits (as in an alignment operation) and FOO is already
8886 aligned to that boundary, mask C1 to that boundary as well.
8887 This may eliminate that PLUS and, later, the AND. */
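/* For instance, with (and (plus FOO (const_int 9)) (const_int -4)):
   if FOO is known to have its two low bits clear, the 9 can be masked
   down to 8 without changing any bit selected by the AND.  */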
8890 unsigned int width = GET_MODE_PRECISION (mode);
8891 unsigned HOST_WIDE_INT smask = mask;
8893 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8894 number, sign extend it. */
8896 if (width < HOST_BITS_PER_WIDE_INT
8897 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8898 smask |= HOST_WIDE_INT_M1U << width;
8900 if (CONST_INT_P (XEXP (x, 1))
8901 && pow2p_hwi (- smask)
8902 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8903 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8904 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8905 (INTVAL (XEXP (x, 1)) & smask)),
8906 mode, smask, next_select);
8909 /* fall through */
8911 case MULT:
8912 /* Substituting into the operands of a widening MULT is not likely to
8913 create RTL matching a machine insn. */
8914 if (code == MULT
8915 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8916 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8917 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8918 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8919 && REG_P (XEXP (XEXP (x, 0), 0))
8920 && REG_P (XEXP (XEXP (x, 1), 0)))
8921 return gen_lowpart_or_truncate (mode, x);
8923 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8924 most significant bit in MASK since carries from those bits will
8925 affect the bits we are interested in. */
8926 mask = fuller_mask;
8927 goto binop;
8929 case MINUS:
8930 /* If X is (minus C Y) where C's least set bit is larger than any bit
8931 in the mask, then we may replace with (neg Y). */
8932 if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8933 && known_alignment (poly_uint64 (const_op0)) > mask)
8935 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8936 return force_to_mode (x, mode, mask, next_select);
8939 /* Similarly, if C contains every bit in the fuller_mask, then we may
8940 replace with (not Y). */
8941 if (CONST_INT_P (XEXP (x, 0))
8942 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8944 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8945 return force_to_mode (x, mode, mask, next_select);
8948 mask = fuller_mask;
8949 goto binop;
8951 case IOR:
8952 case XOR:
8953 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8954 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8955 operation which may be a bitfield extraction. Ensure that the
8956 constant we form is not wider than the mode of X. */
8958 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8959 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8960 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8961 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8962 && CONST_INT_P (XEXP (x, 1))
8963 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8964 + floor_log2 (INTVAL (XEXP (x, 1))))
8965 < GET_MODE_PRECISION (xmode))
8966 && (UINTVAL (XEXP (x, 1))
8967 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8969 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8970 << INTVAL (XEXP (XEXP (x, 0), 1)),
8971 xmode);
8972 temp = simplify_gen_binary (GET_CODE (x), xmode,
8973 XEXP (XEXP (x, 0), 0), temp);
8974 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8975 XEXP (XEXP (x, 0), 1));
8976 return force_to_mode (x, mode, mask, next_select);
8979 binop:
8980 /* For most binary operations, just propagate into the operation and
8981 change the mode if we have an operation of that mode. */
8983 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8984 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8986 /* If we ended up truncating both operands, truncate the result of the
8987 operation instead. */
8988 if (GET_CODE (op0) == TRUNCATE
8989 && GET_CODE (op1) == TRUNCATE)
8991 op0 = XEXP (op0, 0);
8992 op1 = XEXP (op1, 0);
8995 op0 = gen_lowpart_or_truncate (op_mode, op0);
8996 op1 = gen_lowpart_or_truncate (op_mode, op1);
8998 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
9000 x = simplify_gen_binary (code, op_mode, op0, op1);
9001 xmode = op_mode;
9003 break;
9005 case ASHIFT:
9006 /* For left shifts, do the same, but just for the first operand.
9007 However, we cannot do anything with shifts where we cannot
9008 guarantee that the counts are smaller than the size of the mode
9009 because such a count will have a different meaning in a
9010 wider mode. */
9012 if (! (CONST_INT_P (XEXP (x, 1))
9013 && INTVAL (XEXP (x, 1)) >= 0
9014 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
9015 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
9016 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
9017 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
9018 break;
9020 /* If the shift count is a constant and we can do arithmetic in
9021 the mode of the shift, refine which bits we need. Otherwise, use the
9022 conservative form of the mask. */
9023 if (CONST_INT_P (XEXP (x, 1))
9024 && INTVAL (XEXP (x, 1)) >= 0
9025 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
9026 && HWI_COMPUTABLE_MODE_P (op_mode))
9027 mask >>= INTVAL (XEXP (x, 1));
9028 else
9029 mask = fuller_mask;
9031 op0 = gen_lowpart_or_truncate (op_mode,
9032 force_to_mode (XEXP (x, 0), mode,
9033 mask, next_select));
9035 if (op_mode != xmode || op0 != XEXP (x, 0))
9037 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
9038 xmode = op_mode;
9040 break;
9042 case LSHIFTRT:
9043 /* Here we can only do something if the shift count is a constant,
9044 this shift constant is valid for the host, and we can do arithmetic
9045 in OP_MODE. */
9047 if (CONST_INT_P (XEXP (x, 1))
9048 && INTVAL (XEXP (x, 1)) >= 0
9049 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
9050 && HWI_COMPUTABLE_MODE_P (op_mode))
9052 rtx inner = XEXP (x, 0);
9053 unsigned HOST_WIDE_INT inner_mask;
9055 /* Select the mask of the bits we need for the shift operand. */
9056 inner_mask = mask << INTVAL (XEXP (x, 1));
9058 /* We can only change the mode of the shift if we can do arithmetic
9059 in the mode of the shift and INNER_MASK is no wider than the
9060 width of X's mode. */
9061 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
9062 op_mode = xmode;
9064 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9066 if (xmode != op_mode || inner != XEXP (x, 0))
9068 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9069 xmode = op_mode;
9073 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
9074 shift and AND produces only copies of the sign bit (C2 is one less
9075 than a power of two), we can do this with just a shift. */
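/* For instance, in SImode, if FOO is known to have at least 24 copies
   of its sign bit, (and (lshiftrt FOO (const_int 28)) (const_int 7))
   computes the same value as (lshiftrt FOO (const_int 29)): both are
   either 0 or 7.  */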
9077 if (GET_CODE (x) == LSHIFTRT
9078 && CONST_INT_P (XEXP (x, 1))
9079 /* The shift puts one of the sign bit copies in the least significant
9080 bit. */
9081 && ((INTVAL (XEXP (x, 1))
9082 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9083 >= GET_MODE_PRECISION (xmode))
9084 && pow2p_hwi (mask + 1)
9085 /* Number of bits left after the shift must be more than the mask
9086 needs. */
9087 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9088 <= GET_MODE_PRECISION (xmode))
9089 /* Must be more sign bit copies than the mask needs. */
9090 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9091 >= exact_log2 (mask + 1)))
9093 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9094 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9095 gen_int_shift_amount (xmode, nbits));
9097 goto shiftrt;
9099 case ASHIFTRT:
9100 /* If we are just looking for the sign bit, we don't need this shift at
9101 all, even if it has a variable count. */
9102 if (val_signbit_p (xmode, mask))
9103 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9105 /* If this is a shift by a constant, get a mask that contains those bits
9106 that are not copies of the sign bit. We then have two cases: If
9107 MASK only includes those bits, this can be a logical shift, which may
9108 allow simplifications. If MASK is a single-bit field not within
9109 those bits, we are requesting a copy of the sign bit and hence can
9110 shift the sign bit to the appropriate location. */
9112 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9113 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9115 unsigned HOST_WIDE_INT nonzero;
9116 int i;
9118 /* If the considered data is wider than HOST_WIDE_INT, we can't
9119 represent a mask for all its bits in a single scalar.
9120 But we only care about the lower bits, so calculate these. */
9122 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9124 nonzero = HOST_WIDE_INT_M1U;
9126 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9127 is the number of bits a full-width mask would have set.
9128 We need only shift if these are fewer than nonzero can
9129 hold. If not, we must keep all bits set in nonzero. */
9131 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9132 < HOST_BITS_PER_WIDE_INT)
9133 nonzero >>= INTVAL (XEXP (x, 1))
9134 + HOST_BITS_PER_WIDE_INT
9135 - GET_MODE_PRECISION (xmode);
9137 else
9139 nonzero = GET_MODE_MASK (xmode);
9140 nonzero >>= INTVAL (XEXP (x, 1));
9143 if ((mask & ~nonzero) == 0)
9145 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9146 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9147 if (GET_CODE (x) != ASHIFTRT)
9148 return force_to_mode (x, mode, mask, next_select);
9151 else if ((i = exact_log2 (mask)) >= 0)
9153 x = simplify_shift_const
9154 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9155 GET_MODE_PRECISION (xmode) - 1 - i);
9157 if (GET_CODE (x) != ASHIFTRT)
9158 return force_to_mode (x, mode, mask, next_select);
9162 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9163 even if the shift count isn't a constant. */
9164 if (mask == 1)
9165 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9167 shiftrt:
9169 /* If this is a zero- or sign-extension operation that just affects bits
9170 we don't care about, remove it. Be sure the call above returned
9171 something that is still a shift. */
9173 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9174 && CONST_INT_P (XEXP (x, 1))
9175 && INTVAL (XEXP (x, 1)) >= 0
9176 && (INTVAL (XEXP (x, 1))
9177 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9178 && GET_CODE (XEXP (x, 0)) == ASHIFT
9179 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9180 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9181 next_select);
9183 break;
9185 case ROTATE:
9186 case ROTATERT:
9187 /* If the shift count is constant and we can do computations
9188 in the mode of X, compute where the bits we care about are.
9189 Otherwise, we can't do anything. Don't change the mode of
9190 the shift or propagate MODE into the shift, though. */
9191 if (CONST_INT_P (XEXP (x, 1))
9192 && INTVAL (XEXP (x, 1)) >= 0)
9194 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9195 xmode, gen_int_mode (mask, xmode),
9196 XEXP (x, 1));
9197 if (temp && CONST_INT_P (temp))
9198 x = simplify_gen_binary (code, xmode,
9199 force_to_mode (XEXP (x, 0), xmode,
9200 INTVAL (temp), next_select),
9201 XEXP (x, 1));
9203 break;
9205 case NEG:
9206 /* If we just want the low-order bit, the NEG isn't needed since it
9207 won't change the low-order bit. */
9208 if (mask == 1)
9209 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9211 /* We need any bits less significant than the most significant bit in
9212 MASK since carries from those bits will affect the bits we are
9213 interested in. */
9214 mask = fuller_mask;
9215 goto unop;
9217 case NOT:
9218 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9219 same as the XOR case above. Ensure that the constant we form is not
9220 wider than the mode of X. */
9222 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9223 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9224 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9225 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9226 < GET_MODE_PRECISION (xmode))
9227 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9229 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9230 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9231 x = simplify_gen_binary (LSHIFTRT, xmode,
9232 temp, XEXP (XEXP (x, 0), 1));
9234 return force_to_mode (x, mode, mask, next_select);
9237 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9238 use the full mask inside the NOT. */
9239 mask = fuller_mask;
9241 unop:
9242 op0 = gen_lowpart_or_truncate (op_mode,
9243 force_to_mode (XEXP (x, 0), mode, mask,
9244 next_select));
9245 if (op_mode != xmode || op0 != XEXP (x, 0))
9247 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9248 xmode = op_mode;
9250 break;
9252 case NE:
9253 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9254 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9255 which is equal to STORE_FLAG_VALUE. */
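/* For instance, with STORE_FLAG_VALUE == 1,
   (and (ne FOO (const_int 0)) (const_int 1)) can become
   (and FOO (const_int 1)) when FOO is known to be either 0 or 1.  */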
9256 if ((mask & ~STORE_FLAG_VALUE) == 0
9257 && XEXP (x, 1) == const0_rtx
9258 && GET_MODE (XEXP (x, 0)) == mode
9259 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9260 && (nonzero_bits (XEXP (x, 0), mode)
9261 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9262 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9264 break;
9266 case IF_THEN_ELSE:
9267 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9268 written in a narrower mode. We play it safe and do not do so. */
9270 op0 = gen_lowpart_or_truncate (xmode,
9271 force_to_mode (XEXP (x, 1), mode,
9272 mask, next_select));
9273 op1 = gen_lowpart_or_truncate (xmode,
9274 force_to_mode (XEXP (x, 2), mode,
9275 mask, next_select));
9276 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9277 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9278 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9279 op0, op1);
9280 break;
9282 default:
9283 break;
9286 /* Ensure we return a value of the proper mode. */
9287 return gen_lowpart_or_truncate (mode, x);
9290 /* Return nonzero if X is an expression that has one of two values depending on
9291 whether some other value is zero or nonzero. In that case, we return the
9292 value that is being tested, *PTRUE is set to the value if the rtx being
9293 returned has a nonzero value, and *PFALSE is set to the other alternative.
9295 If we return zero, we set *PTRUE and *PFALSE to X. */
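/* For example, if X is (ne A (const_int 0)) we return A itself, with
   *PTRUE set to const_true_rtx and *PFALSE set to const0_rtx.  */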
9297 static rtx
9298 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9300 machine_mode mode = GET_MODE (x);
9301 enum rtx_code code = GET_CODE (x);
9302 rtx cond0, cond1, true0, true1, false0, false1;
9303 unsigned HOST_WIDE_INT nz;
9304 scalar_int_mode int_mode;
9306 /* If we are comparing a value against zero, we are done. */
9307 if ((code == NE || code == EQ)
9308 && XEXP (x, 1) == const0_rtx)
9310 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9311 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9312 return XEXP (x, 0);
9315 /* If this is a unary operation whose operand has one of two values, apply
9316 our opcode to compute those values. */
9317 else if (UNARY_P (x)
9318 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9320 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9321 *pfalse = simplify_gen_unary (code, mode, false0,
9322 GET_MODE (XEXP (x, 0)));
9323 return cond0;
9326 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9327 make can't possibly match and would suppress other optimizations. */
9328 else if (code == COMPARE)
9331 /* If this is a binary operation, see if either side has only one of two
9332 values. If either one does or if both do and they are conditional on
9333 the same value, compute the new true and false values. */
9334 else if (BINARY_P (x))
9336 rtx op0 = XEXP (x, 0);
9337 rtx op1 = XEXP (x, 1);
9338 cond0 = if_then_else_cond (op0, &true0, &false0);
9339 cond1 = if_then_else_cond (op1, &true1, &false1);
9341 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9342 && (REG_P (op0) || REG_P (op1)))
9344 /* Try to enable a simplification by undoing work done by
9345 if_then_else_cond if it converted a REG into something more
9346 complex. */
9347 if (REG_P (op0))
9349 cond0 = 0;
9350 true0 = false0 = op0;
9352 else
9354 cond1 = 0;
9355 true1 = false1 = op1;
9359 if ((cond0 != 0 || cond1 != 0)
9360 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9362 /* If if_then_else_cond returned zero, then true/false are the
9363 same rtl. We must copy one of them to prevent invalid rtl
9364 sharing. */
9365 if (cond0 == 0)
9366 true0 = copy_rtx (true0);
9367 else if (cond1 == 0)
9368 true1 = copy_rtx (true1);
9370 if (COMPARISON_P (x))
9372 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9373 true0, true1);
9374 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9375 false0, false1);
9377 else
9379 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9380 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9383 return cond0 ? cond0 : cond1;
9386 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9387 operands is zero when the other is nonzero, and vice-versa,
9388 and STORE_FLAG_VALUE is 1 or -1. */
9390 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9391 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9392 || code == UMAX)
9393 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9395 rtx op0 = XEXP (XEXP (x, 0), 1);
9396 rtx op1 = XEXP (XEXP (x, 1), 1);
9398 cond0 = XEXP (XEXP (x, 0), 0);
9399 cond1 = XEXP (XEXP (x, 1), 0);
9401 if (COMPARISON_P (cond0)
9402 && COMPARISON_P (cond1)
9403 && SCALAR_INT_MODE_P (mode)
9404 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9405 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9406 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9407 || ((swap_condition (GET_CODE (cond0))
9408 == reversed_comparison_code (cond1, NULL))
9409 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9410 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9411 && ! side_effects_p (x))
9413 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9414 *pfalse = simplify_gen_binary (MULT, mode,
9415 (code == MINUS
9416 ? simplify_gen_unary (NEG, mode,
9417 op1, mode)
9418 : op1),
9419 const_true_rtx);
9420 return cond0;
9424 /* Similarly for MULT, AND and UMIN, except that for these the result
9425 is always zero. */
9426 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9427 && (code == MULT || code == AND || code == UMIN)
9428 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9430 cond0 = XEXP (XEXP (x, 0), 0);
9431 cond1 = XEXP (XEXP (x, 1), 0);
9433 if (COMPARISON_P (cond0)
9434 && COMPARISON_P (cond1)
9435 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9436 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9437 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9438 || ((swap_condition (GET_CODE (cond0))
9439 == reversed_comparison_code (cond1, NULL))
9440 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9441 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9442 && ! side_effects_p (x))
9444 *ptrue = *pfalse = const0_rtx;
9445 return cond0;
9450 else if (code == IF_THEN_ELSE)
9452 /* If we have IF_THEN_ELSE already, extract the condition and
9453 canonicalize it if it is NE or EQ. */
9454 cond0 = XEXP (x, 0);
9455 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9456 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9457 return XEXP (cond0, 0);
9458 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9460 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9461 return XEXP (cond0, 0);
9463 else
9464 return cond0;
9467 /* If X is a SUBREG, we can narrow both the true and false values
9468 of the inner expression, if there is a condition. */
9469 else if (code == SUBREG
9470 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9471 &false0)) != 0)
9473 true0 = simplify_gen_subreg (mode, true0,
9474 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9475 false0 = simplify_gen_subreg (mode, false0,
9476 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9477 if (true0 && false0)
9479 *ptrue = true0;
9480 *pfalse = false0;
9481 return cond0;
9485 /* If X is a constant, this isn't special and will cause confusions
9486 if we treat it as such. Likewise if it is equivalent to a constant. */
9487 else if (CONSTANT_P (x)
9488 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9491 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9492 will be least confusing to the rest of the compiler. */
9493 else if (mode == BImode)
9495 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9496 return x;
9499 /* If X is known to be either 0 or -1, those are the true and
9500 false values when testing X. */
9501 else if (x == constm1_rtx || x == const0_rtx
9502 || (is_a <scalar_int_mode> (mode, &int_mode)
9503 && (num_sign_bit_copies (x, int_mode)
9504 == GET_MODE_PRECISION (int_mode))))
9506 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9507 return x;
9510 /* Likewise for 0 or a single bit. */
9511 else if (HWI_COMPUTABLE_MODE_P (mode)
9512 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9514 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9515 return x;
9518 /* Otherwise fail; show no condition with true and false values the same. */
9519 *ptrue = *pfalse = x;
9520 return 0;
9523 /* Return the value of expression X given the fact that condition COND
9524 is known to be true when applied to REG as its first operand and VAL
9525 as its second. X is known to not be shared and so can be modified in
9526 place.
9528 We only handle the simplest cases, and specifically those cases that
9529 arise with IF_THEN_ELSE expressions. */
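/* For example, if X is (abs A), COND is GE, REG is A and VAL is
   (const_int 0), then A is known to be nonnegative and (abs A) can be
   replaced by A itself.  */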
9531 static rtx
9532 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9534 enum rtx_code code = GET_CODE (x);
9535 const char *fmt;
9536 int i, j;
9538 if (side_effects_p (x))
9539 return x;
9541 /* If either operand of the condition is a floating point value,
9542 then we have to avoid collapsing an EQ comparison. */
9543 if (cond == EQ
9544 && rtx_equal_p (x, reg)
9545 && ! FLOAT_MODE_P (GET_MODE (x))
9546 && ! FLOAT_MODE_P (GET_MODE (val)))
9547 return val;
9549 if (cond == UNEQ && rtx_equal_p (x, reg))
9550 return val;
9552 /* If X is (abs REG) and we know something about REG's relationship
9553 with zero, we may be able to simplify this. */
9555 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9556 switch (cond)
9558 case GE: case GT: case EQ:
9559 return XEXP (x, 0);
9560 case LT: case LE:
9561 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9562 XEXP (x, 0),
9563 GET_MODE (XEXP (x, 0)));
9564 default:
9565 break;
9568 /* The only other cases we handle are MIN, MAX, and comparisons if the
9569 operands are the same as REG and VAL. */
9571 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9573 if (rtx_equal_p (XEXP (x, 0), val))
9575 std::swap (val, reg);
9576 cond = swap_condition (cond);
9579 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9581 if (COMPARISON_P (x))
9583 if (comparison_dominates_p (cond, code))
9584 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9586 code = reversed_comparison_code (x, NULL);
9587 if (code != UNKNOWN
9588 && comparison_dominates_p (cond, code))
9589 return CONST0_RTX (GET_MODE (x));
9590 else
9591 return x;
9593 else if (code == SMAX || code == SMIN
9594 || code == UMIN || code == UMAX)
9596 int unsignedp = (code == UMIN || code == UMAX);
9598 /* Do not reverse the condition when it is NE or EQ.
9599 This is because we cannot conclude anything about
9600 the value of 'SMAX (x, y)' when x is not equal to y,
9601 but we can when x equals y. */
9602 if ((code == SMAX || code == UMAX)
9603 && ! (cond == EQ || cond == NE))
9604 cond = reverse_condition (cond);
9606 switch (cond)
9608 case GE: case GT:
9609 return unsignedp ? x : XEXP (x, 1);
9610 case LE: case LT:
9611 return unsignedp ? x : XEXP (x, 0);
9612 case GEU: case GTU:
9613 return unsignedp ? XEXP (x, 1) : x;
9614 case LEU: case LTU:
9615 return unsignedp ? XEXP (x, 0) : x;
9616 default:
9617 break;
9622 else if (code == SUBREG)
9624 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9625 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9627 if (SUBREG_REG (x) != r)
9629 /* We must simplify subreg here, before we lose track of the
9630 original inner_mode. */
9631 new_rtx = simplify_subreg (GET_MODE (x), r,
9632 inner_mode, SUBREG_BYTE (x));
9633 if (new_rtx)
9634 return new_rtx;
9635 else
9636 SUBST (SUBREG_REG (x), r);
9639 return x;
9641 /* We don't have to handle SIGN_EXTEND here, because even in the
9642 case of replacing something with a modeless CONST_INT, a
9643 CONST_INT is already (supposed to be) a valid sign extension for
9644 its narrower mode, which implies it's already properly
9645 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9646 story is different. */
9647 else if (code == ZERO_EXTEND)
9649 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9650 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9652 if (XEXP (x, 0) != r)
9654 /* We must simplify the zero_extend here, before we lose
9655 track of the original inner_mode. */
9656 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9657 r, inner_mode);
9658 if (new_rtx)
9659 return new_rtx;
9660 else
9661 SUBST (XEXP (x, 0), r);
9664 return x;
9667 fmt = GET_RTX_FORMAT (code);
9668 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9670 if (fmt[i] == 'e')
9671 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9672 else if (fmt[i] == 'E')
9673 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9674 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9675 cond, reg, val));
9678 return x;
9681 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9682 assignment as a field assignment. */
9684 static int
9685 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9687 if (widen_x && GET_MODE (x) != GET_MODE (y))
9689 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9690 return 0;
9691 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9692 return 0;
9693 x = adjust_address_nv (x, GET_MODE (y),
9694 byte_lowpart_offset (GET_MODE (y),
9695 GET_MODE (x)));
9698 if (x == y || rtx_equal_p (x, y))
9699 return 1;
9701 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9702 return 0;
9704 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9705 Note that all SUBREGs of MEM are paradoxical; otherwise they
9706 would have been rewritten. */
9707 if (MEM_P (x) && GET_CODE (y) == SUBREG
9708 && MEM_P (SUBREG_REG (y))
9709 && rtx_equal_p (SUBREG_REG (y),
9710 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9711 return 1;
9713 if (MEM_P (y) && GET_CODE (x) == SUBREG
9714 && MEM_P (SUBREG_REG (x))
9715 && rtx_equal_p (SUBREG_REG (x),
9716 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9717 return 1;
9719 /* We used to see if get_last_value of X and Y were the same but that's
9720 not correct. In one direction, we'll cause the assignment to have
9721 the wrong destination and in the other case, we'll import a register into
9722 this insn that might already have been dead. So fail if none of the
9723 above cases are true. */
9724 return 0;
9727 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9728 Return that assignment if so.
9730 We only handle the most common cases. */
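/* For instance, (set D (ior (ashift (const_int 1) POS) D)) sets a
   single bit of D and is typically rewritten here as a SET of a
   one-bit ZERO_EXTRACT of D at position POS to (const_int 1).  */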
9732 static rtx
9733 make_field_assignment (rtx x)
9735 rtx dest = SET_DEST (x);
9736 rtx src = SET_SRC (x);
9737 rtx assign;
9738 rtx rhs, lhs;
9739 HOST_WIDE_INT c1;
9740 HOST_WIDE_INT pos;
9741 unsigned HOST_WIDE_INT len;
9742 rtx other;
9744 /* All the rules in this function are specific to scalar integers. */
9745 scalar_int_mode mode;
9746 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9747 return x;
9749 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9750 a clear of a one-bit field. We will have changed it to
9751 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9752 for a SUBREG. */
9754 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9755 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9756 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9757 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9759 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9760 1, 1, 1, 0);
9761 if (assign != 0)
9762 return gen_rtx_SET (assign, const0_rtx);
9763 return x;
9766 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9767 && subreg_lowpart_p (XEXP (src, 0))
9768 && partial_subreg_p (XEXP (src, 0))
9769 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9770 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9771 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9772 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9774 assign = make_extraction (VOIDmode, dest, 0,
9775 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9776 1, 1, 1, 0);
9777 if (assign != 0)
9778 return gen_rtx_SET (assign, const0_rtx);
9779 return x;
9782 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9783 one-bit field. */
9784 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9785 && XEXP (XEXP (src, 0), 0) == const1_rtx
9786 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9788 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9789 1, 1, 1, 0);
9790 if (assign != 0)
9791 return gen_rtx_SET (assign, const1_rtx);
9792 return x;
9795 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9796 SRC is an AND with all bits of that field set, then we can discard
9797 the AND. */
9798 if (GET_CODE (dest) == ZERO_EXTRACT
9799 && CONST_INT_P (XEXP (dest, 1))
9800 && GET_CODE (src) == AND
9801 && CONST_INT_P (XEXP (src, 1)))
9803 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9804 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9805 unsigned HOST_WIDE_INT ze_mask;
9807 if (width >= HOST_BITS_PER_WIDE_INT)
9808 ze_mask = -1;
9809 else
9810 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9812 /* Complete overlap. We can remove the source AND. */
9813 if ((and_mask & ze_mask) == ze_mask)
9814 return gen_rtx_SET (dest, XEXP (src, 0));
9816 /* Partial overlap. We can reduce the source AND. */
9817 if ((and_mask & ze_mask) != and_mask)
9819 src = gen_rtx_AND (mode, XEXP (src, 0),
9820 gen_int_mode (and_mask & ze_mask, mode));
9821 return gen_rtx_SET (dest, src);
9825 /* The other case we handle is assignments into a constant-position
9826 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9827 a mask that has all one bits except for a group of zero bits and
9828 OTHER is known to have zeros where C1 has ones, this is such an
9829 assignment. Compute the position and length from C1. Shift OTHER
9830 to the appropriate position, force it to the required mode, and
9831 make the extraction. Check for the AND in both operands. */
9833 /* One or more SUBREGs might obscure the constant-position field
9834 assignment. The first one we are likely to encounter is an outer
9835 narrowing SUBREG, which we can just strip for the purposes of
9836 identifying the constant-field assignment. */
9837 scalar_int_mode src_mode = mode;
9838 if (GET_CODE (src) == SUBREG
9839 && subreg_lowpart_p (src)
9840 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9841 src = SUBREG_REG (src);
9843 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9844 return x;
9846 rhs = expand_compound_operation (XEXP (src, 0));
9847 lhs = expand_compound_operation (XEXP (src, 1));
9849 if (GET_CODE (rhs) == AND
9850 && CONST_INT_P (XEXP (rhs, 1))
9851 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9852 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9853 /* The second SUBREG that might get in the way is a paradoxical
9854 SUBREG around the first operand of the AND. We want to
9855 pretend the operand is as wide as the destination here. We
9856 do this by adjusting the MEM to wider mode for the sole
9857 purpose of the call to rtx_equal_for_field_assignment_p. Also
9858 note this trick only works for MEMs. */
9859 else if (GET_CODE (rhs) == AND
9860 && paradoxical_subreg_p (XEXP (rhs, 0))
9861 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9862 && CONST_INT_P (XEXP (rhs, 1))
9863 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9864 dest, true))
9865 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9866 else if (GET_CODE (lhs) == AND
9867 && CONST_INT_P (XEXP (lhs, 1))
9868 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9869 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9870 /* The second SUBREG that might get in the way is a paradoxical
9871 SUBREG around the first operand of the AND. We want to
9872 pretend the operand is as wide as the destination here. We
9873 do this by adjusting the MEM to wider mode for the sole
9874 purpose of the call to rtx_equal_for_field_assignment_p. Also
9875 note this trick only works for MEMs. */
9876 else if (GET_CODE (lhs) == AND
9877 && paradoxical_subreg_p (XEXP (lhs, 0))
9878 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9879 && CONST_INT_P (XEXP (lhs, 1))
9880 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9881 dest, true))
9882 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9883 else
9884 return x;
9886 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9887 if (pos < 0
9888 || pos + len > GET_MODE_PRECISION (mode)
9889 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9890 || (c1 & nonzero_bits (other, mode)) != 0)
9891 return x;
9893 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9894 if (assign == 0)
9895 return x;
9897 /* The mode to use for the source is the mode of the assignment, or of
9898 what is inside a possible STRICT_LOW_PART. */
9899 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9900 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9902 /* Shift OTHER right POS places and make it the source, restricting it
9903 to the proper length and mode. */
9905 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9906 src_mode, other, pos),
9907 dest);
9908 src = force_to_mode (src, new_mode,
9909 len >= HOST_BITS_PER_WIDE_INT
9910 ? HOST_WIDE_INT_M1U
9911 : (HOST_WIDE_INT_1U << len) - 1,
9914 /* If SRC is masked by an AND that does not make a difference in
9915 the value being stored, strip it. */
9916 if (GET_CODE (assign) == ZERO_EXTRACT
9917 && CONST_INT_P (XEXP (assign, 1))
9918 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9919 && GET_CODE (src) == AND
9920 && CONST_INT_P (XEXP (src, 1))
9921 && UINTVAL (XEXP (src, 1))
9922 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9923 src = XEXP (src, 0);
9925 return gen_rtx_SET (assign, src);
9928 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9929 if so. */
9931 static rtx
9932 apply_distributive_law (rtx x)
9934 enum rtx_code code = GET_CODE (x);
9935 enum rtx_code inner_code;
9936 rtx lhs, rhs, other;
9937 rtx tem;
9939 /* Distributivity is not true for floating point as it can change the
9940 value. So we don't do it unless -funsafe-math-optimizations. */
9941 if (FLOAT_MODE_P (GET_MODE (x))
9942 && ! flag_unsafe_math_optimizations)
9943 return x;
9945 /* The outer operation can only be one of the following: */
9946 if (code != IOR && code != AND && code != XOR
9947 && code != PLUS && code != MINUS)
9948 return x;
9950 lhs = XEXP (x, 0);
9951 rhs = XEXP (x, 1);
9953 /* If either operand is a primitive we can't do anything, so get out
9954 fast. */
9955 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9956 return x;
9958 lhs = expand_compound_operation (lhs);
9959 rhs = expand_compound_operation (rhs);
9960 inner_code = GET_CODE (lhs);
9961 if (inner_code != GET_CODE (rhs))
9962 return x;
9964 /* See if the inner and outer operations distribute. */
9965 switch (inner_code)
9967 case LSHIFTRT:
9968 case ASHIFTRT:
9969 case AND:
9970 case IOR:
9971 /* These all distribute except over PLUS. */
9972 if (code == PLUS || code == MINUS)
9973 return x;
9974 break;
9976 case MULT:
9977 if (code != PLUS && code != MINUS)
9978 return x;
9979 break;
9981 case ASHIFT:
9982 /* This is also a multiply, so it distributes over everything. */
9983 break;
9985 /* This used to handle SUBREG, but this turned out to be counter-
9986 productive, since (subreg (op ...)) usually is not handled by
9987 insn patterns, and this "optimization" therefore transformed
9988 recognizable patterns into unrecognizable ones. Therefore the
9989 SUBREG case was removed from here.
9991 It is possible that distributing SUBREG over arithmetic operations
9992 leads to an intermediate result that can then be optimized further,
9993 e.g. by moving the outer SUBREG to the other side of a SET as done
9994 in simplify_set. This seems to have been the original intent of
9995 handling SUBREGs here.
9997 However, with current GCC this does not appear to actually happen,
9998 at least on major platforms. If some case is found where removing
9999 the SUBREG case here prevents follow-on optimizations, distributing
10000 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
10002 default:
10003 return x;
10006 /* Set LHS and RHS to the inner operands (A and B in the example
10007 above) and set OTHER to the common operand (C in the example).
10008 There is only one way to do this unless the inner operation is
10009 commutative. */
10010 if (COMMUTATIVE_ARITH_P (lhs)
10011 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
10012 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
10013 else if (COMMUTATIVE_ARITH_P (lhs)
10014 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
10015 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
10016 else if (COMMUTATIVE_ARITH_P (lhs)
10017 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
10018 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
10019 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
10020 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
10021 else
10022 return x;
10024 /* Form the new inner operation, seeing if it simplifies first. */
10025 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
10027 /* There is one exception to the general way of distributing:
10028 (a | c) ^ (b | c) -> (a ^ b) & ~c */
10029 if (code == XOR && inner_code == IOR)
10031 inner_code = AND;
10032 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
10035 /* We may be able to continue distributing the result, so call
10036 ourselves recursively on the inner operation before forming the
10037 outer operation, which we return. */
10038 return simplify_gen_binary (inner_code, GET_MODE (x),
10039 apply_distributive_law (tem), other);
10042 /* See if X is of the form (* (+ A B) C), and if so convert to
10043 (+ (* A C) (* B C)) and try to simplify.
10045 Most of the time, this results in no change. However, if some of
10046 the operands are the same or inverses of each other, simplifications
10047 will result.
10049 For example, (and (ior A B) (not B)) can occur as the result of
10050 expanding a bit field assignment. When we apply the distributive
10051 law to this, we get (ior (and A (not B)) (and B (not B))),
10052 which then simplifies to (and A (not B)).
10054 Note that no checks happen on the validity of applying the inverse
10055 distributive law. This is pointless since we can do it in the
10056 few places where this routine is called.
10058 N is the index of the term that is decomposed (the arithmetic operation,
10059 i.e. (+ A B) in the first example above). !N is the index of the term that
10060 is distributed, i.e. of C in the first example above. */
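/* For instance, with X = (and (ior A B) C) and N == 0, the decomposed
   term is (ior A B), C is distributed to give (ior (and A C) (and B C)),
   and the result is returned only if it is cheaper than X.  */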
10061 static rtx
10062 distribute_and_simplify_rtx (rtx x, int n)
10064 machine_mode mode;
10065 enum rtx_code outer_code, inner_code;
10066 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
10068 /* Distributivity is not true for floating point as it can change the
10069 value. So we don't do it unless -funsafe-math-optimizations. */
10070 if (FLOAT_MODE_P (GET_MODE (x))
10071 && ! flag_unsafe_math_optimizations)
10072 return NULL_RTX;
10074 decomposed = XEXP (x, n);
10075 if (!ARITHMETIC_P (decomposed))
10076 return NULL_RTX;
10078 mode = GET_MODE (x);
10079 outer_code = GET_CODE (x);
10080 distributed = XEXP (x, !n);
10082 inner_code = GET_CODE (decomposed);
10083 inner_op0 = XEXP (decomposed, 0);
10084 inner_op1 = XEXP (decomposed, 1);
10086 /* Special case (and (xor B C) (not A)), which is equivalent to
10087 (xor (ior A B) (ior A C)) */
10088 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10090 distributed = XEXP (distributed, 0);
10091 outer_code = IOR;
10094 if (n == 0)
10096 /* Distribute the second term. */
10097 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10098 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10100 else
10102 /* Distribute the first term. */
10103 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10104 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10107 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10108 new_op0, new_op1));
10109 if (GET_CODE (tmp) != outer_code
10110 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10111 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10112 return tmp;
10114 return NULL_RTX;
10117 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10118 in MODE. Return an equivalent form, if different from (and VAROP
10119 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
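/* For instance, if VAROP is (neg X) where X is known to be 0 or 1 and
   CONSTOP is 4, the result is (ashift X (const_int 2)): negating X gives
   0 or all ones, and masking that with 4 is the same as X << 2.  */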
10121 static rtx
10122 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10123 unsigned HOST_WIDE_INT constop)
10125 unsigned HOST_WIDE_INT nonzero;
10126 unsigned HOST_WIDE_INT orig_constop;
10127 rtx orig_varop;
10128 int i;
10130 orig_varop = varop;
10131 orig_constop = constop;
10132 if (GET_CODE (varop) == CLOBBER)
10133 return NULL_RTX;
10135 /* Simplify VAROP knowing that we will only be looking at some of the
10136 bits in it.
10138 Note by passing in CONSTOP, we guarantee that the bits not set in
10139 CONSTOP are not significant and will never be examined. We must
10140 ensure that is the case by explicitly masking out those bits
10141 before returning. */
10142 varop = force_to_mode (varop, mode, constop, 0);
10144 /* If VAROP is a CLOBBER, we will fail so return it. */
10145 if (GET_CODE (varop) == CLOBBER)
10146 return varop;
10148 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10149 to VAROP and return the new constant. */
10150 if (CONST_INT_P (varop))
10151 return gen_int_mode (INTVAL (varop) & constop, mode);
10153 /* See what bits may be nonzero in VAROP. Unlike the general case of
10154 a call to nonzero_bits, here we don't care about bits outside
10155 MODE. */
10157 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10159 /* Turn off all bits in the constant that are known to already be zero.
10160 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10161 which is tested below. */
10163 constop &= nonzero;
10165 /* If we don't have any bits left, return zero. */
10166 if (constop == 0)
10167 return const0_rtx;
10169 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10170 a power of two, we can replace this with an ASHIFT. */
10171 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10172 && (i = exact_log2 (constop)) >= 0)
10173 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10175 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10176 or XOR, then try to apply the distributive law. This may eliminate
10177 operations if either branch can be simplified because of the AND.
10178 It may also make some cases more complex, but those cases probably
10179 won't match a pattern either with or without this. */
10181 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10183 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10184 return
10185 gen_lowpart
10186 (mode,
10187 apply_distributive_law
10188 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10189 simplify_and_const_int (NULL_RTX, varop_mode,
10190 XEXP (varop, 0),
10191 constop),
10192 simplify_and_const_int (NULL_RTX, varop_mode,
10193 XEXP (varop, 1),
10194 constop))));
10197 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10198 the AND and see if one of the operands simplifies to zero. If so, we
10199 may eliminate it. */
10201 if (GET_CODE (varop) == PLUS
10202 && pow2p_hwi (constop + 1))
10204 rtx o0, o1;
10206 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10207 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10208 if (o0 == const0_rtx)
10209 return o1;
10210 if (o1 == const0_rtx)
10211 return o0;
10214 /* Make a SUBREG if necessary. If we can't make it, fail. */
10215 varop = gen_lowpart (mode, varop);
10216 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10217 return NULL_RTX;
10219 /* If we are only masking insignificant bits, return VAROP. */
10220 if (constop == nonzero)
10221 return varop;
10223 if (varop == orig_varop && constop == orig_constop)
10224 return NULL_RTX;
10226 /* Otherwise, return an AND. */
10227 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10231 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10232 in MODE.
10234 Return an equivalent form, if different from X. Otherwise, return X. If
10235 X is zero, we are to always construct the equivalent form. */
10237 static rtx
10238 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10239 unsigned HOST_WIDE_INT constop)
10241 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10242 if (tem)
10243 return tem;
10245 if (!x)
10246 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10247 gen_int_mode (constop, mode));
10248 if (GET_MODE (x) != mode)
10249 x = gen_lowpart (mode, x);
10250 return x;
10253 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10254 We don't care about bits outside of those defined in MODE.
10255 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10257 For most X this is simply GET_MODE_MASK (MODE), but if X is
10258 a shift, AND, or zero_extract, we can do better. */
10260 static rtx
10261 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10262 scalar_int_mode mode,
10263 unsigned HOST_WIDE_INT *nonzero)
10265 rtx tem;
10266 reg_stat_type *rsp;
10268 /* If X is a register whose nonzero bits value is current, use it.
10269 Otherwise, if X is a register whose value we can find, use that
10270 value. Otherwise, use the previously-computed global nonzero bits
10271 for this register. */
10273 rsp = &reg_stat[REGNO (x)];
10274 if (rsp->last_set_value != 0
10275 && (rsp->last_set_mode == mode
10276 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10277 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10278 && GET_MODE_CLASS (mode) == MODE_INT))
10279 && ((rsp->last_set_label >= label_tick_ebb_start
10280 && rsp->last_set_label < label_tick)
10281 || (rsp->last_set_label == label_tick
10282 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10283 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10284 && REGNO (x) < reg_n_sets_max
10285 && REG_N_SETS (REGNO (x)) == 1
10286 && !REGNO_REG_SET_P
10287 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10288 REGNO (x)))))
10290 /* Note that, even if the precision of last_set_mode is lower than that
10291 of mode, record_value_for_reg invoked nonzero_bits on the register
10292 with nonzero_bits_mode (because last_set_mode is necessarily integral
10293 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10294 are all valid, hence in mode too since nonzero_bits_mode is defined
10295 to the largest HWI_COMPUTABLE_MODE_P mode. */
10296 *nonzero &= rsp->last_set_nonzero_bits;
10297 return NULL;
10300 tem = get_last_value (x);
10301 if (tem)
10303 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10304 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10306 return tem;
10309 if (nonzero_sign_valid && rsp->nonzero_bits)
10311 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10313 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10314 /* We don't know anything about the upper bits. */
10315 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10317 *nonzero &= mask;
10320 return NULL;
10323 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10324 end of X that are known to be equal to the sign bit. X will be used
10325 in mode MODE; the returned value will always be between 1 and the
10326 number of bits in MODE. */
10328 static rtx
10329 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10330 scalar_int_mode mode,
10331 unsigned int *result)
10333 rtx tem;
10334 reg_stat_type *rsp;
10336 rsp = &reg_stat[REGNO (x)];
10337 if (rsp->last_set_value != 0
10338 && rsp->last_set_mode == mode
10339 && ((rsp->last_set_label >= label_tick_ebb_start
10340 && rsp->last_set_label < label_tick)
10341 || (rsp->last_set_label == label_tick
10342 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10343 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10344 && REGNO (x) < reg_n_sets_max
10345 && REG_N_SETS (REGNO (x)) == 1
10346 && !REGNO_REG_SET_P
10347 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10348 REGNO (x)))))
10350 *result = rsp->last_set_sign_bit_copies;
10351 return NULL;
10354 tem = get_last_value (x);
10355 if (tem != 0)
10356 return tem;
10358 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10359 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10360 *result = rsp->sign_bit_copies;
10362 return NULL;
10365 /* Return the number of "extended" bits there are in X, when interpreted
10366 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10367 unsigned quantities, this is the number of high-order zero bits.
10368 For signed quantities, this is the number of copies of the sign bit
10369 minus 1.  In both cases, this function returns the number of "spare"
10370 bits. For example, if two quantities for which this function returns
10371 at least 1 are added, the addition is known not to overflow.
10373 This function will always return 0 unless called during combine, which
10374 implies that it must be called from a define_split. */
10376 unsigned int
10377 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10379 if (nonzero_sign_valid == 0)
10380 return 0;
10382 scalar_int_mode int_mode;
10383 return (unsignedp
10384 ? (is_a <scalar_int_mode> (mode, &int_mode)
10385 && HWI_COMPUTABLE_MODE_P (int_mode)
10386 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10387 - floor_log2 (nonzero_bits (x, int_mode)))
10388 : 0)
10389 : num_sign_bit_copies (x, mode) - 1);
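/* Worked example (hypothetical operand): for an unsigned QImode value
   whose nonzero_bits are 0x0f, floor_log2 gives 3, so extended_count
   returns 8 - 1 - 3 = 4 spare high-order zero bits; two such values can
   be added without overflowing QImode.  */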
10392 /* This function is called from `simplify_shift_const' to merge two
10393 outer operations. Specifically, we have already found that we need
10394 to perform operation *POP0 with constant *PCONST0 at the outermost
10395 position. We would now like to also perform OP1 with constant CONST1
10396 (with *POP0 being done last).
10398 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10399 the resulting operation. *PCOMP_P is set to 1 if we would need to
10400 complement the innermost operand, otherwise it is unchanged.
10402 MODE is the mode in which the operation will be done. No bits outside
10403 the width of this mode matter. It is assumed that the width of this mode
10404 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10406 If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG, PLUS,
10407 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10408 result is simply *PCONST0.
10410 If the resulting operation cannot be expressed as one operation, we
10411 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10413 static int
10414 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
10415 		  HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10416 enum rtx_code op0 = *pop0;
10417 HOST_WIDE_INT const0 = *pconst0;
10419 const0 &= GET_MODE_MASK (mode);
10420 const1 &= GET_MODE_MASK (mode);
10422 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10423 if (op0 == AND)
10424 const1 &= const0;
10426 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10427 if OP0 is SET. */
10429 if (op1 == UNKNOWN || op0 == SET)
10430 return 1;
10432 else if (op0 == UNKNOWN)
10433 op0 = op1, const0 = const1;
10435 else if (op0 == op1)
10437 switch (op0)
10439 case AND:
10440 const0 &= const1;
10441 break;
10442 case IOR:
10443 const0 |= const1;
10444 break;
10445 case XOR:
10446 const0 ^= const1;
10447 break;
10448 case PLUS:
10449 const0 += const1;
10450 break;
10451 case NEG:
10452 op0 = UNKNOWN;
10453 break;
10454 default:
10455 break;
10459 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10460 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10461 return 0;
10463 /* If the two constants aren't the same, we can't do anything. The
10464 remaining six cases can all be done. */
10465 else if (const0 != const1)
10466 return 0;
10468 else
10469 switch (op0)
10471 case IOR:
10472 if (op1 == AND)
10473 /* (a & b) | b == b */
10474 op0 = SET;
10475 else /* op1 == XOR */
10476 /* (a ^ b) | b == a | b */
10478 break;
10480 case XOR:
10481 if (op1 == AND)
10482 /* (a & b) ^ b == (~a) & b */
10483 op0 = AND, *pcomp_p = 1;
10484 else /* op1 == IOR */
10485 /* (a | b) ^ b == a & ~b */
10486 op0 = AND, const0 = ~const0;
10487 break;
10489 case AND:
10490 if (op1 == IOR)
10491 /* (a | b) & b == b */
10492 op0 = SET;
10493 else /* op1 == XOR */
10494 /* (a ^ b) & b == (~a) & b */
10495 *pcomp_p = 1;
10496 break;
10497 default:
10498 break;
10501 /* Check for NO-OP cases. */
10502 const0 &= GET_MODE_MASK (mode);
10503 if (const0 == 0
10504 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10505 op0 = UNKNOWN;
10506 else if (const0 == 0 && op0 == AND)
10507 op0 = SET;
10508 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10509 && op0 == AND)
10510 op0 = UNKNOWN;
10512 *pop0 = op0;
10514 /* ??? Slightly redundant with the above mask, but not entirely.
10515 Moving this above means we'd have to sign-extend the mode mask
10516 for the final test. */
10517 if (op0 != UNKNOWN && op0 != NEG)
10518 *pconst0 = trunc_int_for_mode (const0, mode);
10520 return 1;
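/* A small illustration of the merging above (hypothetical constants): if
   the pending outermost operation is *POP0 == IOR with constant C and we
   now also need OP1 == AND with the same constant C, the table reduces
   this to *POP0 == SET, since (a & C) | C == C, i.e. the result is simply
   the constant.  Two pending ANDs just intersect their constants.  */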
10523 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10524 the shift in. The original shift operation CODE is performed on OP in
10525 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10526 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10527 result of the shift is subject to operation OUTER_CODE with operand
10528 OUTER_CONST. */
10530 static scalar_int_mode
10531 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10532 scalar_int_mode orig_mode, scalar_int_mode mode,
10533 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10535 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10537 /* In general we can't perform in wider mode for right shift and rotate. */
10538 switch (code)
10540 case ASHIFTRT:
10541 /* We can still widen if the bits brought in from the left are identical
10542 to the sign bit of ORIG_MODE. */
10543 if (num_sign_bit_copies (op, mode)
10544 > (unsigned) (GET_MODE_PRECISION (mode)
10545 - GET_MODE_PRECISION (orig_mode)))
10546 return mode;
10547 return orig_mode;
10549 case LSHIFTRT:
10550 /* Similarly here but with zero bits. */
10551 if (HWI_COMPUTABLE_MODE_P (mode)
10552 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10553 return mode;
10555 /* We can also widen if the bits brought in will be masked off. This
10556 operation is performed in ORIG_MODE. */
10557 if (outer_code == AND)
10559 int care_bits = low_bitmask_len (orig_mode, outer_const);
10561 if (care_bits >= 0
10562 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10563 return mode;
10565 /* fall through */
10567 case ROTATE:
10568 return orig_mode;
10570 case ROTATERT:
10571 gcc_unreachable ();
10573 default:
10574 return mode;
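/* Illustrative example (hypothetical modes and constants): a QImode
   LSHIFTRT by 2 can be widened to SImode when its result feeds
   (and ... (const_int 3)) performed in QImode, because low_bitmask_len
   gives 2 care bits and 8 - 2 >= 2, so any bits brought in from beyond
   QImode are masked away again.  */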
10578 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10579 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10580 if we cannot simplify it. Otherwise, return a simplified value.
10582 The shift is normally computed in the widest mode we find in VAROP, as
10583 long as it isn't a different number of words than RESULT_MODE. Exceptions
10584 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10586 static rtx
10587 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10588 rtx varop, int orig_count)
10590 enum rtx_code orig_code = code;
10591 rtx orig_varop = varop;
10592 int count, log2;
10593 machine_mode mode = result_mode;
10594 machine_mode shift_mode;
10595 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10596 /* We form (outer_op (code varop count) (outer_const)). */
10597 enum rtx_code outer_op = UNKNOWN;
10598 HOST_WIDE_INT outer_const = 0;
10599 int complement_p = 0;
10600 rtx new_rtx, x;
10602 /* Make sure to truncate the "natural" shift on the way in.  We don't
10603 want to do this inside the loop as it makes it more difficult to
10604 combine shifts. */
10605 if (SHIFT_COUNT_TRUNCATED)
10606 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10608 /* If we were given an invalid count, don't do anything except exactly
10609 what was requested. */
10611 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10612 return NULL_RTX;
10614 count = orig_count;
10616 /* Unless one of the branches of the `if' in this loop does a `continue',
10617 we will `break' the loop after the `if'. */
10619 while (count != 0)
10621 /* If we have an operand of (clobber (const_int 0)), fail. */
10622 if (GET_CODE (varop) == CLOBBER)
10623 return NULL_RTX;
10625 /* Convert ROTATERT to ROTATE. */
10626 if (code == ROTATERT)
10628 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10629 code = ROTATE;
10630 count = bitsize - count;
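/* E.g. a ROTATERT by 3 in a 32-bit mode becomes a ROTATE by 29, so only
   left rotates need to be handled below.  */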
10633 shift_mode = result_mode;
10634 if (shift_mode != mode)
10636 /* We only change the modes of scalar shifts. */
10637 int_mode = as_a <scalar_int_mode> (mode);
10638 int_result_mode = as_a <scalar_int_mode> (result_mode);
10639 shift_mode = try_widen_shift_mode (code, varop, count,
10640 int_result_mode, int_mode,
10641 outer_op, outer_const);
10644 scalar_int_mode shift_unit_mode
10645 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10647 /* Handle cases where the count is greater than the size of the mode
10648 minus 1. For ASHIFT, use the size minus one as the count (this can
10649 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10650 take the count modulo the size. For other shifts, the result is
10651 zero.
10653 Since these shifts are being produced by the compiler by combining
10654 multiple operations, each of which are defined, we know what the
10655 result is supposed to be. */
10657 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10659 if (code == ASHIFTRT)
10660 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10661 else if (code == ROTATE || code == ROTATERT)
10662 count %= GET_MODE_PRECISION (shift_unit_mode);
10663 else
10665 /* We can't simply return zero because there may be an
10666 outer op. */
10667 varop = const0_rtx;
10668 count = 0;
10669 break;
10673 /* If we discovered we had to complement VAROP, leave. Making a NOT
10674 here would cause an infinite loop. */
10675 if (complement_p)
10676 break;
10678 if (shift_mode == shift_unit_mode)
10680 /* An arithmetic right shift of a quantity known to be -1 or 0
10681 is a no-op. */
10682 if (code == ASHIFTRT
10683 && (num_sign_bit_copies (varop, shift_unit_mode)
10684 == GET_MODE_PRECISION (shift_unit_mode)))
10686 count = 0;
10687 break;
10690 /* If we are doing an arithmetic right shift and discarding all but
10691 the sign bit copies, this is equivalent to doing a shift by the
10692 bitsize minus one. Convert it into that shift because it will
10693 often allow other simplifications. */
10695 if (code == ASHIFTRT
10696 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10697 >= GET_MODE_PRECISION (shift_unit_mode)))
10698 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10700 /* We simplify the tests below and elsewhere by converting
10701 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10702 `make_compound_operation' will convert it to an ASHIFTRT for
10703 those machines (such as VAX) that don't have an LSHIFTRT. */
10704 if (code == ASHIFTRT
10705 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10706 && val_signbit_known_clear_p (shift_unit_mode,
10707 nonzero_bits (varop,
10708 shift_unit_mode)))
10709 code = LSHIFTRT;
10711 if (((code == LSHIFTRT
10712 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10713 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10714 || (code == ASHIFT
10715 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10716 && !((nonzero_bits (varop, shift_unit_mode) << count)
10717 & GET_MODE_MASK (shift_unit_mode))))
10718 && !side_effects_p (varop))
10719 varop = const0_rtx;
10722 switch (GET_CODE (varop))
10724 case SIGN_EXTEND:
10725 case ZERO_EXTEND:
10726 case SIGN_EXTRACT:
10727 case ZERO_EXTRACT:
10728 new_rtx = expand_compound_operation (varop);
10729 if (new_rtx != varop)
10731 varop = new_rtx;
10732 continue;
10734 break;
10736 case MEM:
10737 /* The following rules apply only to scalars. */
10738 if (shift_mode != shift_unit_mode)
10739 break;
10740 int_mode = as_a <scalar_int_mode> (mode);
10742 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10743 minus the width of a smaller mode, we can do this with a
10744 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10745 if ((code == ASHIFTRT || code == LSHIFTRT)
10746 && ! mode_dependent_address_p (XEXP (varop, 0),
10747 MEM_ADDR_SPACE (varop))
10748 && ! MEM_VOLATILE_P (varop)
10749 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10750 .exists (&tmode)))
10752 new_rtx = adjust_address_nv (varop, tmode,
10753 BYTES_BIG_ENDIAN ? 0
10754 : count / BITS_PER_UNIT);
10756 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10757 : ZERO_EXTEND, int_mode, new_rtx);
10758 count = 0;
10759 continue;
10761 break;
10763 case SUBREG:
10764 /* The following rules apply only to scalars. */
10765 if (shift_mode != shift_unit_mode)
10766 break;
10767 int_mode = as_a <scalar_int_mode> (mode);
10768 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10770 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10771 the same number of words as what we've seen so far. Then store
10772 the widest mode in MODE. */
10773 if (subreg_lowpart_p (varop)
10774 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10775 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10776 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10777 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10778 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10780 varop = SUBREG_REG (varop);
10781 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10782 mode = inner_mode;
10783 continue;
10785 break;
10787 case MULT:
10788 /* Some machines use MULT instead of ASHIFT because MULT
10789 is cheaper. But it is still better on those machines to
10790 merge two shifts into one. */
10791 if (CONST_INT_P (XEXP (varop, 1))
10792 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10794 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10795 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10796 XEXP (varop, 0), log2_rtx);
10797 continue;
10799 break;
10801 case UDIV:
10802 /* Similar, for when divides are cheaper. */
10803 if (CONST_INT_P (XEXP (varop, 1))
10804 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10806 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10807 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10808 XEXP (varop, 0), log2_rtx);
10809 continue;
10811 break;
10813 case ASHIFTRT:
10814 /* If we are extracting just the sign bit of an arithmetic
10815 right shift, that shift is not needed. However, the sign
10816 bit of a wider mode may be different from what would be
10817 interpreted as the sign bit in a narrower mode, so, if
10818 the result is narrower, don't discard the shift. */
10819 if (code == LSHIFTRT
10820 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10821 && (GET_MODE_UNIT_BITSIZE (result_mode)
10822 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10824 varop = XEXP (varop, 0);
10825 continue;
10828 /* fall through */
10830 case LSHIFTRT:
10831 case ASHIFT:
10832 case ROTATE:
10833 /* The following rules apply only to scalars. */
10834 if (shift_mode != shift_unit_mode)
10835 break;
10836 int_mode = as_a <scalar_int_mode> (mode);
10837 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10838 int_result_mode = as_a <scalar_int_mode> (result_mode);
10840 /* Here we have two nested shifts. The result is usually the
10841 AND of a new shift with a mask. We compute the result below. */
10842 if (CONST_INT_P (XEXP (varop, 1))
10843 && INTVAL (XEXP (varop, 1)) >= 0
10844 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10845 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10846 && HWI_COMPUTABLE_MODE_P (int_mode))
10848 enum rtx_code first_code = GET_CODE (varop);
10849 unsigned int first_count = INTVAL (XEXP (varop, 1));
10850 unsigned HOST_WIDE_INT mask;
10851 rtx mask_rtx;
10853 /* We have one common special case. We can't do any merging if
10854 the inner code is an ASHIFTRT of a smaller mode. However, if
10855 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10856 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10857 we can convert it to
10858 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10859 This simplifies certain SIGN_EXTEND operations. */
10860 if (code == ASHIFT && first_code == ASHIFTRT
10861 && count == (GET_MODE_PRECISION (int_result_mode)
10862 - GET_MODE_PRECISION (int_varop_mode)))
10864 /* C3 has the low-order C1 bits zero. */
10866 mask = GET_MODE_MASK (int_mode)
10867 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10869 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10870 XEXP (varop, 0), mask);
10871 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10872 int_result_mode, varop, count);
10873 count = first_count;
10874 code = ASHIFTRT;
10875 continue;
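/* A concrete instance of the conversion above (hypothetical modes), with
   M2 = QImode, M1 = SImode, C1 = 2 and C2 = 32 - 8 = 24:

     (ashift:SI (subreg:SI (ashiftrt:QI FOO 2) 0) 24)
       ==>  (ashiftrt:SI (ashift:SI (and:SI (subreg:SI FOO 0) 0xfffffffc) 24) 2)

   where C3 = 0xfffffffc is the SImode mask with the low-order C1 bits
   cleared.  */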
10878 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10879 than C1 high-order bits equal to the sign bit, we can convert
10880 this to either an ASHIFT or an ASHIFTRT depending on the
10881 two counts.
10883 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10885 if (code == ASHIFTRT && first_code == ASHIFT
10886 && int_varop_mode == shift_unit_mode
10887 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10888 > first_count))
10890 varop = XEXP (varop, 0);
10891 count -= first_count;
10892 if (count < 0)
10894 count = -count;
10895 code = ASHIFT;
10898 continue;
10901 /* There are some cases we can't do. If CODE is ASHIFTRT,
10902 we can only do this if FIRST_CODE is also ASHIFTRT.
10904 We can't do the case when CODE is ROTATE and FIRST_CODE is
10905 ASHIFTRT.
10907 If the mode of this shift is not the mode of the outer shift,
10908 we can't do this if either shift is a right shift or ROTATE.
10910 Finally, we can't do any of these if the mode is too wide
10911 unless the codes are the same.
10913 Handle the case where the shift codes are the same
10914 first. */
10916 if (code == first_code)
10918 if (int_varop_mode != int_result_mode
10919 && (code == ASHIFTRT || code == LSHIFTRT
10920 || code == ROTATE))
10921 break;
10923 count += first_count;
10924 varop = XEXP (varop, 0);
10925 continue;
10928 if (code == ASHIFTRT
10929 || (code == ROTATE && first_code == ASHIFTRT)
10930 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10931 || (int_varop_mode != int_result_mode
10932 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10933 || first_code == ROTATE
10934 || code == ROTATE)))
10935 break;
10937 /* To compute the mask to apply after the shift, shift the
10938 nonzero bits of the inner shift the same way the
10939 outer shift will. */
10941 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10942 int_result_mode);
10943 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10944 mask_rtx
10945 = simplify_const_binary_operation (code, int_result_mode,
10946 mask_rtx, count_rtx);
10948 /* Give up if we can't compute an outer operation to use. */
10949 if (mask_rtx == 0
10950 || !CONST_INT_P (mask_rtx)
10951 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10952 INTVAL (mask_rtx),
10953 int_result_mode, &complement_p))
10954 break;
10956 /* If the shifts are in the same direction, we add the
10957 counts. Otherwise, we subtract them. */
10958 if ((code == ASHIFTRT || code == LSHIFTRT)
10959 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10960 count += first_count;
10961 else
10962 count -= first_count;
10964 /* If COUNT is positive, the new shift is usually CODE,
10965 except for the two exceptions below, in which case it is
10966 FIRST_CODE. If the count is negative, FIRST_CODE should
10967 always be used.  */
10968 if (count > 0
10969 && ((first_code == ROTATE && code == ASHIFT)
10970 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10971 code = first_code;
10972 else if (count < 0)
10973 code = first_code, count = -count;
10975 varop = XEXP (varop, 0);
10976 continue;
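/* For instance (hypothetical counts), (lshiftrt (lshiftrt X 2) 3) merges
   into a single (lshiftrt X 5), with the AND computed above recording
   which bits of the inner shift survive; shifts in opposite directions
   subtract their counts instead.  */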
10979 /* If we have (A << B << C) for any shift, we can convert this to
10980 (A << C << B). This wins if A is a constant. Only try this if
10981 B is not a constant. */
10983 else if (GET_CODE (varop) == code
10984 && CONST_INT_P (XEXP (varop, 0))
10985 && !CONST_INT_P (XEXP (varop, 1)))
10987 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10988 sure the result will be masked. See PR70222. */
10989 if (code == LSHIFTRT
10990 && int_mode != int_result_mode
10991 && !merge_outer_ops (&outer_op, &outer_const, AND,
10992 GET_MODE_MASK (int_result_mode)
10993 >> orig_count, int_result_mode,
10994 &complement_p))
10995 break;
10996 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10997 up outer sign extension (often left and right shift) is
10998 hardly more efficient than the original. See PR70429. */
10999 if (code == ASHIFTRT && int_mode != int_result_mode)
11000 break;
11002 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
11003 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
11004 XEXP (varop, 0),
11005 count_rtx);
11006 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
11007 count = 0;
11008 continue;
11010 break;
11012 case NOT:
11013 /* The following rules apply only to scalars. */
11014 if (shift_mode != shift_unit_mode)
11015 break;
11017 /* Make this fit the case below. */
11018 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
11019 continue;
11021 case IOR:
11022 case AND:
11023 case XOR:
11024 /* The following rules apply only to scalars. */
11025 if (shift_mode != shift_unit_mode)
11026 break;
11027 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11028 int_result_mode = as_a <scalar_int_mode> (result_mode);
11030 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
11031 with C the size of VAROP - 1 and the shift is logical if
11032 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11033 we have an (le X 0) operation. If we have an arithmetic shift
11034 and STORE_FLAG_VALUE is 1 or we have a logical shift with
11035 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
11037 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
11038 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
11039 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11040 && (code == LSHIFTRT || code == ASHIFTRT)
11041 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11042 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11044 count = 0;
11045 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
11046 const0_rtx);
11048 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11049 varop = gen_rtx_NEG (int_varop_mode, varop);
11051 continue;
11054 /* If we have (shift (logical)), move the logical to the outside
11055 to allow it to possibly combine with another logical and the
11056 shift to combine with another shift. This also canonicalizes to
11057 what a ZERO_EXTRACT looks like. Also, some machines have
11058 (and (shift)) insns. */
11060 if (CONST_INT_P (XEXP (varop, 1))
11061 /* We can't do this if we have (ashiftrt (xor)) and the
11062 constant has its sign bit set in shift_unit_mode with
11063 shift_unit_mode wider than result_mode. */
11064 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11065 && int_result_mode != shift_unit_mode
11066 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11067 shift_unit_mode) < 0)
11068 && (new_rtx = simplify_const_binary_operation
11069 (code, int_result_mode,
11070 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11071 gen_int_shift_amount (int_result_mode, count))) != 0
11072 && CONST_INT_P (new_rtx)
11073 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
11074 INTVAL (new_rtx), int_result_mode,
11075 &complement_p))
11077 varop = XEXP (varop, 0);
11078 continue;
11081 /* If we can't do that, try to simplify the shift in each arm of the
11082 logical expression, make a new logical expression, and apply
11083 the inverse distributive law. This also can't be done for
11084 (ashiftrt (xor)) where we've widened the shift and the constant
11085 changes the sign bit. */
11086 if (CONST_INT_P (XEXP (varop, 1))
11087 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11088 && int_result_mode != shift_unit_mode
11089 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11090 shift_unit_mode) < 0))
11092 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11093 XEXP (varop, 0), count);
11094 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11095 XEXP (varop, 1), count);
11097 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11098 lhs, rhs);
11099 varop = apply_distributive_law (varop);
11101 count = 0;
11102 continue;
11104 break;
11106 case EQ:
11107 /* The following rules apply only to scalars. */
11108 if (shift_mode != shift_unit_mode)
11109 break;
11110 int_result_mode = as_a <scalar_int_mode> (result_mode);
11112 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11113 says that the sign bit can be tested, FOO has mode MODE, C is
11114 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11115 that may be nonzero. */
11116 if (code == LSHIFTRT
11117 && XEXP (varop, 1) == const0_rtx
11118 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11119 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11120 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11121 && STORE_FLAG_VALUE == -1
11122 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11123 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11124 int_result_mode, &complement_p))
11126 varop = XEXP (varop, 0);
11127 count = 0;
11128 continue;
11130 break;
11132 case NEG:
11133 /* The following rules apply only to scalars. */
11134 if (shift_mode != shift_unit_mode)
11135 break;
11136 int_result_mode = as_a <scalar_int_mode> (result_mode);
11138 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11139 than the number of bits in the mode is equivalent to A. */
11140 if (code == LSHIFTRT
11141 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11142 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11144 varop = XEXP (varop, 0);
11145 count = 0;
11146 continue;
11149 /* NEG commutes with ASHIFT since it is multiplication. Move the
11150 NEG outside to allow shifts to combine. */
11151 if (code == ASHIFT
11152 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11153 int_result_mode, &complement_p))
11155 varop = XEXP (varop, 0);
11156 continue;
11158 break;
11160 case PLUS:
11161 /* The following rules apply only to scalars. */
11162 if (shift_mode != shift_unit_mode)
11163 break;
11164 int_result_mode = as_a <scalar_int_mode> (result_mode);
11166 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11167 is one less than the number of bits in the mode is
11168 equivalent to (xor A 1). */
11169 if (code == LSHIFTRT
11170 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11171 && XEXP (varop, 1) == constm1_rtx
11172 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11173 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11174 int_result_mode, &complement_p))
11176 count = 0;
11177 varop = XEXP (varop, 0);
11178 continue;
11181 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11182 that might be nonzero in BAR are those being shifted out and those
11183 bits are known zero in FOO, we can replace the PLUS with FOO.
11184 Similarly in the other operand order. This code occurs when
11185 we are computing the size of a variable-size array. */
11187 if ((code == ASHIFTRT || code == LSHIFTRT)
11188 && count < HOST_BITS_PER_WIDE_INT
11189 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11190 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11191 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11193 varop = XEXP (varop, 0);
11194 continue;
11196 else if ((code == ASHIFTRT || code == LSHIFTRT)
11197 && count < HOST_BITS_PER_WIDE_INT
11198 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11199 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11200 >> count) == 0
11201 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11202 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11204 varop = XEXP (varop, 1);
11205 continue;
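/* Illustrative case (hypothetical values): size computations often yield
   (lshiftrt (plus FOO (const_int 3)) 2) where FOO is known to be a
   multiple of 4.  The nonzero bits of the constant are all shifted out
   and FOO has no bits there, so the PLUS collapses and this becomes
   (lshiftrt FOO 2).  */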
11208 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11209 if (code == ASHIFT
11210 && CONST_INT_P (XEXP (varop, 1))
11211 && (new_rtx = simplify_const_binary_operation
11212 (ASHIFT, int_result_mode,
11213 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11214 gen_int_shift_amount (int_result_mode, count))) != 0
11215 && CONST_INT_P (new_rtx)
11216 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11217 INTVAL (new_rtx), int_result_mode,
11218 &complement_p))
11220 varop = XEXP (varop, 0);
11221 continue;
11224 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11225 signbit', and attempt to change the PLUS to an XOR and move it to
11226 the outer operation, as is done above in the AND/IOR/XOR case
11227 for (shift (logical)).  See the logical handling above for the
11228 reasoning behind doing so.  */
11229 if (code == LSHIFTRT
11230 && CONST_INT_P (XEXP (varop, 1))
11231 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11232 && (new_rtx = simplify_const_binary_operation
11233 (code, int_result_mode,
11234 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11235 gen_int_shift_amount (int_result_mode, count))) != 0
11236 && CONST_INT_P (new_rtx)
11237 && merge_outer_ops (&outer_op, &outer_const, XOR,
11238 INTVAL (new_rtx), int_result_mode,
11239 &complement_p))
11241 varop = XEXP (varop, 0);
11242 continue;
11245 break;
11247 case MINUS:
11248 /* The following rules apply only to scalars. */
11249 if (shift_mode != shift_unit_mode)
11250 break;
11251 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11253 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11254 with C the size of VAROP - 1 and the shift is logical if
11255 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11256 we have a (gt X 0) operation. If the shift is arithmetic with
11257 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11258 we have a (neg (gt X 0)) operation. */
11260 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11261 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11262 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11263 && (code == LSHIFTRT || code == ASHIFTRT)
11264 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11265 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11266 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11268 count = 0;
11269 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11270 const0_rtx);
11272 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11273 varop = gen_rtx_NEG (int_varop_mode, varop);
11275 continue;
11277 break;
11279 case TRUNCATE:
11280 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11281 if the truncate does not affect the value. */
11282 if (code == LSHIFTRT
11283 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11284 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11285 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11286 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11287 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11289 rtx varop_inner = XEXP (varop, 0);
11290 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11291 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11292 new_count);
11293 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11294 XEXP (varop_inner, 0),
11295 new_count_rtx);
11296 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11297 count = 0;
11298 continue;
11300 break;
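/* Illustrative instance (hypothetical modes): with a DImode inner shift
   truncated to SImode,

     (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3)
       ==>  (truncate:SI (lshiftrt:DI X 35))

   since the inner shift already discards at least 64 - 32 = 32 bits.  */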
11302 default:
11303 break;
11306 break;
11309 shift_mode = result_mode;
11310 if (shift_mode != mode)
11312 /* We only change the modes of scalar shifts. */
11313 int_mode = as_a <scalar_int_mode> (mode);
11314 int_result_mode = as_a <scalar_int_mode> (result_mode);
11315 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11316 int_mode, outer_op, outer_const);
11319 /* We have now finished analyzing the shift. The result should be
11320 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11321 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11322 to the result of the shift. OUTER_CONST is the relevant constant,
11323 but we must turn off all bits turned off in the shift. */
11325 if (outer_op == UNKNOWN
11326 && orig_code == code && orig_count == count
11327 && varop == orig_varop
11328 && shift_mode == GET_MODE (varop))
11329 return NULL_RTX;
11331 /* Make a SUBREG if necessary. If we can't make it, fail. */
11332 varop = gen_lowpart (shift_mode, varop);
11333 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11334 return NULL_RTX;
11336 /* If we have an outer operation and we just made a shift, it is
11337 possible that we could have simplified the shift were it not
11338 for the outer operation. So try to do the simplification
11339 recursively. */
11341 if (outer_op != UNKNOWN)
11342 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11343 else
11344 x = NULL_RTX;
11346 if (x == NULL_RTX)
11347 x = simplify_gen_binary (code, shift_mode, varop,
11348 gen_int_shift_amount (shift_mode, count));
11350 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11351 turn off all the bits that the shift would have turned off. */
11352 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11353 /* We only change the modes of scalar shifts. */
11354 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11355 x, GET_MODE_MASK (result_mode) >> orig_count);
11357 /* Do the remainder of the processing in RESULT_MODE. */
11358 x = gen_lowpart_or_truncate (result_mode, x);
11360 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11361 operation. */
11362 if (complement_p)
11363 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11365 if (outer_op != UNKNOWN)
11367 int_result_mode = as_a <scalar_int_mode> (result_mode);
11369 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11370 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11371 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11373 if (outer_op == AND)
11374 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11375 else if (outer_op == SET)
11377 /* This means that we have determined that the result is
11378 equivalent to a constant. This should be rare. */
11379 if (!side_effects_p (x))
11380 x = GEN_INT (outer_const);
11382 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11383 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11384 else
11385 x = simplify_gen_binary (outer_op, int_result_mode, x,
11386 GEN_INT (outer_const));
11389 return x;
11392 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11393 The result of the shift is RESULT_MODE. If we cannot simplify it,
11394 return X or, if it is NULL, synthesize the expression with
11395 simplify_gen_binary. Otherwise, return a simplified value.
11397 The shift is normally computed in the widest mode we find in VAROP, as
11398 long as it isn't a different number of words than RESULT_MODE. Exceptions
11399 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11401 static rtx
11402 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11403 rtx varop, int count)
11405 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11406 if (tem)
11407 return tem;
11409 if (!x)
11410 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11411 gen_int_shift_amount (GET_MODE (varop), count));
11412 if (GET_MODE (x) != result_mode)
11413 x = gen_lowpart (result_mode, x);
11414 return x;
11418 /* A subroutine of recog_for_combine. See there for arguments and
11419 return value. */
11421 static int
11422 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11424 rtx pat = *pnewpat;
11425 rtx pat_without_clobbers;
11426 int insn_code_number;
11427 int num_clobbers_to_add = 0;
11428 int i;
11429 rtx notes = NULL_RTX;
11430 rtx old_notes, old_pat;
11431 int old_icode;
11433 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11434 we use to indicate that something didn't match. If we find such a
11435 thing, force rejection. */
11436 if (GET_CODE (pat) == PARALLEL)
11437 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11438 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11439 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11440 return -1;
11442 old_pat = PATTERN (insn);
11443 old_notes = REG_NOTES (insn);
11444 PATTERN (insn) = pat;
11445 REG_NOTES (insn) = NULL_RTX;
11447 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11448 if (dump_file && (dump_flags & TDF_DETAILS))
11450 if (insn_code_number < 0)
11451 fputs ("Failed to match this instruction:\n", dump_file);
11452 else
11453 fputs ("Successfully matched this instruction:\n", dump_file);
11454 print_rtl_single (dump_file, pat);
11457 /* If the pattern wasn't recognized, there is the possibility that we previously had an insn
11458 that clobbered some register as a side effect, but the combined
11459 insn doesn't need to do that. So try once more without the clobbers
11460 unless this represents an ASM insn. */
11462 if (insn_code_number < 0 && ! check_asm_operands (pat)
11463 && GET_CODE (pat) == PARALLEL)
11465 int pos;
11467 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11468 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11470 if (i != pos)
11471 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11472 pos++;
11475 SUBST_INT (XVECLEN (pat, 0), pos);
11477 if (pos == 1)
11478 pat = XVECEXP (pat, 0, 0);
11480 PATTERN (insn) = pat;
11481 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11482 if (dump_file && (dump_flags & TDF_DETAILS))
11484 if (insn_code_number < 0)
11485 fputs ("Failed to match this instruction:\n", dump_file);
11486 else
11487 fputs ("Successfully matched this instruction:\n", dump_file);
11488 print_rtl_single (dump_file, pat);
11492 pat_without_clobbers = pat;
11494 PATTERN (insn) = old_pat;
11495 REG_NOTES (insn) = old_notes;
11497 /* Recognize all noop sets, these will be killed by followup pass. */
11498 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11499 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11501 /* If we had any clobbers to add, make a new pattern that contains
11502 them. Then check to make sure that all of them are dead. */
11503 if (num_clobbers_to_add)
11505 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11506 rtvec_alloc (GET_CODE (pat) == PARALLEL
11507 ? (XVECLEN (pat, 0)
11508 + num_clobbers_to_add)
11509 : num_clobbers_to_add + 1));
11511 if (GET_CODE (pat) == PARALLEL)
11512 for (i = 0; i < XVECLEN (pat, 0); i++)
11513 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11514 else
11515 XVECEXP (newpat, 0, 0) = pat;
11517 add_clobbers (newpat, insn_code_number);
11519 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11520 i < XVECLEN (newpat, 0); i++)
11522 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11523 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11524 return -1;
11525 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11527 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11528 notes = alloc_reg_note (REG_UNUSED,
11529 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11532 pat = newpat;
11535 if (insn_code_number >= 0
11536 && insn_code_number != NOOP_MOVE_INSN_CODE)
11538 old_pat = PATTERN (insn);
11539 old_notes = REG_NOTES (insn);
11540 old_icode = INSN_CODE (insn);
11541 PATTERN (insn) = pat;
11542 REG_NOTES (insn) = notes;
11543 INSN_CODE (insn) = insn_code_number;
11545 /* Allow targets to reject combined insn. */
11546 if (!targetm.legitimate_combined_insn (insn))
11548 if (dump_file && (dump_flags & TDF_DETAILS))
11549 fputs ("Instruction not appropriate for target.",
11550 dump_file);
11552 /* Callers expect recog_for_combine to strip
11553 clobbers from the pattern on failure. */
11554 pat = pat_without_clobbers;
11555 notes = NULL_RTX;
11557 insn_code_number = -1;
11560 PATTERN (insn) = old_pat;
11561 REG_NOTES (insn) = old_notes;
11562 INSN_CODE (insn) = old_icode;
11565 *pnewpat = pat;
11566 *pnotes = notes;
11568 return insn_code_number;
11571 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11572 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11573 Return whether anything was so changed. */
11575 static bool
11576 change_zero_ext (rtx pat)
11578 bool changed = false;
11579 rtx *src = &SET_SRC (pat);
11581 subrtx_ptr_iterator::array_type array;
11582 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11584 rtx x = **iter;
11585 scalar_int_mode mode, inner_mode;
11586 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11587 continue;
11588 int size;
11590 if (GET_CODE (x) == ZERO_EXTRACT
11591 && CONST_INT_P (XEXP (x, 1))
11592 && CONST_INT_P (XEXP (x, 2))
11593 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11594 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11596 size = INTVAL (XEXP (x, 1));
11598 int start = INTVAL (XEXP (x, 2));
11599 if (BITS_BIG_ENDIAN)
11600 start = GET_MODE_PRECISION (inner_mode) - size - start;
11602 if (start != 0)
11603 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11604 gen_int_shift_amount (inner_mode, start));
11605 else
11606 x = XEXP (x, 0);
11608 if (mode != inner_mode)
11610 if (REG_P (x) && HARD_REGISTER_P (x)
11611 && !can_change_dest_mode (x, 0, mode))
11612 continue;
11614 x = gen_lowpart_SUBREG (mode, x);
11617 else if (GET_CODE (x) == ZERO_EXTEND
11618 && GET_CODE (XEXP (x, 0)) == SUBREG
11619 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11620 && !paradoxical_subreg_p (XEXP (x, 0))
11621 && subreg_lowpart_p (XEXP (x, 0)))
11623 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11624 size = GET_MODE_PRECISION (inner_mode);
11625 x = SUBREG_REG (XEXP (x, 0));
11626 if (GET_MODE (x) != mode)
11628 if (REG_P (x) && HARD_REGISTER_P (x)
11629 && !can_change_dest_mode (x, 0, mode))
11630 continue;
11632 x = gen_lowpart_SUBREG (mode, x);
11635 else if (GET_CODE (x) == ZERO_EXTEND
11636 && REG_P (XEXP (x, 0))
11637 && HARD_REGISTER_P (XEXP (x, 0))
11638 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11640 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11641 size = GET_MODE_PRECISION (inner_mode);
11642 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11644 else
11645 continue;
11647 if (!(GET_CODE (x) == LSHIFTRT
11648 && CONST_INT_P (XEXP (x, 1))
11649 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11651 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11652 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11655 SUBST (**iter, x);
11656 changed = true;
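/* Illustrative rewrites performed above (hypothetical registers, with
   BITS_BIG_ENDIAN == 0):

     (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4))
       ==>  (and:SI (lshiftrt:SI (reg:SI R) (const_int 4)) (const_int 0xff))

     (zero_extend:SI (subreg:QI (reg:SI R) 0))
       ==>  (and:SI (reg:SI R) (const_int 0xff))  */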
11659 if (changed)
11660 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11661 maybe_swap_commutative_operands (**iter);
11663 rtx *dst = &SET_DEST (pat);
11664 scalar_int_mode mode;
11665 if (GET_CODE (*dst) == ZERO_EXTRACT
11666 && REG_P (XEXP (*dst, 0))
11667 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11668 && CONST_INT_P (XEXP (*dst, 1))
11669 && CONST_INT_P (XEXP (*dst, 2)))
11671 rtx reg = XEXP (*dst, 0);
11672 int width = INTVAL (XEXP (*dst, 1));
11673 int offset = INTVAL (XEXP (*dst, 2));
11674 int reg_width = GET_MODE_PRECISION (mode);
11675 if (BITS_BIG_ENDIAN)
11676 offset = reg_width - width - offset;
11678 rtx x, y, z, w;
11679 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11680 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11681 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11682 if (offset)
11683 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11684 else
11685 y = SET_SRC (pat);
11686 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11687 w = gen_rtx_IOR (mode, x, z);
11688 SUBST (SET_DEST (pat), reg);
11689 SUBST (SET_SRC (pat), w);
11691 changed = true;
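/* A sketch of the destination rewrite above (hypothetical operands, with
   BITS_BIG_ENDIAN == 0), for WIDTH = 8 and OFFSET = 4:

     (set (zero_extract:SI REG (const_int 8) (const_int 4)) SRC)
       ==>  (set REG (ior:SI (and:SI REG 0xfffff00f)
                             (and:SI (ashift:SI SRC 4) 0xff0)))

   i.e. the selected bits of REG are cleared and replaced by the shifted,
   masked source.  */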
11694 return changed;
11697 /* Like recog, but we receive the address of a pointer to a new pattern.
11698 We try to match the rtx that the pointer points to.
11699 If that fails, we may try to modify or replace the pattern,
11700 storing the replacement into the same pointer object.
11702 Modifications include deletion or addition of CLOBBERs. If the
11703 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11704 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11705 (and undo if that fails).
11707 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11708 the CLOBBERs are placed.
11710 The value is the final insn code from the pattern ultimately matched,
11711 or -1. */
11713 static int
11714 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11716 rtx pat = *pnewpat;
11717 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11718 if (insn_code_number >= 0 || check_asm_operands (pat))
11719 return insn_code_number;
11721 void *marker = get_undo_marker ();
11722 bool changed = false;
11724 if (GET_CODE (pat) == SET)
11725 changed = change_zero_ext (pat);
11726 else if (GET_CODE (pat) == PARALLEL)
11728 int i;
11729 for (i = 0; i < XVECLEN (pat, 0); i++)
11731 rtx set = XVECEXP (pat, 0, i);
11732 if (GET_CODE (set) == SET)
11733 changed |= change_zero_ext (set);
11737 if (changed)
11739 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11741 if (insn_code_number < 0)
11742 undo_to_marker (marker);
11745 return insn_code_number;
11748 /* Like gen_lowpart_general but for use by combine. In combine it
11749 is not possible to create any new pseudoregs. However, it is
11750 safe to create invalid memory addresses, because combine will
11751 try to recognize them and all they will do is make the combine
11752 attempt fail.
11754 If for some reason this cannot do its job, an rtx
11755 (clobber (const_int 0)) is returned.
11756 An insn containing that will not be recognized. */
11758 static rtx
11759 gen_lowpart_for_combine (machine_mode omode, rtx x)
11761 machine_mode imode = GET_MODE (x);
11762 rtx result;
11764 if (omode == imode)
11765 return x;
11767 /* We can only support MODE being wider than a word if X is a
11768 constant integer or has a mode the same size. */
11769 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11770 && ! (CONST_SCALAR_INT_P (x)
11771 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11772 goto fail;
11774 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11775 won't know what to do. So we will strip off the SUBREG here and
11776 process normally. */
11777 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11779 x = SUBREG_REG (x);
11781 /* For use in case we fall down into the address adjustments
11782 further below, we need to update the known mode of x (imode),
11783 since we just changed x.  */
11784 imode = GET_MODE (x);
11786 if (imode == omode)
11787 return x;
11790 result = gen_lowpart_common (omode, x);
11792 if (result)
11793 return result;
11795 if (MEM_P (x))
11797 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11798 address. */
11799 if (MEM_VOLATILE_P (x)
11800 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11801 goto fail;
11803 /* If we want to refer to something bigger than the original memref,
11804 generate a paradoxical subreg instead. That will force a reload
11805 of the original memref X. */
11806 if (paradoxical_subreg_p (omode, imode))
11807 return gen_rtx_SUBREG (omode, x, 0);
11809 poly_int64 offset = byte_lowpart_offset (omode, imode);
11810 return adjust_address_nv (x, omode, offset);
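/* E.g. (hypothetical case) taking the SImode lowpart of a DImode MEM
   simply re-addresses the same location; byte_lowpart_offset is 0 on a
   little-endian target and 4 bytes on a big-endian one.  */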
11813 /* If X is a comparison operator, rewrite it in a new mode. This
11814 probably won't match, but may allow further simplifications. */
11815 else if (COMPARISON_P (x))
11816 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11818 /* If we couldn't simplify X any other way, just enclose it in a
11819 SUBREG. Normally, this SUBREG won't match, but some patterns may
11820 include an explicit SUBREG or we may simplify it further in combine. */
11821 else
11823 rtx res;
11825 if (imode == VOIDmode)
11827 imode = int_mode_for_mode (omode).require ();
11828 x = gen_lowpart_common (imode, x);
11829 if (x == NULL)
11830 goto fail;
11832 res = lowpart_subreg (omode, x, imode);
11833 if (res)
11834 return res;
11837 fail:
11838 return gen_rtx_CLOBBER (omode, const0_rtx);
11841 /* Try to simplify a comparison between OP0 and a constant OP1,
11842 where CODE is the comparison code that will be tested, into a
11843 (CODE OP0 const0_rtx) form.
11845 The result is a possibly different comparison code to use.
11846 *POP1 may be updated. */
11848 static enum rtx_code
11849 simplify_compare_const (enum rtx_code code, machine_mode mode,
11850 rtx op0, rtx *pop1)
11852 scalar_int_mode int_mode;
11853 HOST_WIDE_INT const_op = INTVAL (*pop1);
11855 /* Get the constant we are comparing against and turn off all bits
11856 not on in our mode. */
11857 if (mode != VOIDmode)
11858 const_op = trunc_int_for_mode (const_op, mode);
11860 /* If we are comparing against a constant power of two and the value
11861 being compared can only have that single bit nonzero (e.g., it was
11862 `and'ed with that bit), we can replace this with a comparison
11863 with zero. */
11864 if (const_op
11865 && (code == EQ || code == NE || code == GE || code == GEU
11866 || code == LT || code == LTU)
11867 && is_a <scalar_int_mode> (mode, &int_mode)
11868 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11869 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11870 && (nonzero_bits (op0, int_mode)
11871 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11873 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11874 const_op = 0;
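/* Illustrative case (hypothetical operand): if OP0 is (and X (const_int 8)),
   its only possibly nonzero bit is bit 3, so (eq OP0 (const_int 8)) is
   rewritten as (ne OP0 (const_int 0)) and (ne OP0 (const_int 8)) as
   (eq OP0 (const_int 0)).  */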
11877 /* Similarly, if we are comparing a value known to be either -1 or
11878 0 with -1, change it to the opposite comparison against zero. */
11879 if (const_op == -1
11880 && (code == EQ || code == NE || code == GT || code == LE
11881 || code == GEU || code == LTU)
11882 && is_a <scalar_int_mode> (mode, &int_mode)
11883 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11885 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11886 const_op = 0;
11889 /* Do some canonicalizations based on the comparison code. We prefer
11890 comparisons against zero and then prefer equality comparisons.
11891 If we can reduce the size of a constant, we will do that too. */
11892 switch (code)
11894 case LT:
11895 /* < C is equivalent to <= (C - 1) */
11896 if (const_op > 0)
11898 const_op -= 1;
11899 code = LE;
11900 /* ... fall through to LE case below. */
11901 gcc_fallthrough ();
11903 else
11904 break;
11906 case LE:
11907 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11908 if (const_op < 0)
11910 const_op += 1;
11911 code = LT;
11914 /* If we are doing a <= 0 comparison on a value known to have
11915 a zero sign bit, we can replace this with == 0. */
11916 else if (const_op == 0
11917 && is_a <scalar_int_mode> (mode, &int_mode)
11918 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11919 && (nonzero_bits (op0, int_mode)
11920 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11921 == 0)
11922 code = EQ;
11923 break;
11925 case GE:
11926 /* >= C is equivalent to > (C - 1). */
11927 if (const_op > 0)
11929 const_op -= 1;
11930 code = GT;
11931 /* ... fall through to GT below. */
11932 gcc_fallthrough ();
11934 else
11935 break;
11937 case GT:
11938 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11939 if (const_op < 0)
11941 const_op += 1;
11942 code = GE;
11945 /* If we are doing a > 0 comparison on a value known to have
11946 a zero sign bit, we can replace this with != 0. */
11947 else if (const_op == 0
11948 && is_a <scalar_int_mode> (mode, &int_mode)
11949 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11950 && (nonzero_bits (op0, int_mode)
11951 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11952 == 0)
11953 code = NE;
11954 break;
11956 case LTU:
11957 /* < C is equivalent to <= (C - 1). */
11958 if (const_op > 0)
11960 const_op -= 1;
11961 code = LEU;
11962 /* ... fall through ... */
11963 gcc_fallthrough ();
11965 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11966 else if (is_a <scalar_int_mode> (mode, &int_mode)
11967 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11968 && ((unsigned HOST_WIDE_INT) const_op
11969 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11971 const_op = 0;
11972 code = GE;
11973 break;
11975 else
11976 break;
11978 case LEU:
11979 /* unsigned <= 0 is equivalent to == 0 */
11980 if (const_op == 0)
11981 code = EQ;
11982 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11983 else if (is_a <scalar_int_mode> (mode, &int_mode)
11984 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11985 && ((unsigned HOST_WIDE_INT) const_op
11986 == ((HOST_WIDE_INT_1U
11987 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11989 const_op = 0;
11990 code = GE;
11992 break;
11994 case GEU:
11995 /* >= C is equivalent to > (C - 1). */
11996 if (const_op > 1)
11998 const_op -= 1;
11999 code = GTU;
12000 /* ... fall through ... */
12001 gcc_fallthrough ();
12004 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
12005 else if (is_a <scalar_int_mode> (mode, &int_mode)
12006 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12007 && ((unsigned HOST_WIDE_INT) const_op
12008 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
12010 const_op = 0;
12011 code = LT;
12012 break;
12014 else
12015 break;
12017 case GTU:
12018 /* unsigned > 0 is equivalent to != 0 */
12019 if (const_op == 0)
12020 code = NE;
12021 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
12022 else if (is_a <scalar_int_mode> (mode, &int_mode)
12023 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12024 && ((unsigned HOST_WIDE_INT) const_op
12025 == (HOST_WIDE_INT_1U
12026 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12028 const_op = 0;
12029 code = LT;
12031 break;
12033 default:
12034 break;
12037 *pop1 = GEN_INT (const_op);
12038 return code;
12041 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
12042 comparison code that will be tested.
12044 The result is a possibly different comparison code to use. *POP0 and
12045 *POP1 may be updated.
12047 It is possible that we might detect that a comparison is either always
12048 true or always false. However, we do not perform general constant
12049 folding in combine, so this knowledge isn't useful. Such tautologies
12050 should have been detected earlier. Hence we ignore all such cases. */
12052 static enum rtx_code
12053 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
12055 rtx op0 = *pop0;
12056 rtx op1 = *pop1;
12057 rtx tem, tem1;
12058 int i;
12059 scalar_int_mode mode, inner_mode, tmode;
12060 opt_scalar_int_mode tmode_iter;
12062 /* Try a few ways of applying the same transformation to both operands. */
12063 while (1)
12065 /* The test below this one won't handle SIGN_EXTENDs on these machines,
12066 so check specially. */
12067 if (!WORD_REGISTER_OPERATIONS
12068 && code != GTU && code != GEU && code != LTU && code != LEU
12069 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
12070 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12071 && GET_CODE (XEXP (op1, 0)) == ASHIFT
12072 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12073 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12074 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12075 && (is_a <scalar_int_mode>
12076 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12077 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12078 && CONST_INT_P (XEXP (op0, 1))
12079 && XEXP (op0, 1) == XEXP (op1, 1)
12080 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12081 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12082 && (INTVAL (XEXP (op0, 1))
12083 == (GET_MODE_PRECISION (mode)
12084 - GET_MODE_PRECISION (inner_mode))))
12086 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12087 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12090 /* If both operands are the same constant shift, see if we can ignore the
12091 shift. We can if the shift is a rotate or if the bits shifted out of
12092 this shift are known to be zero for both inputs and if the type of
12093 comparison is compatible with the shift. */
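 /* For instance, (eq (lshiftrt:SI A (const_int 2))
 (lshiftrt:SI B (const_int 2))) can be reduced to (eq A B) provided the
 low two bits of both A and B are known to be zero, since those are the
 only bits the shifts discard. */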
12094 if (GET_CODE (op0) == GET_CODE (op1)
12095 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12096 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12097 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12098 && (code != GT && code != LT && code != GE && code != LE))
12099 || (GET_CODE (op0) == ASHIFTRT
12100 && (code != GTU && code != LTU
12101 && code != GEU && code != LEU)))
12102 && CONST_INT_P (XEXP (op0, 1))
12103 && INTVAL (XEXP (op0, 1)) >= 0
12104 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12105 && XEXP (op0, 1) == XEXP (op1, 1))
12107 machine_mode mode = GET_MODE (op0);
12108 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12109 int shift_count = INTVAL (XEXP (op0, 1));
12111 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12112 mask &= (mask >> shift_count) << shift_count;
12113 else if (GET_CODE (op0) == ASHIFT)
12114 mask = (mask & (mask << shift_count)) >> shift_count;
12116 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12117 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12118 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12119 else
12120 break;
12123 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12124 SUBREGs are of the same mode, and, in both cases, the AND would
12125 be redundant if the comparison was done in the narrower mode,
12126 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12127 and the operand's possibly nonzero bits are 0xffffff01; in that case
12128 if we only care about QImode, we don't need the AND). This case
12129 occurs if the output mode of an scc insn is not SImode and
12130 STORE_FLAG_VALUE == 1 (e.g., the 386).
12132 Similarly, check for a case where the AND's are ZERO_EXTEND
12133 operations from some narrower mode even though a SUBREG is not
12134 present. */
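 /* As an illustrative case, if both operands have the form
 (and:DI (subreg:DI (reg:SI ...) 0) (const_int 0xffffffff)), the
 comparison can be performed directly on the SImode registers, using the
 unsigned variant of the comparison code. */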
12136 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12137 && CONST_INT_P (XEXP (op0, 1))
12138 && CONST_INT_P (XEXP (op1, 1)))
12140 rtx inner_op0 = XEXP (op0, 0);
12141 rtx inner_op1 = XEXP (op1, 0);
12142 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12143 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12144 int changed = 0;
12146 if (paradoxical_subreg_p (inner_op0)
12147 && GET_CODE (inner_op1) == SUBREG
12148 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12149 && (GET_MODE (SUBREG_REG (inner_op0))
12150 == GET_MODE (SUBREG_REG (inner_op1)))
12151 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12152 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12153 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12154 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12156 op0 = SUBREG_REG (inner_op0);
12157 op1 = SUBREG_REG (inner_op1);
12159 /* The resulting comparison is always unsigned since we masked
12160 off the original sign bit. */
12161 code = unsigned_condition (code);
12163 changed = 1;
12166 else if (c0 == c1)
12167 FOR_EACH_MODE_UNTIL (tmode,
12168 as_a <scalar_int_mode> (GET_MODE (op0)))
12169 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12171 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12172 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12173 code = unsigned_condition (code);
12174 changed = 1;
12175 break;
12178 if (! changed)
12179 break;
12182 /* If both operands are NOT, we can strip off the outer operation
12183 and adjust the comparison code for swapped operands; similarly for
12184 NEG, except that this must be an equality comparison. */
12185 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12186 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12187 && (code == EQ || code == NE)))
12188 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12190 else
12191 break;
12194 /* If the first operand is a constant, swap the operands and adjust the
12195 comparison code appropriately, but don't do this if the second operand
12196 is already a constant integer. */
12197 if (swap_commutative_operands_p (op0, op1))
12199 std::swap (op0, op1);
12200 code = swap_condition (code);
12203 /* We now enter a loop during which we will try to simplify the comparison.
12204 For the most part, we are only concerned with comparisons with zero,
12205 but some things may really be comparisons with zero but not start
12206 out looking that way. */
12208 while (CONST_INT_P (op1))
12210 machine_mode raw_mode = GET_MODE (op0);
12211 scalar_int_mode int_mode;
12212 int equality_comparison_p;
12213 int sign_bit_comparison_p;
12214 int unsigned_comparison_p;
12215 HOST_WIDE_INT const_op;
12217 /* We only want to handle integral modes. This catches VOIDmode,
12218 CCmode, and the floating-point modes. An exception is that we
12219 can handle VOIDmode if OP0 is a COMPARE or a comparison
12220 operation. */
12222 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12223 && ! (raw_mode == VOIDmode
12224 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12225 break;
12227 /* Try to simplify the compare to constant, possibly changing the
12228 comparison op, and/or changing op1 to zero. */
12229 code = simplify_compare_const (code, raw_mode, op0, &op1);
12230 const_op = INTVAL (op1);
12232 /* Compute some predicates to simplify code below. */
12234 equality_comparison_p = (code == EQ || code == NE);
12235 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12236 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12237 || code == GEU);
12239 /* If this is a sign bit comparison and we can do arithmetic in
12240 MODE, say that we will only be needing the sign bit of OP0. */
12241 if (sign_bit_comparison_p
12242 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12243 && HWI_COMPUTABLE_MODE_P (int_mode))
12244 op0 = force_to_mode (op0, int_mode,
12245 HOST_WIDE_INT_1U
12246 << (GET_MODE_PRECISION (int_mode) - 1),
12249 if (COMPARISON_P (op0))
12251 /* We can't do anything if OP0 is a condition code value, rather
12252 than an actual data value. */
12253 if (const_op != 0
12254 || CC0_P (XEXP (op0, 0))
12255 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12256 break;
12258 /* Get the two operands being compared. */
12259 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12260 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12261 else
12262 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12264 /* Check for the cases where we simply want the result of the
12265 earlier test or the opposite of that result. */
12266 if (code == NE || code == EQ
12267 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12268 && (code == LT || code == GE)))
12270 enum rtx_code new_code;
12271 if (code == LT || code == NE)
12272 new_code = GET_CODE (op0);
12273 else
12274 new_code = reversed_comparison_code (op0, NULL);
12276 if (new_code != UNKNOWN)
12278 code = new_code;
12279 op0 = tem;
12280 op1 = tem1;
12281 continue;
12284 break;
12287 if (raw_mode == VOIDmode)
12288 break;
12289 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12291 /* Now try cases based on the opcode of OP0. If none of the cases
12292 does a "continue", we exit this loop immediately after the
12293 switch. */
12295 unsigned int mode_width = GET_MODE_PRECISION (mode);
12296 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12297 switch (GET_CODE (op0))
12299 case ZERO_EXTRACT:
12300 /* If we are extracting a single bit from a variable position in
12301 a constant that has only a single bit set and are comparing it
12302 with zero, we can convert this into an equality comparison
12303 between the position and the location of the single bit. */
12304 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12305 have already reduced the shift count modulo the word size. */
12306 if (!SHIFT_COUNT_TRUNCATED
12307 && CONST_INT_P (XEXP (op0, 0))
12308 && XEXP (op0, 1) == const1_rtx
12309 && equality_comparison_p && const_op == 0
12310 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12312 if (BITS_BIG_ENDIAN)
12313 i = BITS_PER_WORD - 1 - i;
12315 op0 = XEXP (op0, 2);
12316 op1 = GEN_INT (i);
12317 const_op = i;
12319 /* Result is nonzero iff shift count is equal to I. */
12320 code = reverse_condition (code);
12321 continue;
12324 /* fall through */
12326 case SIGN_EXTRACT:
12327 tem = expand_compound_operation (op0);
12328 if (tem != op0)
12330 op0 = tem;
12331 continue;
12333 break;
12335 case NOT:
12336 /* If testing for equality, we can take the NOT of the constant. */
12337 if (equality_comparison_p
12338 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12340 op0 = XEXP (op0, 0);
12341 op1 = tem;
12342 continue;
12345 /* If just looking at the sign bit, reverse the sense of the
12346 comparison. */
12347 if (sign_bit_comparison_p)
12349 op0 = XEXP (op0, 0);
12350 code = (code == GE ? LT : GE);
12351 continue;
12353 break;
12355 case NEG:
12356 /* If testing for equality, we can take the NEG of the constant. */
12357 if (equality_comparison_p
12358 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12360 op0 = XEXP (op0, 0);
12361 op1 = tem;
12362 continue;
12365 /* The remaining cases only apply to comparisons with zero. */
12366 if (const_op != 0)
12367 break;
12369 /* When X is ABS or is known positive,
12370 (neg X) is < 0 if and only if X != 0. */
12372 if (sign_bit_comparison_p
12373 && (GET_CODE (XEXP (op0, 0)) == ABS
12374 || (mode_width <= HOST_BITS_PER_WIDE_INT
12375 && (nonzero_bits (XEXP (op0, 0), mode)
12376 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12377 == 0)))
12379 op0 = XEXP (op0, 0);
12380 code = (code == LT ? NE : EQ);
12381 continue;
12384 /* If we have NEG of something whose two high-order bits are the
12385 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12386 if (num_sign_bit_copies (op0, mode) >= 2)
12388 op0 = XEXP (op0, 0);
12389 code = swap_condition (code);
12390 continue;
12392 break;
12394 case ROTATE:
12395 /* If we are testing equality and our count is a constant, we
12396 can perform the inverse operation on our RHS. */
12397 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12398 && (tem = simplify_binary_operation (ROTATERT, mode,
12399 op1, XEXP (op0, 1))) != 0)
12401 op0 = XEXP (op0, 0);
12402 op1 = tem;
12403 continue;
12406 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12407 a particular bit. Convert it to an AND of a constant of that
12408 bit. This will be converted into a ZERO_EXTRACT. */
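 /* For example, (lt:SI (rotate:SI X (const_int 3)) (const_int 0)) only
 tests bit 28 of X, so it becomes
 (ne (and:SI X (const_int 0x10000000)) (const_int 0)). */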
12409 if (const_op == 0 && sign_bit_comparison_p
12410 && CONST_INT_P (XEXP (op0, 1))
12411 && mode_width <= HOST_BITS_PER_WIDE_INT)
12413 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12414 (HOST_WIDE_INT_1U
12415 << (mode_width - 1
12416 - INTVAL (XEXP (op0, 1)))));
12417 code = (code == LT ? NE : EQ);
12418 continue;
12421 /* Fall through. */
12423 case ABS:
12424 /* ABS is ignorable inside an equality comparison with zero. */
12425 if (const_op == 0 && equality_comparison_p)
12427 op0 = XEXP (op0, 0);
12428 continue;
12430 break;
12432 case SIGN_EXTEND:
12433 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12434 (compare FOO CONST) if CONST fits in FOO's mode and we
12435 are either testing inequality or have an unsigned
12436 comparison with ZERO_EXTEND or a signed comparison with
12437 SIGN_EXTEND. But don't do it if we don't have a compare
12438 insn of the given mode, since we'd have to revert it
12439 later on, and then we wouldn't know whether to sign- or
12440 zero-extend. */
12441 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12442 && ! unsigned_comparison_p
12443 && HWI_COMPUTABLE_MODE_P (mode)
12444 && trunc_int_for_mode (const_op, mode) == const_op
12445 && have_insn_for (COMPARE, mode))
12447 op0 = XEXP (op0, 0);
12448 continue;
12450 break;
12452 case SUBREG:
12453 /* Check for the case where we are comparing A - C1 with C2, that is
12455 (subreg:MODE (plus (A) (-C1))) op (C2)
12457 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12458 comparison in the wider mode. One of the following two conditions
12459 must be true in order for this to be valid:
12461 1. The mode extension results in the same bit pattern being added
12462 on both sides and the comparison is equality or unsigned. As
12463 C2 has been truncated to fit in MODE, the pattern can only be
12464 all 0s or all 1s.
12466 2. The mode extension results in the sign bit being copied on
12467 each side.
12469 The difficulty here is that we have predicates for A but not for
12470 (A - C1) so we need to check that C1 is within proper bounds so
12471 as to perturb A as little as possible. */
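 /* As an illustration, (eq (subreg:QI (plus:SI A (const_int -1)) 0)
 (const_int 5)) can, when the nonzero bits of A all lie in the low byte,
 be rewritten to an SImode comparison of (plus:SI A (const_int -1)) with
 (const_int 5), which the PLUS case below then folds to
 (eq A (const_int 6)). */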
12473 if (mode_width <= HOST_BITS_PER_WIDE_INT
12474 && subreg_lowpart_p (op0)
12475 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12476 &inner_mode)
12477 && GET_MODE_PRECISION (inner_mode) > mode_width
12478 && GET_CODE (SUBREG_REG (op0)) == PLUS
12479 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12481 rtx a = XEXP (SUBREG_REG (op0), 0);
12482 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12484 if ((c1 > 0
12485 && (unsigned HOST_WIDE_INT) c1
12486 < HOST_WIDE_INT_1U << (mode_width - 1)
12487 && (equality_comparison_p || unsigned_comparison_p)
12488 /* (A - C1) zero-extends if it is positive and sign-extends
12489 if it is negative, C2 both zero- and sign-extends. */
12490 && (((nonzero_bits (a, inner_mode)
12491 & ~GET_MODE_MASK (mode)) == 0
12492 && const_op >= 0)
12493 /* (A - C1) sign-extends if it is positive and 1-extends
12494 if it is negative, C2 both sign- and 1-extends. */
12495 || (num_sign_bit_copies (a, inner_mode)
12496 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12497 - mode_width)
12498 && const_op < 0)))
12499 || ((unsigned HOST_WIDE_INT) c1
12500 < HOST_WIDE_INT_1U << (mode_width - 2)
12501 /* (A - C1) always sign-extends, like C2. */
12502 && num_sign_bit_copies (a, inner_mode)
12503 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12504 - (mode_width - 1))))
12506 op0 = SUBREG_REG (op0);
12507 continue;
12511 /* If the inner mode is narrower and we are extracting the low part,
12512 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12513 if (paradoxical_subreg_p (op0))
12515 else if (subreg_lowpart_p (op0)
12516 && GET_MODE_CLASS (mode) == MODE_INT
12517 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12518 && (code == NE || code == EQ)
12519 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12520 && !paradoxical_subreg_p (op0)
12521 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12522 & ~GET_MODE_MASK (mode)) == 0)
12524 /* Remove outer subregs that don't do anything. */
12525 tem = gen_lowpart (inner_mode, op1);
12527 if ((nonzero_bits (tem, inner_mode)
12528 & ~GET_MODE_MASK (mode)) == 0)
12530 op0 = SUBREG_REG (op0);
12531 op1 = tem;
12532 continue;
12534 break;
12536 else
12537 break;
12539 /* FALLTHROUGH */
12541 case ZERO_EXTEND:
12542 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12543 && (unsigned_comparison_p || equality_comparison_p)
12544 && HWI_COMPUTABLE_MODE_P (mode)
12545 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12546 && const_op >= 0
12547 && have_insn_for (COMPARE, mode))
12549 op0 = XEXP (op0, 0);
12550 continue;
12552 break;
12554 case PLUS:
12555 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12556 this for equality comparisons due to pathological cases involving
12557 overflows. */
12558 if (equality_comparison_p
12559 && (tem = simplify_binary_operation (MINUS, mode,
12560 op1, XEXP (op0, 1))) != 0)
12562 op0 = XEXP (op0, 0);
12563 op1 = tem;
12564 continue;
12567 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12568 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12569 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12571 op0 = XEXP (XEXP (op0, 0), 0);
12572 code = (code == LT ? EQ : NE);
12573 continue;
12575 break;
12577 case MINUS:
12578 /* We used to optimize signed comparisons against zero, but that
12579 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12580 arrive here as equality comparisons, or (GEU, LTU) are
12581 optimized away. No need to special-case them. */
12583 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12584 (eq B (minus A C)), whichever simplifies. We can only do
12585 this for equality comparisons due to pathological cases involving
12586 overflows. */
12587 if (equality_comparison_p
12588 && (tem = simplify_binary_operation (PLUS, mode,
12589 XEXP (op0, 1), op1)) != 0)
12591 op0 = XEXP (op0, 0);
12592 op1 = tem;
12593 continue;
12596 if (equality_comparison_p
12597 && (tem = simplify_binary_operation (MINUS, mode,
12598 XEXP (op0, 0), op1)) != 0)
12600 op0 = XEXP (op0, 1);
12601 op1 = tem;
12602 continue;
12605 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12606 of bits in X minus 1, is one iff X > 0. */
12607 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12608 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12609 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12610 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12612 op0 = XEXP (op0, 1);
12613 code = (code == GE ? LE : GT);
12614 continue;
12616 break;
12618 case XOR:
12619 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12620 if C is zero or B is a constant. */
12621 if (equality_comparison_p
12622 && (tem = simplify_binary_operation (XOR, mode,
12623 XEXP (op0, 1), op1)) != 0)
12625 op0 = XEXP (op0, 0);
12626 op1 = tem;
12627 continue;
12629 break;
12632 case IOR:
12633 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12634 iff X <= 0. */
12635 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12636 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12637 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12639 op0 = XEXP (op0, 1);
12640 code = (code == GE ? GT : LE);
12641 continue;
12643 break;
12645 case AND:
12646 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12647 will be converted to a ZERO_EXTRACT later. */
12648 if (const_op == 0 && equality_comparison_p
12649 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12650 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12652 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12653 XEXP (XEXP (op0, 0), 1));
12654 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12655 continue;
12658 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12659 zero and X is a comparison and C1 and C2 describe only bits set
12660 in STORE_FLAG_VALUE, we can compare with X. */
12661 if (const_op == 0 && equality_comparison_p
12662 && mode_width <= HOST_BITS_PER_WIDE_INT
12663 && CONST_INT_P (XEXP (op0, 1))
12664 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12665 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12666 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12667 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12669 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12670 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12671 if ((~STORE_FLAG_VALUE & mask) == 0
12672 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12673 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12674 && COMPARISON_P (tem))))
12676 op0 = XEXP (XEXP (op0, 0), 0);
12677 continue;
12681 /* If we are doing an equality comparison of an AND of a bit equal
12682 to the sign bit, replace this with a LT or GE comparison of
12683 the underlying value. */
12684 if (equality_comparison_p
12685 && const_op == 0
12686 && CONST_INT_P (XEXP (op0, 1))
12687 && mode_width <= HOST_BITS_PER_WIDE_INT
12688 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12689 == HOST_WIDE_INT_1U << (mode_width - 1)))
12691 op0 = XEXP (op0, 0);
12692 code = (code == EQ ? GE : LT);
12693 continue;
12696 /* If this AND operation is really a ZERO_EXTEND from a narrower
12697 mode, the constant fits within that mode, and this is either an
12698 equality or unsigned comparison, try to do this comparison in
12699 the narrower mode.
12701 Note that in:
12703 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12704 -> (ne:DI (reg:SI 4) (const_int 0))
12706 the transformation is invalid unless TARGET_TRULY_NOOP_TRUNCATION
12707 allows it or the register is known to hold a value of the
12708 required mode. */
12709 if ((equality_comparison_p || unsigned_comparison_p)
12710 && CONST_INT_P (XEXP (op0, 1))
12711 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12712 & GET_MODE_MASK (mode))
12713 + 1)) >= 0
12714 && const_op >> i == 0
12715 && int_mode_for_size (i, 1).exists (&tmode))
12717 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12718 continue;
12721 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12722 fits in both M1 and M2 and the SUBREG is either paradoxical
12723 or represents the low part, permute the SUBREG and the AND
12724 and try again. */
12725 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12726 && CONST_INT_P (XEXP (op0, 1)))
12728 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12729 /* Require an integral mode, to avoid creating something like
12730 (AND:SF ...). */
12731 if ((is_a <scalar_int_mode>
12732 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12733 /* It is unsafe to commute the AND into the SUBREG if the
12734 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12735 not defined. As originally written the upper bits
12736 have a defined value due to the AND operation.
12737 However, if we commute the AND inside the SUBREG then
12738 they no longer have defined values and the meaning of
12739 the code has been changed.
12740 Also C1 should not change value in the smaller mode,
12741 see PR67028 (a positive C1 can become negative in the
12742 smaller mode, so that the AND does no longer mask the
12743 upper bits). */
12744 && ((WORD_REGISTER_OPERATIONS
12745 && mode_width > GET_MODE_PRECISION (tmode)
12746 && mode_width <= BITS_PER_WORD
12747 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12748 || (mode_width <= GET_MODE_PRECISION (tmode)
12749 && subreg_lowpart_p (XEXP (op0, 0))))
12750 && mode_width <= HOST_BITS_PER_WIDE_INT
12751 && HWI_COMPUTABLE_MODE_P (tmode)
12752 && (c1 & ~mask) == 0
12753 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12754 && c1 != mask
12755 && c1 != GET_MODE_MASK (tmode))
12757 op0 = simplify_gen_binary (AND, tmode,
12758 SUBREG_REG (XEXP (op0, 0)),
12759 gen_int_mode (c1, tmode));
12760 op0 = gen_lowpart (mode, op0);
12761 continue;
12765 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12766 if (const_op == 0 && equality_comparison_p
12767 && XEXP (op0, 1) == const1_rtx
12768 && GET_CODE (XEXP (op0, 0)) == NOT)
12770 op0 = simplify_and_const_int (NULL_RTX, mode,
12771 XEXP (XEXP (op0, 0), 0), 1);
12772 code = (code == NE ? EQ : NE);
12773 continue;
12776 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12777 (eq (and (lshiftrt X) 1) 0).
12778 Also handle the case where (not X) is expressed using xor. */
12779 if (const_op == 0 && equality_comparison_p
12780 && XEXP (op0, 1) == const1_rtx
12781 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12783 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12784 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12786 if (GET_CODE (shift_op) == NOT
12787 || (GET_CODE (shift_op) == XOR
12788 && CONST_INT_P (XEXP (shift_op, 1))
12789 && CONST_INT_P (shift_count)
12790 && HWI_COMPUTABLE_MODE_P (mode)
12791 && (UINTVAL (XEXP (shift_op, 1))
12792 == HOST_WIDE_INT_1U
12793 << INTVAL (shift_count))))
12796 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12797 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12798 code = (code == NE ? EQ : NE);
12799 continue;
12802 break;
12804 case ASHIFT:
12805 /* If we have (compare (ashift FOO N) (const_int C)) and
12806 the high order N bits of FOO (N+1 if an inequality comparison)
12807 are known to be zero, we can do this by comparing FOO with C
12808 shifted right N bits so long as the low-order N bits of C are
12809 zero. */
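 /* For example, (eq (ashift:SI X (const_int 2)) (const_int 20)) becomes
 (eq X (const_int 5)) when the top two bits of X are known to be zero,
 since 20 has its low two bits clear. */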
12810 if (CONST_INT_P (XEXP (op0, 1))
12811 && INTVAL (XEXP (op0, 1)) >= 0
12812 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12813 < HOST_BITS_PER_WIDE_INT)
12814 && (((unsigned HOST_WIDE_INT) const_op
12815 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12816 - 1)) == 0)
12817 && mode_width <= HOST_BITS_PER_WIDE_INT
12818 && (nonzero_bits (XEXP (op0, 0), mode)
12819 & ~(mask >> (INTVAL (XEXP (op0, 1))
12820 + ! equality_comparison_p))) == 0)
12822 /* We must perform a logical shift, not an arithmetic one,
12823 as we want the top N bits of C to be zero. */
12824 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12826 temp >>= INTVAL (XEXP (op0, 1));
12827 op1 = gen_int_mode (temp, mode);
12828 op0 = XEXP (op0, 0);
12829 continue;
12832 /* If we are doing a sign bit comparison, it means we are testing
12833 a particular bit. Convert it to the appropriate AND. */
12834 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12835 && mode_width <= HOST_BITS_PER_WIDE_INT)
12837 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12838 (HOST_WIDE_INT_1U
12839 << (mode_width - 1
12840 - INTVAL (XEXP (op0, 1)))));
12841 code = (code == LT ? NE : EQ);
12842 continue;
12845 /* If this is an equality comparison with zero and we are shifting
12846 the low bit to the sign bit, we can convert this to an AND of the
12847 low-order bit. */
12848 if (const_op == 0 && equality_comparison_p
12849 && CONST_INT_P (XEXP (op0, 1))
12850 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12852 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12853 continue;
12855 break;
12857 case ASHIFTRT:
12858 /* If this is an equality comparison with zero, we can do this
12859 as a logical shift, which might be much simpler. */
12860 if (equality_comparison_p && const_op == 0
12861 && CONST_INT_P (XEXP (op0, 1)))
12863 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12864 XEXP (op0, 0),
12865 INTVAL (XEXP (op0, 1)));
12866 continue;
12869 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12870 do the comparison in a narrower mode. */
12871 if (! unsigned_comparison_p
12872 && CONST_INT_P (XEXP (op0, 1))
12873 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12874 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12875 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12876 .exists (&tmode))
12877 && (((unsigned HOST_WIDE_INT) const_op
12878 + (GET_MODE_MASK (tmode) >> 1) + 1)
12879 <= GET_MODE_MASK (tmode)))
12881 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12882 continue;
12885 /* Likewise if OP0 is a PLUS of a sign extension with a
12886 constant, which is usually represented with the PLUS
12887 between the shifts. */
12888 if (! unsigned_comparison_p
12889 && CONST_INT_P (XEXP (op0, 1))
12890 && GET_CODE (XEXP (op0, 0)) == PLUS
12891 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12892 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12893 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12894 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12895 .exists (&tmode))
12896 && (((unsigned HOST_WIDE_INT) const_op
12897 + (GET_MODE_MASK (tmode) >> 1) + 1)
12898 <= GET_MODE_MASK (tmode)))
12900 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12901 rtx add_const = XEXP (XEXP (op0, 0), 1);
12902 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12903 add_const, XEXP (op0, 1));
12905 op0 = simplify_gen_binary (PLUS, tmode,
12906 gen_lowpart (tmode, inner),
12907 new_const);
12908 continue;
12911 /* FALLTHROUGH */
12912 case LSHIFTRT:
12913 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12914 the low order N bits of FOO are known to be zero, we can do this
12915 by comparing FOO with C shifted left N bits so long as no
12916 overflow occurs. Even if the low order N bits of FOO aren't known
12917 to be zero, if the comparison is >= or < we can use the same
12918 optimization, and for > or <= we can do so by setting all the
12919 low-order N bits in the comparison constant. */
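 /* For instance, (geu (lshiftrt:SI X (const_int 2)) (const_int 3))
 becomes (geu X (const_int 12)); for a strict comparison such as
 (gtu (lshiftrt:SI X (const_int 2)) (const_int 3)), the shifted-out bit
 positions are also set in the constant when they are not known to be
 zero, giving (gtu X (const_int 15)). */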
12920 if (CONST_INT_P (XEXP (op0, 1))
12921 && INTVAL (XEXP (op0, 1)) > 0
12922 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12923 && mode_width <= HOST_BITS_PER_WIDE_INT
12924 && (((unsigned HOST_WIDE_INT) const_op
12925 + (GET_CODE (op0) != LSHIFTRT
12926 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12927 + 1)
12928 : 0))
12929 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12931 unsigned HOST_WIDE_INT low_bits
12932 = (nonzero_bits (XEXP (op0, 0), mode)
12933 & ((HOST_WIDE_INT_1U
12934 << INTVAL (XEXP (op0, 1))) - 1));
12935 if (low_bits == 0 || !equality_comparison_p)
12937 /* If the shift was logical, then we must make the condition
12938 unsigned. */
12939 if (GET_CODE (op0) == LSHIFTRT)
12940 code = unsigned_condition (code);
12942 const_op = (unsigned HOST_WIDE_INT) const_op
12943 << INTVAL (XEXP (op0, 1));
12944 if (low_bits != 0
12945 && (code == GT || code == GTU
12946 || code == LE || code == LEU))
12947 const_op
12948 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12949 op1 = GEN_INT (const_op);
12950 op0 = XEXP (op0, 0);
12951 continue;
12955 /* If we are using this shift to extract just the sign bit, we
12956 can replace this with an LT or GE comparison. */
12957 if (const_op == 0
12958 && (equality_comparison_p || sign_bit_comparison_p)
12959 && CONST_INT_P (XEXP (op0, 1))
12960 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12962 op0 = XEXP (op0, 0);
12963 code = (code == NE || code == GT ? LT : GE);
12964 continue;
12966 break;
12968 default:
12969 break;
12972 break;
12975 /* Now make any compound operations involved in this comparison. Then,
12976 check for an outermost SUBREG on OP0 that is not doing anything or is
12977 paradoxical. The latter transformation must only be performed when
12978 it is known that the "extra" bits will be the same in op0 and op1 or
12979 that they don't matter. There are three cases to consider:
12981 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12982 care bits and we can assume they have any convenient value. So
12983 making the transformation is safe.
12985 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12986 In this case the upper bits of op0 are undefined. We should not make
12987 the simplification in that case as we do not know the contents of
12988 those bits.
12990 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12991 In that case we know those bits are zeros or ones. We must also be
12992 sure that they are the same as the upper bits of op1.
12994 We can never remove a SUBREG for a non-equality comparison because
12995 the sign bit is in a different place in the underlying object. */
12997 rtx_code op0_mco_code = SET;
12998 if (op1 == const0_rtx)
12999 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
13001 op0 = make_compound_operation (op0, op0_mco_code);
13002 op1 = make_compound_operation (op1, SET);
13004 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
13005 && is_int_mode (GET_MODE (op0), &mode)
13006 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13007 && (code == NE || code == EQ))
13009 if (paradoxical_subreg_p (op0))
13011 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
13012 implemented. */
13013 if (REG_P (SUBREG_REG (op0)))
13015 op0 = SUBREG_REG (op0);
13016 op1 = gen_lowpart (inner_mode, op1);
13019 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13020 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
13021 & ~GET_MODE_MASK (mode)) == 0)
13023 tem = gen_lowpart (inner_mode, op1);
13025 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
13026 op0 = SUBREG_REG (op0), op1 = tem;
13030 /* We now do the opposite procedure: Some machines don't have compare
13031 insns in all modes. If OP0's mode is an integer mode smaller than a
13032 word and we can't do a compare in that mode, see if there is a larger
13033 mode for which we can do the compare. There are a number of cases in
13034 which we can use the wider mode. */
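 /* For instance, on a target without a QImode compare instruction,
 (eq (reg:QI X) (const_int 3)) can be carried out as
 (eq (zero_extend:SI (reg:QI X)) (const_int 3)) provided an SImode
 compare exists and the nonzero bits of X, viewed in SImode, fit in the
 low byte. */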
13036 if (is_int_mode (GET_MODE (op0), &mode)
13037 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
13038 && ! have_insn_for (COMPARE, mode))
13039 FOR_EACH_WIDER_MODE (tmode_iter, mode)
13041 tmode = tmode_iter.require ();
13042 if (!HWI_COMPUTABLE_MODE_P (tmode))
13043 break;
13044 if (have_insn_for (COMPARE, tmode))
13046 int zero_extended;
13048 /* If this is a test for negative, we can make an explicit
13049 test of the sign bit. Test this first so we can use
13050 a paradoxical subreg to extend OP0. */
13052 if (op1 == const0_rtx && (code == LT || code == GE)
13053 && HWI_COMPUTABLE_MODE_P (mode))
13055 unsigned HOST_WIDE_INT sign
13056 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
13057 op0 = simplify_gen_binary (AND, tmode,
13058 gen_lowpart (tmode, op0),
13059 gen_int_mode (sign, tmode));
13060 code = (code == LT) ? NE : EQ;
13061 break;
13064 /* If the only nonzero bits in OP0 and OP1 are those in the
13065 narrower mode and this is an equality or unsigned comparison,
13066 we can use the wider mode. Similarly for sign-extended
13067 values, in which case it is true for all comparisons. */
13068 zero_extended = ((code == EQ || code == NE
13069 || code == GEU || code == GTU
13070 || code == LEU || code == LTU)
13071 && (nonzero_bits (op0, tmode)
13072 & ~GET_MODE_MASK (mode)) == 0
13073 && ((CONST_INT_P (op1)
13074 || (nonzero_bits (op1, tmode)
13075 & ~GET_MODE_MASK (mode)) == 0)));
13077 if (zero_extended
13078 || ((num_sign_bit_copies (op0, tmode)
13079 > (unsigned int) (GET_MODE_PRECISION (tmode)
13080 - GET_MODE_PRECISION (mode)))
13081 && (num_sign_bit_copies (op1, tmode)
13082 > (unsigned int) (GET_MODE_PRECISION (tmode)
13083 - GET_MODE_PRECISION (mode)))))
13085 /* If OP0 is an AND and we don't have an AND in MODE either,
13086 make a new AND in the proper mode. */
13087 if (GET_CODE (op0) == AND
13088 && !have_insn_for (AND, mode))
13089 op0 = simplify_gen_binary (AND, tmode,
13090 gen_lowpart (tmode,
13091 XEXP (op0, 0)),
13092 gen_lowpart (tmode,
13093 XEXP (op0, 1)));
13094 else
13096 if (zero_extended)
13098 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13099 op0, mode);
13100 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13101 op1, mode);
13103 else
13105 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13106 op0, mode);
13107 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13108 op1, mode);
13110 break;
13116 /* We may have changed the comparison operands. Re-canonicalize. */
13117 if (swap_commutative_operands_p (op0, op1))
13119 std::swap (op0, op1);
13120 code = swap_condition (code);
13123 /* If this machine only supports a subset of valid comparisons, see if we
13124 can convert an unsupported one into a supported one. */
13125 target_canonicalize_comparison (&code, &op0, &op1, 0);
13127 *pop0 = op0;
13128 *pop1 = op1;
13130 return code;
13133 /* Utility function for record_value_for_reg. Count number of
13134 rtxs in X. */
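 /* For example, (plus (reg A) (reg B)) counts as 3 rtxs; a shared
 operand, as in (plus (reg A) (reg A)), is deliberately counted twice
 and also yields 3. */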
13135 static int
13136 count_rtxs (rtx x)
13138 enum rtx_code code = GET_CODE (x);
13139 const char *fmt;
13140 int i, j, ret = 1;
13142 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13143 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13145 rtx x0 = XEXP (x, 0);
13146 rtx x1 = XEXP (x, 1);
13148 if (x0 == x1)
13149 return 1 + 2 * count_rtxs (x0);
13151 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13152 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13153 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13154 return 2 + 2 * count_rtxs (x0)
13155 + count_rtxs (x == XEXP (x1, 0)
13156 ? XEXP (x1, 1) : XEXP (x1, 0));
13158 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13159 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13160 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13161 return 2 + 2 * count_rtxs (x1)
13162 + count_rtxs (x == XEXP (x0, 0)
13163 ? XEXP (x0, 1) : XEXP (x0, 0));
13166 fmt = GET_RTX_FORMAT (code);
13167 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13168 if (fmt[i] == 'e')
13169 ret += count_rtxs (XEXP (x, i));
13170 else if (fmt[i] == 'E')
13171 for (j = 0; j < XVECLEN (x, i); j++)
13172 ret += count_rtxs (XVECEXP (x, i, j));
13174 return ret;
13177 /* Utility function for following routine. Called when X is part of a value
13178 being stored into last_set_value. Sets last_set_table_tick
13179 for each register mentioned. Similar to mention_regs in cse.c */
13181 static void
13182 update_table_tick (rtx x)
13184 enum rtx_code code = GET_CODE (x);
13185 const char *fmt = GET_RTX_FORMAT (code);
13186 int i, j;
13188 if (code == REG)
13190 unsigned int regno = REGNO (x);
13191 unsigned int endregno = END_REGNO (x);
13192 unsigned int r;
13194 for (r = regno; r < endregno; r++)
13196 reg_stat_type *rsp = &reg_stat[r];
13197 rsp->last_set_table_tick = label_tick;
13200 return;
13203 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13204 if (fmt[i] == 'e')
13206 /* Check for identical subexpressions. If x contains
13207 identical subexpressions we only have to traverse one of
13208 them. */
13209 if (i == 0 && ARITHMETIC_P (x))
13211 /* Note that at this point x1 has already been
13212 processed. */
13213 rtx x0 = XEXP (x, 0);
13214 rtx x1 = XEXP (x, 1);
13216 /* If x0 and x1 are identical then there is no need to
13217 process x0. */
13218 if (x0 == x1)
13219 break;
13221 /* If x0 is identical to a subexpression of x1 then while
13222 processing x1, x0 has already been processed. Thus we
13223 are done with x. */
13224 if (ARITHMETIC_P (x1)
13225 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13226 break;
13228 /* If x1 is identical to a subexpression of x0 then we
13229 still have to process the rest of x0. */
13230 if (ARITHMETIC_P (x0)
13231 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13233 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13234 break;
13238 update_table_tick (XEXP (x, i));
13240 else if (fmt[i] == 'E')
13241 for (j = 0; j < XVECLEN (x, i); j++)
13242 update_table_tick (XVECEXP (x, i, j));
13245 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13246 are saying that the register is clobbered and we no longer know its
13247 value. If INSN is zero, don't update reg_stat[].last_set; this is
13248 only permitted with VALUE also zero and is used to invalidate the
13249 register. */
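 /* For instance, if INSN is (set (reg 65) (plus (reg 65) (const_int 1)))
 and the last recorded value of (reg 65) was, say, (reg 66), the value
 stored in last_set_value becomes (plus (reg 66) (const_int 1)),
 assuming that value is still valid in this block. */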
13251 static void
13252 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13254 unsigned int regno = REGNO (reg);
13255 unsigned int endregno = END_REGNO (reg);
13256 unsigned int i;
13257 reg_stat_type *rsp;
13259 /* If VALUE contains REG and we have a previous value for REG, substitute
13260 the previous value. */
13261 if (value && insn && reg_overlap_mentioned_p (reg, value))
13263 rtx tem;
13265 /* Set things up so get_last_value is allowed to see anything set up to
13266 our insn. */
13267 subst_low_luid = DF_INSN_LUID (insn);
13268 tem = get_last_value (reg);
13270 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13271 it isn't going to be useful and will take a lot of time to process,
13272 so just use the CLOBBER. */
13274 if (tem)
13276 if (ARITHMETIC_P (tem)
13277 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13278 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13279 tem = XEXP (tem, 0);
13280 else if (count_occurrences (value, reg, 1) >= 2)
13282 /* If there are two or more occurrences of REG in VALUE,
13283 prevent the value from growing too much. */
13284 if (count_rtxs (tem) > param_max_last_value_rtl)
13285 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13288 value = replace_rtx (copy_rtx (value), reg, tem);
13292 /* For each register modified, show we don't know its value, that
13293 we don't know about its bitwise content, that its value has been
13294 updated, and that we don't know the location of the death of the
13295 register. */
13296 for (i = regno; i < endregno; i++)
13298 rsp = &reg_stat[i];
13300 if (insn)
13301 rsp->last_set = insn;
13303 rsp->last_set_value = 0;
13304 rsp->last_set_mode = VOIDmode;
13305 rsp->last_set_nonzero_bits = 0;
13306 rsp->last_set_sign_bit_copies = 0;
13307 rsp->last_death = 0;
13308 rsp->truncated_to_mode = VOIDmode;
13311 /* Mark registers that are being referenced in this value. */
13312 if (value)
13313 update_table_tick (value);
13315 /* Now update the status of each register being set.
13316 If someone is using this register in this block, set this register
13317 to invalid since we will get confused between the two lives in this
13318 basic block. This makes using this register always invalid. In cse, we
13319 scan the table to invalidate all entries using this register, but this
13320 is too much work for us. */
13322 for (i = regno; i < endregno; i++)
13324 rsp = &reg_stat[i];
13325 rsp->last_set_label = label_tick;
13326 if (!insn
13327 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13328 rsp->last_set_invalid = 1;
13329 else
13330 rsp->last_set_invalid = 0;
13333 /* The value being assigned might refer to X (like in "x++;"). In that
13334 case, we must replace it with (clobber (const_int 0)) to prevent
13335 infinite loops. */
13336 rsp = &reg_stat[regno];
13337 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13339 value = copy_rtx (value);
13340 if (!get_last_value_validate (&value, insn, label_tick, 1))
13341 value = 0;
13344 /* For the main register being modified, update the value, the mode, the
13345 nonzero bits, and the number of sign bit copies. */
13347 rsp->last_set_value = value;
13349 if (value)
13351 machine_mode mode = GET_MODE (reg);
13352 subst_low_luid = DF_INSN_LUID (insn);
13353 rsp->last_set_mode = mode;
13354 if (GET_MODE_CLASS (mode) == MODE_INT
13355 && HWI_COMPUTABLE_MODE_P (mode))
13356 mode = nonzero_bits_mode;
13357 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13358 rsp->last_set_sign_bit_copies
13359 = num_sign_bit_copies (value, GET_MODE (reg));
13363 /* Called via note_stores from record_dead_and_set_regs to handle one
13364 SET or CLOBBER in an insn. DATA is the instruction in which the
13365 set is occurring. */
13367 static void
13368 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13370 rtx_insn *record_dead_insn = (rtx_insn *) data;
13372 if (GET_CODE (dest) == SUBREG)
13373 dest = SUBREG_REG (dest);
13375 if (!record_dead_insn)
13377 if (REG_P (dest))
13378 record_value_for_reg (dest, NULL, NULL_RTX);
13379 return;
13382 if (REG_P (dest))
13384 /* If we are setting the whole register, we know its value. Otherwise
13385 show that we don't know the value. We can handle a SUBREG if it's
13386 the low part, but we must be careful with paradoxical SUBREGs on
13387 RISC architectures because we cannot strip e.g. an extension around
13388 a load and record the naked load since the RTL middle-end considers
13389 that the upper bits are defined according to LOAD_EXTEND_OP. */
13390 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13391 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13392 else if (GET_CODE (setter) == SET
13393 && GET_CODE (SET_DEST (setter)) == SUBREG
13394 && SUBREG_REG (SET_DEST (setter)) == dest
13395 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13396 BITS_PER_WORD)
13397 && subreg_lowpart_p (SET_DEST (setter)))
13398 record_value_for_reg (dest, record_dead_insn,
13399 WORD_REGISTER_OPERATIONS
13400 && word_register_operation_p (SET_SRC (setter))
13401 && paradoxical_subreg_p (SET_DEST (setter))
13402 ? SET_SRC (setter)
13403 : gen_lowpart (GET_MODE (dest),
13404 SET_SRC (setter)));
13405 else
13406 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13408 else if (MEM_P (dest)
13409 /* Ignore pushes, they clobber nothing. */
13410 && ! push_operand (dest, GET_MODE (dest)))
13411 mem_last_set = DF_INSN_LUID (record_dead_insn);
13414 /* Update the records of when each REG was most recently set or killed
13415 for the things done by INSN. This is the last thing done in processing
13416 INSN in the combiner loop.
13418 We update reg_stat[], in particular fields last_set, last_set_value,
13419 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13420 last_death, and also the similar information mem_last_set (which insn
13421 most recently modified memory) and last_call_luid (which insn was the
13422 most recent subroutine call). */
13424 static void
13425 record_dead_and_set_regs (rtx_insn *insn)
13427 rtx link;
13428 unsigned int i;
13430 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13432 if (REG_NOTE_KIND (link) == REG_DEAD
13433 && REG_P (XEXP (link, 0)))
13435 unsigned int regno = REGNO (XEXP (link, 0));
13436 unsigned int endregno = END_REGNO (XEXP (link, 0));
13438 for (i = regno; i < endregno; i++)
13440 reg_stat_type *rsp;
13442 rsp = &reg_stat[i];
13443 rsp->last_death = insn;
13446 else if (REG_NOTE_KIND (link) == REG_INC)
13447 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13450 if (CALL_P (insn))
13452 HARD_REG_SET callee_clobbers
13453 = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
13454 hard_reg_set_iterator hrsi;
13455 EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
13457 reg_stat_type *rsp;
13459 /* ??? We could try to preserve some information from the last
13460 set of register I if the call doesn't actually clobber
13461 (reg:last_set_mode I), which might be true for ABIs with
13462 partial clobbers. However, it would be difficult to
13463 update last_set_nonzero_bits and last_sign_bit_copies
13464 to account for the part of I that actually was clobbered.
13465 It wouldn't help much anyway, since we rarely see this
13466 situation before RA. */
13467 rsp = &reg_stat[i];
13468 rsp->last_set_invalid = 1;
13469 rsp->last_set = insn;
13470 rsp->last_set_value = 0;
13471 rsp->last_set_mode = VOIDmode;
13472 rsp->last_set_nonzero_bits = 0;
13473 rsp->last_set_sign_bit_copies = 0;
13474 rsp->last_death = 0;
13475 rsp->truncated_to_mode = VOIDmode;
13478 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13480 /* We can't combine into a call pattern. Remember, though, that
13481 the return value register is set at this LUID. We could
13482 still replace a register with the return value from the
13483 wrong subroutine call! */
13484 note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
13486 else
13487 note_stores (insn, record_dead_and_set_regs_1, insn);
13490 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13491 register present in the SUBREG, so for each such SUBREG go back and
13492 adjust nonzero and sign bit information of the registers that are
13493 known to have some zero/sign bits set.
13495 This is needed because when combine blows the SUBREGs away, the
13496 information on zero/sign bits is lost and further combines can be
13497 missed because of that. */
13499 static void
13500 record_promoted_value (rtx_insn *insn, rtx subreg)
13502 struct insn_link *links;
13503 rtx set;
13504 unsigned int regno = REGNO (SUBREG_REG (subreg));
13505 machine_mode mode = GET_MODE (subreg);
13507 if (!HWI_COMPUTABLE_MODE_P (mode))
13508 return;
13510 for (links = LOG_LINKS (insn); links;)
13512 reg_stat_type *rsp;
13514 insn = links->insn;
13515 set = single_set (insn);
13517 if (! set || !REG_P (SET_DEST (set))
13518 || REGNO (SET_DEST (set)) != regno
13519 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13521 links = links->next;
13522 continue;
13525 rsp = &reg_stat[regno];
13526 if (rsp->last_set == insn)
13528 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13529 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13532 if (REG_P (SET_SRC (set)))
13534 regno = REGNO (SET_SRC (set));
13535 links = LOG_LINKS (insn);
13537 else
13538 break;
13542 /* Check if X, a register, is known to contain a value already
13543 truncated to MODE. In this case we can use a subreg to refer to
13544 the truncated value even though in the generic case we would need
13545 an explicit truncation. */
13547 static bool
13548 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13550 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13551 machine_mode truncated = rsp->truncated_to_mode;
13553 if (truncated == 0
13554 || rsp->truncation_label < label_tick_ebb_start)
13555 return false;
13556 if (!partial_subreg_p (mode, truncated))
13557 return true;
13558 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13559 return true;
13560 return false;
13563 /* If X is a hard reg or a subreg record the mode that the register is
13564 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13565 able to turn a truncate into a subreg using this information. Return true
13566 if traversing X is complete. */
13568 static bool
13569 record_truncated_value (rtx x)
13571 machine_mode truncated_mode;
13572 reg_stat_type *rsp;
13574 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13576 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13577 truncated_mode = GET_MODE (x);
13579 if (!partial_subreg_p (truncated_mode, original_mode))
13580 return true;
13582 truncated_mode = GET_MODE (x);
13583 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13584 return true;
13586 x = SUBREG_REG (x);
13588 /* ??? For hard-regs we now record everything. We might be able to
13589 optimize this using last_set_mode. */
13590 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13591 truncated_mode = GET_MODE (x);
13592 else
13593 return false;
13595 rsp = &reg_stat[REGNO (x)];
13596 if (rsp->truncated_to_mode == 0
13597 || rsp->truncation_label < label_tick_ebb_start
13598 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13600 rsp->truncated_to_mode = truncated_mode;
13601 rsp->truncation_label = label_tick;
13604 return true;
13607 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13608 the modes they are used in. This can help turn TRUNCATEs into
13609 SUBREGs. */
13611 static void
13612 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13614 subrtx_var_iterator::array_type array;
13615 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13616 if (record_truncated_value (*iter))
13617 iter.skip_subrtxes ();
13620 /* Scan X for promoted SUBREGs. For each one found,
13621 note what it implies to the registers used in it. */
13623 static void
13624 check_promoted_subreg (rtx_insn *insn, rtx x)
13626 if (GET_CODE (x) == SUBREG
13627 && SUBREG_PROMOTED_VAR_P (x)
13628 && REG_P (SUBREG_REG (x)))
13629 record_promoted_value (insn, x);
13630 else
13632 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13633 int i, j;
13635 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13636 switch (format[i])
13638 case 'e':
13639 check_promoted_subreg (insn, XEXP (x, i));
13640 break;
13641 case 'V':
13642 case 'E':
13643 if (XVEC (x, i) != 0)
13644 for (j = 0; j < XVECLEN (x, i); j++)
13645 check_promoted_subreg (insn, XVECEXP (x, i, j));
13646 break;
13651 /* Verify that all the registers and memory references mentioned in *LOC are
13652 still valid. *LOC was part of a value set in INSN when label_tick was
13653 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13654 the invalid references with (clobber (const_int 0)) and return 1. This
13655 replacement is useful because we often can get useful information about
13656 the form of a value (e.g., if it was produced by a shift that always
13657 produces -1 or 0) even though we don't know exactly what registers it
13658 was produced from. */
13660 static int
13661 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13663 rtx x = *loc;
13664 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13665 int len = GET_RTX_LENGTH (GET_CODE (x));
13666 int i, j;
13668 if (REG_P (x))
13670 unsigned int regno = REGNO (x);
13671 unsigned int endregno = END_REGNO (x);
13672 unsigned int j;
13674 for (j = regno; j < endregno; j++)
13676 reg_stat_type *rsp = &reg_stat[j];
13677 if (rsp->last_set_invalid
13678 /* If this is a pseudo-register that was only set once and not
13679 live at the beginning of the function, it is always valid. */
13680 || (! (regno >= FIRST_PSEUDO_REGISTER
13681 && regno < reg_n_sets_max
13682 && REG_N_SETS (regno) == 1
13683 && (!REGNO_REG_SET_P
13684 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13685 regno)))
13686 && rsp->last_set_label > tick))
13688 if (replace)
13689 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13690 return replace;
13694 return 1;
13696 /* If this is a memory reference, make sure that there were no stores after
13697 it that might have clobbered the value. We don't have alias info, so we
13698 assume any store invalidates it. Moreover, we only have local UIDs, so
13699 we also assume that there were stores in the intervening basic blocks. */
13700 else if (MEM_P (x) && !MEM_READONLY_P (x)
13701 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13703 if (replace)
13704 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13705 return replace;
13708 for (i = 0; i < len; i++)
13710 if (fmt[i] == 'e')
13712 /* Check for identical subexpressions. If x contains
13713 identical subexpressions we only have to traverse one of
13714 them. */
13715 if (i == 1 && ARITHMETIC_P (x))
13717 /* Note that at this point x0 has already been checked
13718 and found valid. */
13719 rtx x0 = XEXP (x, 0);
13720 rtx x1 = XEXP (x, 1);
13722 /* If x0 and x1 are identical then x is also valid. */
13723 if (x0 == x1)
13724 return 1;
13726 /* If x1 is identical to a subexpression of x0 then
13727 while checking x0, x1 has already been checked. Thus
13728 it is valid and so is x. */
13729 if (ARITHMETIC_P (x0)
13730 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13731 return 1;
13733 /* If x0 is identical to a subexpression of x1 then x is
13734 valid iff the rest of x1 is valid. */
13735 if (ARITHMETIC_P (x1)
13736 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13737 return
13738 get_last_value_validate (&XEXP (x1,
13739 x0 == XEXP (x1, 0) ? 1 : 0),
13740 insn, tick, replace);
13743 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13744 replace) == 0)
13745 return 0;
13747 else if (fmt[i] == 'E')
13748 for (j = 0; j < XVECLEN (x, i); j++)
13749 if (get_last_value_validate (&XVECEXP (x, i, j),
13750 insn, tick, replace) == 0)
13751 return 0;
13754 /* If we haven't found a reason for it to be invalid, it is valid. */
13755 return 1;
13758 /* Get the last value assigned to X, if known. Some registers
13759 in the value may be replaced with (clobber (const_int 0)) if their value
13760    is no longer known reliably.  */
13762 static rtx
13763 get_last_value (const_rtx x)
13765 unsigned int regno;
13766 rtx value;
13767 reg_stat_type *rsp;
13769 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13770 then convert it to the desired mode. If this is a paradoxical SUBREG,
13771 we cannot predict what values the "extra" bits might have. */
13772 if (GET_CODE (x) == SUBREG
13773 && subreg_lowpart_p (x)
13774 && !paradoxical_subreg_p (x)
13775 && (value = get_last_value (SUBREG_REG (x))) != 0)
13776 return gen_lowpart (GET_MODE (x), value);
13778 if (!REG_P (x))
13779 return 0;
13781 regno = REGNO (x);
13782 rsp = &reg_stat[regno];
13783 value = rsp->last_set_value;
13785 /* If we don't have a value, or if it isn't for this basic block and
13786      it's either a hard register, set more than once, or it's live
13787 at the beginning of the function, return 0.
13789 Because if it's not live at the beginning of the function then the reg
13790 is always set before being used (is never used without being set).
13791 And, if it's set only once, and it's always set before use, then all
13792 uses must have the same last value, even if it's not from this basic
13793 block. */
13795 if (value == 0
13796 || (rsp->last_set_label < label_tick_ebb_start
13797 && (regno < FIRST_PSEUDO_REGISTER
13798 || regno >= reg_n_sets_max
13799 || REG_N_SETS (regno) != 1
13800 || REGNO_REG_SET_P
13801 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13802 return 0;
13804 /* If the value was set in a later insn than the ones we are processing,
13805 we can't use it even if the register was only set once. */
13806 if (rsp->last_set_label == label_tick
13807 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13808 return 0;
13810 /* If fewer bits were set than what we are asked for now, we cannot use
13811 the value. */
13812 if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13813 GET_MODE_PRECISION (GET_MODE (x))))
13814 return 0;
13816 /* If the value has all its registers valid, return it. */
13817 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13818 return value;
13820 /* Otherwise, make a copy and replace any invalid register with
13821 (clobber (const_int 0)). If that fails for some reason, return 0. */
13823 value = copy_rtx (value);
13824 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13825 return value;
13827 return 0;
13830 /* Define the variables used for communication between the following
13831 routines. */
13833 static unsigned int reg_dead_regno, reg_dead_endregno;
13834 static int reg_dead_flag;
13835 rtx reg_dead_reg;
13837 /* Function called via note_stores from reg_dead_at_p.
13839 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13840    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13842 static void
13843 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13845 unsigned int regno, endregno;
13847 if (!REG_P (dest))
13848 return;
13850 regno = REGNO (dest);
13851 endregno = END_REGNO (dest);
13852 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13853 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13856 /* Return nonzero if REG is known to be dead at INSN.
13858 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13859 referencing REG, it is dead. If we hit a SET referencing REG, it is
13860 live. Otherwise, see if it is live or dead at the start of the basic
13861 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13862 must be assumed to be always live. */
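/* For example, scanning backwards from INSN, hitting a
     (clobber (reg:SI 1))
   covering the register means it is dead at INSN, whereas hitting a
     (set (reg:SI 1) ...)
   means it is live; if neither is found before the head of the block, the
   block's live-in set decides.  (Hard register 1 is used here purely for
   illustration.)  */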
13864 static int
13865 reg_dead_at_p (rtx reg, rtx_insn *insn)
13867 basic_block block;
13868 unsigned int i;
13870 /* Set variables for reg_dead_at_p_1. */
13871 reg_dead_regno = REGNO (reg);
13872 reg_dead_endregno = END_REGNO (reg);
13873 reg_dead_reg = reg;
13875 reg_dead_flag = 0;
13877 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13878 we allow the machine description to decide whether use-and-clobber
13879 patterns are OK. */
13880 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13882 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13883 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13884 return 0;
13887 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13888 beginning of basic block. */
13889 block = BLOCK_FOR_INSN (insn);
13890 for (;;)
13892 if (INSN_P (insn))
13894 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13895 return 1;
13897 note_stores (insn, reg_dead_at_p_1, NULL);
13898 if (reg_dead_flag)
13899 return reg_dead_flag == 1 ? 1 : 0;
13901 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13902 return 1;
13905 if (insn == BB_HEAD (block))
13906 break;
13908 insn = PREV_INSN (insn);
13911 /* Look at live-in sets for the basic block that we were in. */
13912 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13913 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13914 return 0;
13916 return 1;
13919 /* Note hard registers in X that are used. */
13921 static void
13922 mark_used_regs_combine (rtx x)
13924 RTX_CODE code = GET_CODE (x);
13925 unsigned int regno;
13926 int i;
13928 switch (code)
13930 case LABEL_REF:
13931 case SYMBOL_REF:
13932 case CONST:
13933 CASE_CONST_ANY:
13934 case PC:
13935 case ADDR_VEC:
13936 case ADDR_DIFF_VEC:
13937 case ASM_INPUT:
13938 /* CC0 must die in the insn after it is set, so we don't need to take
13939 special note of it here. */
13940 case CC0:
13941 return;
13943 case CLOBBER:
13944 /* If we are clobbering a MEM, mark any hard registers inside the
13945 address as used. */
13946 if (MEM_P (XEXP (x, 0)))
13947 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13948 return;
13950 case REG:
13951 regno = REGNO (x);
13952 /* A hard reg in a wide mode may really be multiple registers.
13953 If so, mark all of them just like the first. */
13954 if (regno < FIRST_PSEUDO_REGISTER)
13956 /* None of this applies to the stack, frame or arg pointers. */
13957 if (regno == STACK_POINTER_REGNUM
13958 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13959 && regno == HARD_FRAME_POINTER_REGNUM)
13960 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13961 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13962 || regno == FRAME_POINTER_REGNUM)
13963 return;
13965 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13967 return;
13969 case SET:
13971 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13972 the address. */
13973 rtx testreg = SET_DEST (x);
13975 while (GET_CODE (testreg) == SUBREG
13976 || GET_CODE (testreg) == ZERO_EXTRACT
13977 || GET_CODE (testreg) == STRICT_LOW_PART)
13978 testreg = XEXP (testreg, 0);
13980 if (MEM_P (testreg))
13981 mark_used_regs_combine (XEXP (testreg, 0));
13983 mark_used_regs_combine (SET_SRC (x));
13985 return;
13987 default:
13988 break;
13991 /* Recursively scan the operands of this expression. */
13994 const char *fmt = GET_RTX_FORMAT (code);
13996 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13998 if (fmt[i] == 'e')
13999 mark_used_regs_combine (XEXP (x, i));
14000 else if (fmt[i] == 'E')
14002 int j;
14004 for (j = 0; j < XVECLEN (x, i); j++)
14005 mark_used_regs_combine (XVECEXP (x, i, j));
14011 /* Remove register number REGNO from the dead registers list of INSN.
14013 Return the note used to record the death, if there was one. */
14015 rtx
14016 remove_death (unsigned int regno, rtx_insn *insn)
14018 rtx note = find_regno_note (insn, REG_DEAD, regno);
14020 if (note)
14021 remove_note (insn, note);
14023 return note;
14026 /* For each register (hardware or pseudo) used within expression X, if its
14027 death is in an instruction with luid between FROM_LUID (inclusive) and
14028 TO_INSN (exclusive), put a REG_DEAD note for that register in the
14029 list headed by PNOTES.
14031 That said, don't move registers killed by maybe_kill_insn.
14033 This is done when X is being merged by combination into TO_INSN. These
14034 notes will then be distributed as needed. */
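/* For example, if (reg:SI 100) last died in an insn with luid between
   FROM_LUID (inclusive) and TO_INSN (exclusive), its REG_DEAD note is
   removed from that insn and a fresh REG_DEAD note for (reg:SI 100) is
   queued on *PNOTES, to be placed later by distribute_notes.  (The
   register number is chosen only for illustration.)  */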
14036 static void
14037 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
14038 rtx *pnotes)
14040 const char *fmt;
14041 int len, i;
14042 enum rtx_code code = GET_CODE (x);
14044 if (code == REG)
14046 unsigned int regno = REGNO (x);
14047 rtx_insn *where_dead = reg_stat[regno].last_death;
14049 /* If we do not know where the register died, it may still die between
14050 FROM_LUID and TO_INSN. If so, find it. This is PR83304. */
14051 if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
14053 rtx_insn *insn = prev_real_nondebug_insn (to_insn);
14054 while (insn
14055 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
14056 && DF_INSN_LUID (insn) >= from_luid)
14058 if (dead_or_set_regno_p (insn, regno))
14060 if (find_regno_note (insn, REG_DEAD, regno))
14061 where_dead = insn;
14062 break;
14065 insn = prev_real_nondebug_insn (insn);
14069 /* Don't move the register if it gets killed in between from and to. */
14070 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
14071 && ! reg_referenced_p (x, maybe_kill_insn))
14072 return;
14074 if (where_dead
14075 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14076 && DF_INSN_LUID (where_dead) >= from_luid
14077 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
14079 rtx note = remove_death (regno, where_dead);
14081 /* It is possible for the call above to return 0. This can occur
14082 when last_death points to I2 or I1 that we combined with.
14083 In that case make a new note.
14085 We must also check for the case where X is a hard register
14086 and NOTE is a death note for a range of hard registers
14087 including X. In that case, we must put REG_DEAD notes for
14088 the remaining registers in place of NOTE. */
14090 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
14091 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
14093 unsigned int deadregno = REGNO (XEXP (note, 0));
14094 unsigned int deadend = END_REGNO (XEXP (note, 0));
14095 unsigned int ourend = END_REGNO (x);
14096 unsigned int i;
14098 for (i = deadregno; i < deadend; i++)
14099 if (i < regno || i >= ourend)
14100 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14103 /* If we didn't find any note, or if we found a REG_DEAD note that
14104 covers only part of the given reg, and we have a multi-reg hard
14105 register, then to be safe we must check for REG_DEAD notes
14106 for each register other than the first. They could have
14107 their own REG_DEAD notes lying around. */
14108 else if ((note == 0
14109 || (note != 0
14110 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
14111 GET_MODE (x))))
14112 && regno < FIRST_PSEUDO_REGISTER
14113 && REG_NREGS (x) > 1)
14115 unsigned int ourend = END_REGNO (x);
14116 unsigned int i, offset;
14117 rtx oldnotes = 0;
14119 if (note)
14120 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14121 else
14122 offset = 1;
14124 for (i = regno + offset; i < ourend; i++)
14125 move_deaths (regno_reg_rtx[i],
14126 maybe_kill_insn, from_luid, to_insn, &oldnotes);
14129 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14131 XEXP (note, 1) = *pnotes;
14132 *pnotes = note;
14134 else
14135 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14138 return;
14141 else if (GET_CODE (x) == SET)
14143 rtx dest = SET_DEST (x);
14145 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14147 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14148 that accesses one word of a multi-word item, some
14149	 piece of every register in the expression is used by
14150 this insn, so remove any old death. */
14151 /* ??? So why do we test for equality of the sizes? */
14153 if (GET_CODE (dest) == ZERO_EXTRACT
14154 || GET_CODE (dest) == STRICT_LOW_PART
14155 || (GET_CODE (dest) == SUBREG
14156 && !read_modify_subreg_p (dest)))
14158 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14159 return;
14162 /* If this is some other SUBREG, we know it replaces the entire
14163 value, so use that as the destination. */
14164 if (GET_CODE (dest) == SUBREG)
14165 dest = SUBREG_REG (dest);
14167 /* If this is a MEM, adjust deaths of anything used in the address.
14168 For a REG (the only other possibility), the entire value is
14169 being replaced so the old value is not used in this insn. */
14171 if (MEM_P (dest))
14172 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14173 to_insn, pnotes);
14174 return;
14177 else if (GET_CODE (x) == CLOBBER)
14178 return;
14180 len = GET_RTX_LENGTH (code);
14181 fmt = GET_RTX_FORMAT (code);
14183 for (i = 0; i < len; i++)
14185 if (fmt[i] == 'E')
14187 int j;
14188 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14189 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14190 to_insn, pnotes);
14192 else if (fmt[i] == 'e')
14193 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14197 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14198 pattern of an insn. X must be a REG. */
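/* For example, with BODY of the form
     (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
	  (reg:SI 101))
   this returns 1 for X == (reg:SI 100), since only a bit-field of that
   register is assigned.  (Register numbers are illustrative only.)  */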
14200 static int
14201 reg_bitfield_target_p (rtx x, rtx body)
14203 int i;
14205 if (GET_CODE (body) == SET)
14207 rtx dest = SET_DEST (body);
14208 rtx target;
14209 unsigned int regno, tregno, endregno, endtregno;
14211 if (GET_CODE (dest) == ZERO_EXTRACT)
14212 target = XEXP (dest, 0);
14213 else if (GET_CODE (dest) == STRICT_LOW_PART)
14214 target = SUBREG_REG (XEXP (dest, 0));
14215 else
14216 return 0;
14218 if (GET_CODE (target) == SUBREG)
14219 target = SUBREG_REG (target);
14221 if (!REG_P (target))
14222 return 0;
14224 tregno = REGNO (target), regno = REGNO (x);
14225 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14226 return target == x;
14228 endtregno = end_hard_regno (GET_MODE (target), tregno);
14229 endregno = end_hard_regno (GET_MODE (x), regno);
14231 return endregno > tregno && regno < endtregno;
14234 else if (GET_CODE (body) == PARALLEL)
14235 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14236 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14237 return 1;
14239 return 0;
14242 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14243 as appropriate. I3 and I2 are the insns resulting from the combination
14244    insns including FROM_INSN (I2 may be zero).
14246    ELIM_I2, ELIM_I1 and ELIM_I0 are either zero or registers that we know will
14247 not need REG_DEAD notes because they are being substituted for. This
14248 saves searching in the most common cases.
14250 Each note in the list is either ignored or placed on some insns, depending
14251 on the type of note. */
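/* For example, a REG_DEAD note taken from FROM_INSN typically ends up on
   I3 if I3 still uses the register, on I2 if only I2 still uses it, or is
   dropped (or moved to an earlier user) if the use disappeared during the
   combination.  */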
14253 static void
14254 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14255 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14257 rtx note, next_note;
14258 rtx tem_note;
14259 rtx_insn *tem_insn;
14261 for (note = notes; note; note = next_note)
14263 rtx_insn *place = 0, *place2 = 0;
14265 next_note = XEXP (note, 1);
14266 switch (REG_NOTE_KIND (note))
14268 case REG_BR_PROB:
14269 case REG_BR_PRED:
14270 /* Doesn't matter much where we put this, as long as it's somewhere.
14271 It is preferable to keep these notes on branches, which is most
14272 likely to be i3. */
14273 place = i3;
14274 break;
14276 case REG_NON_LOCAL_GOTO:
14277 if (JUMP_P (i3))
14278 place = i3;
14279 else
14281 gcc_assert (i2 && JUMP_P (i2));
14282 place = i2;
14284 break;
14286 case REG_EH_REGION:
14287 /* These notes must remain with the call or trapping instruction. */
14288 if (CALL_P (i3))
14289 place = i3;
14290 else if (i2 && CALL_P (i2))
14291 place = i2;
14292 else
14294 gcc_assert (cfun->can_throw_non_call_exceptions);
14295 if (may_trap_p (i3))
14296 place = i3;
14297 else if (i2 && may_trap_p (i2))
14298 place = i2;
14299 /* ??? Otherwise assume we've combined things such that we
14300 can now prove that the instructions can't trap. Drop the
14301 note in this case. */
14303 break;
14305 case REG_ARGS_SIZE:
14306	/* ??? It is unclear how to distribute this among i3-i1.  Assume i3 contains
14307	   the entire adjustment.  Assert that i3 contains at least some adjustment.  */
14308 if (!noop_move_p (i3))
14310 poly_int64 old_size, args_size = get_args_size (note);
14311 /* fixup_args_size_notes looks at REG_NORETURN note,
14312 so ensure the note is placed there first. */
14313 if (CALL_P (i3))
14315 rtx *np;
14316 for (np = &next_note; *np; np = &XEXP (*np, 1))
14317 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14319 rtx n = *np;
14320 *np = XEXP (n, 1);
14321 XEXP (n, 1) = REG_NOTES (i3);
14322 REG_NOTES (i3) = n;
14323 break;
14326 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14327	    /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14328	       note to all noreturn calls; allow that here.  */
14329 gcc_assert (maybe_ne (old_size, args_size)
14330 || (CALL_P (i3)
14331 && !ACCUMULATE_OUTGOING_ARGS
14332 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14334 break;
14336 case REG_NORETURN:
14337 case REG_SETJMP:
14338 case REG_TM:
14339 case REG_CALL_DECL:
14340 case REG_CALL_NOCF_CHECK:
14341 /* These notes must remain with the call. It should not be
14342 possible for both I2 and I3 to be a call. */
14343 if (CALL_P (i3))
14344 place = i3;
14345 else
14347 gcc_assert (i2 && CALL_P (i2));
14348 place = i2;
14350 break;
14352 case REG_UNUSED:
14353 /* Any clobbers for i3 may still exist, and so we must process
14354 REG_UNUSED notes from that insn.
14356 Any clobbers from i2 or i1 can only exist if they were added by
14357 recog_for_combine. In that case, recog_for_combine created the
14358 necessary REG_UNUSED notes. Trying to keep any original
14359 REG_UNUSED notes from these insns can cause incorrect output
14360 if it is for the same register as the original i3 dest.
14361 In that case, we will notice that the register is set in i3,
14362 and then add a REG_UNUSED note for the destination of i3, which
14363 is wrong. However, it is possible to have REG_UNUSED notes from
14364	     i2 or i1 for registers which were both used and clobbered, so
14365 we keep notes from i2 or i1 if they will turn into REG_DEAD
14366 notes. */
14368 /* If this register is set or clobbered in I3, put the note there
14369 unless there is one already. */
14370 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14372 if (from_insn != i3)
14373 break;
14375 if (! (REG_P (XEXP (note, 0))
14376 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14377 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14378 place = i3;
14380 /* Otherwise, if this register is used by I3, then this register
14381 now dies here, so we must put a REG_DEAD note here unless there
14382 is one already. */
14383 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14384 && ! (REG_P (XEXP (note, 0))
14385 ? find_regno_note (i3, REG_DEAD,
14386 REGNO (XEXP (note, 0)))
14387 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14389 PUT_REG_NOTE_KIND (note, REG_DEAD);
14390 place = i3;
14393 /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14394 but we can't tell which at this point. We must reset any
14395 expectations we had about the value that was previously
14396 stored in the reg. ??? Ideally, we'd adjust REG_N_SETS
14397 and, if appropriate, restore its previous value, but we
14398 don't have enough information for that at this point. */
14399 else
14401 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14403 /* Otherwise, if this register is now referenced in i2
14404 then the register used to be modified in one of the
14405 original insns. If it was i3 (say, in an unused
14406 parallel), it's now completely gone, so the note can
14407 be discarded. But if it was modified in i2, i1 or i0
14408 and we still reference it in i2, then we're
14409 referencing the previous value, and since the
14410 register was modified and REG_UNUSED, we know that
14411 the previous value is now dead. So, if we only
14412 reference the register in i2, we change the note to
14413 REG_DEAD, to reflect the previous value. However, if
14414 we're also setting or clobbering the register as
14415 scratch, we know (because the register was not
14416 referenced in i3) that it's unused, just as it was
14417 unused before, and we place the note in i2. */
14418 if (from_insn != i3 && i2 && INSN_P (i2)
14419 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14421 if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14422 PUT_REG_NOTE_KIND (note, REG_DEAD);
14423 if (! (REG_P (XEXP (note, 0))
14424 ? find_regno_note (i2, REG_NOTE_KIND (note),
14425 REGNO (XEXP (note, 0)))
14426 : find_reg_note (i2, REG_NOTE_KIND (note),
14427 XEXP (note, 0))))
14428 place = i2;
14432 break;
14434 case REG_EQUAL:
14435 case REG_EQUIV:
14436 case REG_NOALIAS:
14437 /* These notes say something about results of an insn. We can
14438 only support them if they used to be on I3 in which case they
14439 remain on I3. Otherwise they are ignored.
14441 If the note refers to an expression that is not a constant, we
14442 must also ignore the note since we cannot tell whether the
14443 equivalence is still true. It might be possible to do
14444 slightly better than this (we only have a problem if I2DEST
14445 or I1DEST is present in the expression), but it doesn't
14446 seem worth the trouble. */
14448 if (from_insn == i3
14449 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14450 place = i3;
14451 break;
14453 case REG_INC:
14454 /* These notes say something about how a register is used. They must
14455 be present on any use of the register in I2 or I3. */
14456 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14457 place = i3;
14459 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14461 if (place)
14462 place2 = i2;
14463 else
14464 place = i2;
14466 break;
14468 case REG_LABEL_TARGET:
14469 case REG_LABEL_OPERAND:
14470 /* This can show up in several ways -- either directly in the
14471 pattern, or hidden off in the constant pool with (or without?)
14472 a REG_EQUAL note. */
14473 /* ??? Ignore the without-reg_equal-note problem for now. */
14474 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14475 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14476 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14477 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14478 place = i3;
14480 if (i2
14481 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14482 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14483 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14484 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14486 if (place)
14487 place2 = i2;
14488 else
14489 place = i2;
14492 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14493 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14494 there. */
14495 if (place && JUMP_P (place)
14496 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14497 && (JUMP_LABEL (place) == NULL
14498 || JUMP_LABEL (place) == XEXP (note, 0)))
14500 rtx label = JUMP_LABEL (place);
14502 if (!label)
14503 JUMP_LABEL (place) = XEXP (note, 0);
14504 else if (LABEL_P (label))
14505 LABEL_NUSES (label)--;
14508 if (place2 && JUMP_P (place2)
14509 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14510 && (JUMP_LABEL (place2) == NULL
14511 || JUMP_LABEL (place2) == XEXP (note, 0)))
14513 rtx label = JUMP_LABEL (place2);
14515 if (!label)
14516 JUMP_LABEL (place2) = XEXP (note, 0);
14517 else if (LABEL_P (label))
14518 LABEL_NUSES (label)--;
14519 place2 = 0;
14521 break;
14523 case REG_NONNEG:
14524 /* This note says something about the value of a register prior
14525 to the execution of an insn. It is too much trouble to see
14526 if the note is still correct in all situations. It is better
14527 to simply delete it. */
14528 break;
14530 case REG_DEAD:
14531 /* If we replaced the right hand side of FROM_INSN with a
14532 REG_EQUAL note, the original use of the dying register
14533 will not have been combined into I3 and I2. In such cases,
14534 FROM_INSN is guaranteed to be the first of the combined
14535 instructions, so we simply need to search back before
14536 FROM_INSN for the previous use or set of this register,
14537 then alter the notes there appropriately.
14539 If the register is used as an input in I3, it dies there.
14540 Similarly for I2, if it is nonzero and adjacent to I3.
14542 If the register is not used as an input in either I3 or I2
14543 and it is not one of the registers we were supposed to eliminate,
14544 there are two possibilities. We might have a non-adjacent I2
14545 or we might have somehow eliminated an additional register
14546 from a computation. For example, we might have had A & B where
14547 we discover that B will always be zero. In this case we will
14548 eliminate the reference to A.
14550 In both cases, we must search to see if we can find a previous
14551 use of A and put the death note there. */
14553 if (from_insn
14554 && from_insn == i2mod
14555 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14556 tem_insn = from_insn;
14557 else
14559 if (from_insn
14560 && CALL_P (from_insn)
14561 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14562 place = from_insn;
14563 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14565 /* If the new I2 sets the same register that is marked
14566 dead in the note, we do not in general know where to
14567 put the note. One important case we _can_ handle is
14568 when the note comes from I3. */
14569 if (from_insn == i3)
14570 place = i3;
14571 else
14572 break;
14574 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14575 place = i3;
14576 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14577 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14578 place = i2;
14579 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14580 && !(i2mod
14581 && reg_overlap_mentioned_p (XEXP (note, 0),
14582 i2mod_old_rhs)))
14583 || rtx_equal_p (XEXP (note, 0), elim_i1)
14584 || rtx_equal_p (XEXP (note, 0), elim_i0))
14585 break;
14586 tem_insn = i3;
14589 if (place == 0)
14591 basic_block bb = this_basic_block;
14593 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14595 if (!NONDEBUG_INSN_P (tem_insn))
14597 if (tem_insn == BB_HEAD (bb))
14598 break;
14599 continue;
14602 /* If the register is being set at TEM_INSN, see if that is all
14603 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14604 into a REG_UNUSED note instead. Don't delete sets to
14605 global register vars. */
14606 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14607 || !global_regs[REGNO (XEXP (note, 0))])
14608 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14610 rtx set = single_set (tem_insn);
14611 rtx inner_dest = 0;
14612 rtx_insn *cc0_setter = NULL;
14614 if (set != 0)
14615 for (inner_dest = SET_DEST (set);
14616 (GET_CODE (inner_dest) == STRICT_LOW_PART
14617 || GET_CODE (inner_dest) == SUBREG
14618 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14619 inner_dest = XEXP (inner_dest, 0))
14622 /* Verify that it was the set, and not a clobber that
14623 modified the register.
14625 CC0 targets must be careful to maintain setter/user
14626 pairs. If we cannot delete the setter due to side
14627 effects, mark the user with an UNUSED note instead
14628 of deleting it. */
14630 if (set != 0 && ! side_effects_p (SET_SRC (set))
14631 && rtx_equal_p (XEXP (note, 0), inner_dest)
14632 && (!HAVE_cc0
14633 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14634 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14635 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14637 /* Move the notes and links of TEM_INSN elsewhere.
14638 This might delete other dead insns recursively.
14639 First set the pattern to something that won't use
14640 any register. */
14641 rtx old_notes = REG_NOTES (tem_insn);
14643 PATTERN (tem_insn) = pc_rtx;
14644 REG_NOTES (tem_insn) = NULL;
14646 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14647 NULL_RTX, NULL_RTX, NULL_RTX);
14648 distribute_links (LOG_LINKS (tem_insn));
14650 unsigned int regno = REGNO (XEXP (note, 0));
14651 reg_stat_type *rsp = &reg_stat[regno];
14652 if (rsp->last_set == tem_insn)
14653 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14655 SET_INSN_DELETED (tem_insn);
14656 if (tem_insn == i2)
14657 i2 = NULL;
14659 /* Delete the setter too. */
14660 if (cc0_setter)
14662 PATTERN (cc0_setter) = pc_rtx;
14663 old_notes = REG_NOTES (cc0_setter);
14664 REG_NOTES (cc0_setter) = NULL;
14666 distribute_notes (old_notes, cc0_setter,
14667 cc0_setter, NULL,
14668 NULL_RTX, NULL_RTX, NULL_RTX);
14669 distribute_links (LOG_LINKS (cc0_setter));
14671 SET_INSN_DELETED (cc0_setter);
14672 if (cc0_setter == i2)
14673 i2 = NULL;
14676 else
14678 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14680 /* If there isn't already a REG_UNUSED note, put one
14681 here. Do not place a REG_DEAD note, even if
14682 the register is also used here; that would not
14683 match the algorithm used in lifetime analysis
14684 and can cause the consistency check in the
14685 scheduler to fail. */
14686 if (! find_regno_note (tem_insn, REG_UNUSED,
14687 REGNO (XEXP (note, 0))))
14688 place = tem_insn;
14689 break;
14692 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14693 || (CALL_P (tem_insn)
14694 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14696 place = tem_insn;
14698 /* If we are doing a 3->2 combination, and we have a
14699 register which formerly died in i3 and was not used
14700 by i2, which now no longer dies in i3 and is used in
14701 i2 but does not die in i2, and place is between i2
14702 and i3, then we may need to move a link from place to
14703 i2. */
14704 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14705 && from_insn
14706 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14707 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14709 struct insn_link *links = LOG_LINKS (place);
14710 LOG_LINKS (place) = NULL;
14711 distribute_links (links);
14713 break;
14716 if (tem_insn == BB_HEAD (bb))
14717 break;
14722 /* If the register is set or already dead at PLACE, we needn't do
14723 anything with this note if it is still a REG_DEAD note.
14724	 We check here if it is set at all, not if it is totally replaced,
14725 which is what `dead_or_set_p' checks, so also check for it being
14726 set partially. */
14728 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14730 unsigned int regno = REGNO (XEXP (note, 0));
14731 reg_stat_type *rsp = &reg_stat[regno];
14733 if (dead_or_set_p (place, XEXP (note, 0))
14734 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14736 /* Unless the register previously died in PLACE, clear
14737 last_death. [I no longer understand why this is
14738 being done.] */
14739 if (rsp->last_death != place)
14740 rsp->last_death = 0;
14741 place = 0;
14743 else
14744 rsp->last_death = place;
14746 /* If this is a death note for a hard reg that is occupying
14747 multiple registers, ensure that we are still using all
14748 parts of the object. If we find a piece of the object
14749 that is unused, we must arrange for an appropriate REG_DEAD
14750 note to be added for it. However, we can't just emit a USE
14751 and tag the note to it, since the register might actually
14752	     be dead; so we recurse, and the recursive call then finds
14753 the previous insn that used this register. */
14755 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14757 unsigned int endregno = END_REGNO (XEXP (note, 0));
14758 bool all_used = true;
14759 unsigned int i;
14761 for (i = regno; i < endregno; i++)
14762 if ((! refers_to_regno_p (i, PATTERN (place))
14763 && ! find_regno_fusage (place, USE, i))
14764 || dead_or_set_regno_p (place, i))
14766 all_used = false;
14767 break;
14770 if (! all_used)
14772 /* Put only REG_DEAD notes for pieces that are
14773 not already dead or set. */
14775 for (i = regno; i < endregno;
14776 i += hard_regno_nregs (i, reg_raw_mode[i]))
14778 rtx piece = regno_reg_rtx[i];
14779 basic_block bb = this_basic_block;
14781 if (! dead_or_set_p (place, piece)
14782 && ! reg_bitfield_target_p (piece,
14783 PATTERN (place)))
14785 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14786 NULL_RTX);
14788 distribute_notes (new_note, place, place,
14789 NULL, NULL_RTX, NULL_RTX,
14790 NULL_RTX);
14792 else if (! refers_to_regno_p (i, PATTERN (place))
14793 && ! find_regno_fusage (place, USE, i))
14794 for (tem_insn = PREV_INSN (place); ;
14795 tem_insn = PREV_INSN (tem_insn))
14797 if (!NONDEBUG_INSN_P (tem_insn))
14799 if (tem_insn == BB_HEAD (bb))
14800 break;
14801 continue;
14803 if (dead_or_set_p (tem_insn, piece)
14804 || reg_bitfield_target_p (piece,
14805 PATTERN (tem_insn)))
14807 add_reg_note (tem_insn, REG_UNUSED, piece);
14808 break;
14813 place = 0;
14817 break;
14819 default:
14820 /* Any other notes should not be present at this point in the
14821 compilation. */
14822 gcc_unreachable ();
14825 if (place)
14827 XEXP (note, 1) = REG_NOTES (place);
14828 REG_NOTES (place) = note;
14830 /* Set added_notes_insn to the earliest insn we added a note to. */
14831 if (added_notes_insn == 0
14832 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14833 added_notes_insn = place;
14836 if (place2)
14838 add_shallow_copy_of_reg_note (place2, note);
14840 /* Set added_notes_insn to the earliest insn we added a note to. */
14841 if (added_notes_insn == 0
14842 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14843 added_notes_insn = place2;
14848 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14849 I3, I2, and I1 to new locations. This is also called to add a link
14850 pointing at I3 when I3's destination is changed. */
14852 static void
14853 distribute_links (struct insn_link *links)
14855 struct insn_link *link, *next_link;
14857 for (link = links; link; link = next_link)
14859 rtx_insn *place = 0;
14860 rtx_insn *insn;
14861 rtx set, reg;
14863 next_link = link->next;
14865 /* If the insn that this link points to is a NOTE, ignore it. */
14866 if (NOTE_P (link->insn))
14867 continue;
14869 set = 0;
14870 rtx pat = PATTERN (link->insn);
14871 if (GET_CODE (pat) == SET)
14872 set = pat;
14873 else if (GET_CODE (pat) == PARALLEL)
14875 int i;
14876 for (i = 0; i < XVECLEN (pat, 0); i++)
14878 set = XVECEXP (pat, 0, i);
14879 if (GET_CODE (set) != SET)
14880 continue;
14882 reg = SET_DEST (set);
14883 while (GET_CODE (reg) == ZERO_EXTRACT
14884 || GET_CODE (reg) == STRICT_LOW_PART
14885 || GET_CODE (reg) == SUBREG)
14886 reg = XEXP (reg, 0);
14888 if (!REG_P (reg))
14889 continue;
14891 if (REGNO (reg) == link->regno)
14892 break;
14894 if (i == XVECLEN (pat, 0))
14895 continue;
14897 else
14898 continue;
14900 reg = SET_DEST (set);
14902 while (GET_CODE (reg) == ZERO_EXTRACT
14903 || GET_CODE (reg) == STRICT_LOW_PART
14904 || GET_CODE (reg) == SUBREG)
14905 reg = XEXP (reg, 0);
14907 if (reg == pc_rtx)
14908 continue;
14910 /* A LOG_LINK is defined as being placed on the first insn that uses
14911 a register and points to the insn that sets the register. Start
14912 searching at the next insn after the target of the link and stop
14913 when we reach a set of the register or the end of the basic block.
14915 Note that this correctly handles the link that used to point from
14916 I3 to I2. Also note that not much searching is typically done here
14917 since most links don't point very far away. */
14919 for (insn = NEXT_INSN (link->insn);
14920 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14921 || BB_HEAD (this_basic_block->next_bb) != insn));
14922 insn = NEXT_INSN (insn))
14923 if (DEBUG_INSN_P (insn))
14924 continue;
14925 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14927 if (reg_referenced_p (reg, PATTERN (insn)))
14928 place = insn;
14929 break;
14931 else if (CALL_P (insn)
14932 && find_reg_fusage (insn, USE, reg))
14934 place = insn;
14935 break;
14937 else if (INSN_P (insn) && reg_set_p (reg, insn))
14938 break;
14940 /* If we found a place to put the link, place it there unless there
14941 is already a link to the same insn as LINK at that point. */
14943 if (place)
14945 struct insn_link *link2;
14947 FOR_EACH_LOG_LINK (link2, place)
14948 if (link2->insn == link->insn && link2->regno == link->regno)
14949 break;
14951 if (link2 == NULL)
14953 link->next = LOG_LINKS (place);
14954 LOG_LINKS (place) = link;
14956 /* Set added_links_insn to the earliest insn we added a
14957 link to. */
14958 if (added_links_insn == 0
14959 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14960 added_links_insn = place;
14966 /* Check for any register or memory mentioned in EQUIV that is not
14967 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14968 of EXPR where some registers may have been replaced by constants. */
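/* For example, if EXPR is (plus:SI (reg:SI 100) (reg:SI 101)) and EQUIV is
   (plus:SI (reg:SI 100) (const_int 4)), every register in EQUIV also
   appears in EXPR and this returns false; if EQUIV instead mentioned
   (reg:SI 102), it would return true.  (Register numbers are
   illustrative.)  */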
14970 static bool
14971 unmentioned_reg_p (rtx equiv, rtx expr)
14973 subrtx_iterator::array_type array;
14974 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14976 const_rtx x = *iter;
14977 if ((REG_P (x) || MEM_P (x))
14978 && !reg_mentioned_p (x, expr))
14979 return true;
14981 return false;
14984 DEBUG_FUNCTION void
14985 dump_combine_stats (FILE *file)
14987 fprintf
14988 (file,
14989 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14990 combine_attempts, combine_merges, combine_extras, combine_successes);
14993 void
14994 dump_combine_total_stats (FILE *file)
14996 fprintf
14997 (file,
14998 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14999 total_attempts, total_merges, total_extras, total_successes);
15002 /* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo copy, because
15003 the reg-to-reg copy can usefully combine with later instructions, but we
15004 do not want to combine the hard reg into later instructions, for that
15005 restricts register allocation. */
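/* For example, assuming hard register 0 holds an incoming argument, a copy
     (set (reg:SI 100) (reg:SI 0))
   is rewritten as
     (set (reg:SI 101) (reg:SI 0))
     (set (reg:SI 100) (reg:SI 101))
   so that later combinations involve the pseudo 101 rather than the hard
   register.  */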
15006 static void
15007 make_more_copies (void)
15009 basic_block bb;
15011 FOR_EACH_BB_FN (bb, cfun)
15013 rtx_insn *insn;
15015 FOR_BB_INSNS (bb, insn)
15017 if (!NONDEBUG_INSN_P (insn))
15018 continue;
15020 rtx set = single_set (insn);
15021 if (!set)
15022 continue;
15024 rtx dest = SET_DEST (set);
15025 if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
15026 continue;
15028 rtx src = SET_SRC (set);
15029 if (!(REG_P (src) && HARD_REGISTER_P (src)))
15030 continue;
15031 if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
15032 continue;
15034 rtx new_reg = gen_reg_rtx (GET_MODE (dest));
15035 rtx_insn *new_insn = gen_move_insn (new_reg, src);
15036 SET_SRC (set) = new_reg;
15037 emit_insn_before (new_insn, insn);
15038 df_insn_rescan (insn);
15043 /* Try combining insns through substitution. */
15044 static unsigned int
15045 rest_of_handle_combine (void)
15047 make_more_copies ();
15049 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
15050 df_note_add_problem ();
15051 df_analyze ();
15053 regstat_init_n_sets_and_refs ();
15054 reg_n_sets_max = max_reg_num ();
15056 int rebuild_jump_labels_after_combine
15057 = combine_instructions (get_insns (), max_reg_num ());
15059 /* Combining insns may have turned an indirect jump into a
15060 direct jump. Rebuild the JUMP_LABEL fields of jumping
15061 instructions. */
15062 if (rebuild_jump_labels_after_combine)
15064 if (dom_info_available_p (CDI_DOMINATORS))
15065 free_dominance_info (CDI_DOMINATORS);
15066 timevar_push (TV_JUMP);
15067 rebuild_jump_labels (get_insns ());
15068 cleanup_cfg (0);
15069 timevar_pop (TV_JUMP);
15072 regstat_free_n_sets_and_refs ();
15073 return 0;
15076 namespace {
15078 const pass_data pass_data_combine =
15080 RTL_PASS, /* type */
15081 "combine", /* name */
15082 OPTGROUP_NONE, /* optinfo_flags */
15083 TV_COMBINE, /* tv_id */
15084 PROP_cfglayout, /* properties_required */
15085 0, /* properties_provided */
15086 0, /* properties_destroyed */
15087 0, /* todo_flags_start */
15088 TODO_df_finish, /* todo_flags_finish */
15091 class pass_combine : public rtl_opt_pass
15093 public:
15094 pass_combine (gcc::context *ctxt)
15095 : rtl_opt_pass (pass_data_combine, ctxt)
15098 /* opt_pass methods: */
15099 virtual bool gate (function *) { return (optimize > 0); }
15100 virtual unsigned int execute (function *)
15102 return rest_of_handle_combine ();
15105 }; // class pass_combine
15107 } // anon namespace
15109 rtl_opt_pass *
15110 make_pass_combine (gcc::context *ctxt)
15112 return new pass_combine (ctxt);