/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
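
/* As an illustrative sketch (not part of the original sources): given
   the linked pair

	(set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
	(set (reg:SI 101) (mult:SI (reg:SI 100) (reg:SI 102)))

   where reg 100 dies in the second insn, combine substitutes the first
   SET's source into the second, producing

	(set (reg:SI 101) (mult:SI (plus:SI (reg:SI 99) (const_int 4))
				   (reg:SI 102)))

   and installs the result only if the target recognizes it as a valid
   insn and it is no more costly than the original pair.  */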
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "stor-layout.h"
#include "cfgrtl.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "explow.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "expr.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"
#include "function-abi.h"
/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;
struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;
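
  /* Illustrative example (not from the original file): on a machine
     where QImode loads zero extend, a pseudo only ever loaded that way
     would have nonzero_bits == 0xff and, in SImode, at least 24
     sign_bit_copies.  */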
  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};


static vec<reg_stat_type> reg_stat;
/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;


/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;
/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
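
/* Example use (illustrative, not from the original file): walk the
   insns recorded as feeding INSN and stop at a particular producer:

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->insn == producer)
	    break;

   After the loop, LINK is null iff PRODUCER does not feed INSN.  */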
/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}
/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;


/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;
static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx,
			      rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}
/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}
/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}
/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}
/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
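
/* Example use (illustrative, not from the original file): replace the
   second operand of X with a new constant so that undo_all can restore
   it if the tentative combination is abandoned:

	SUBST (XEXP (x, 1), GEN_INT (4));  */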
/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;
  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old_cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}
/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      edges_deleted |= delete_insn_and_edges (insn);
	    }
	}
    }

  return edges_deleted;
}
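
/* The canonical noop move is a self-copy such as
   (set (reg:SI 100) (reg:SI 100)), which the substitutions made by
   combine can leave behind (illustrative example, not from the
   original file).  */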
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}
/* Fill in the log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are carried over from what the original flow.c did.
     Don't ask me why it is done this way; I don't know and if it works,
     I don't want to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
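
/* Illustrative picture (not from the original file): in a block

	insn 10:  (set (reg:SI 100) ...)
	insn 11:  (set (reg:SI 101) (... (reg:SI 100) ...))

   the reverse scan first records insn 11 as the pending use of reg 100;
   when insn 10's definition is then reached, LOG_LINKS (insn 11) gets
   the link (insn 10, regno 100), which is exactly what try_combine
   follows later.  */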
/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);
  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;
  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = param_max_combine_insns;

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }
	  /* Try each sequence of three linked insns ending with this one.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }
	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }
	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may already have been
		     marked dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }
	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  new_direct_jump_p |= delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}
/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
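
/* Note: the memset above deliberately clears only the fields declared
   before sign_bit_copies (the last_* group), so the sign_bit_copies,
   nonzero_bits and truncation fields computed by the first scan survive
   into the forward scan.  */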
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local
	= cgraph_node::local_info_node (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed;  */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
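
/* Illustrative example (not from the original file): for a signed char
   argument promoted to SImode on a sign-extending target, the value
   recorded for the incoming register is

	(sign_extend:SI (clobber:QI (const_int 0)))

   i.e. "some unknown QImode value, sign extended to SImode".  */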
/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
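
/* Worked example (illustrative): for SRC == 0x80 in QImode with
   PREC == 32, GET_MODE_MASK is 0xff, so the result is
   0x80 | ~0xff, i.e. a constant with every bit above bit 7 set --
   the value the register would actually hold after the machine
   sign-extends the short immediate.  */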
/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
			   rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      machine_mode mode = GET_MODE (x);
      if (GET_MODE_CLASS (mode) == MODE_INT
	  && HWI_COMPUTABLE_MODE_P (mode))
	mode = nonzero_bits_mode;
      bits = nonzero_bits (src, mode);
      if (reg_equal && bits)
	bits &= nonzero_bits (reg_equal, mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
	{
	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
	  if (num == 0 || numeq > num)
	    num = numeq;
	}
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
	rsp->sign_bit_copies = num;
    }
}
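
/* Illustrative example (not from the original file): if SRC has
   nonzero_bits 0xff but the REG_EQUAL note's value has nonzero_bits
   0x0f, only the intersection 0x0f is recorded, since both expressions
   describe the same runtime value.  */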
/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  struct insn_link *link;

	  FOR_EACH_LOG_LINK (link, insn)
	    if (dead_or_set_p (link->insn, x))
	      break;
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (mode);
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (paradoxical_subreg_p (SET_DEST (set))
	      && SUBREG_REG (SET_DEST (set)) == x))
	update_rsp_from_reg_equal (rsp, insn, set, x);
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (mode);
	  rsp->sign_bit_copies = 1;
	}
    }
}
1814 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1815 optionally insns that were previously combined into I3 or that will be
1816 combined into the merger of INSN and I3. The order is PRED, PRED2,
1817 INSN, SUCC, SUCC2, I3.
1819 Return 0 if the combination is not allowed for any reason.
1821 If the combination is allowed, *PDEST will be set to the single
1822 destination of INSN and *PSRC to the single source, and this function
1823 will return 1. */
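/* A minimal sketch (assumed, not taken from the sources) of the common
   two-insn case this predicate gates:
     INSN: (set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
     I3:   (set (mem:SI (reg:SI 98)) (reg:SI 100))
   If reg 100 dies in I3 and nothing between the insns modifies reg 99 or
   reg 98, *PDEST becomes (reg:SI 100), *PSRC the PLUS, and we return 1.  */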
1825 static int
1826 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1827 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1828 rtx *pdest, rtx *psrc)
1830 int i;
1831 const_rtx set = 0;
1832 rtx src, dest;
1833 rtx_insn *p;
1834 rtx link;
1835 bool all_adjacent = true;
1836 int (*is_volatile_p) (const_rtx);
1838 if (succ)
1840 if (succ2)
1842 if (next_active_insn (succ2) != i3)
1843 all_adjacent = false;
1844 if (next_active_insn (succ) != succ2)
1845 all_adjacent = false;
1847 else if (next_active_insn (succ) != i3)
1848 all_adjacent = false;
1849 if (next_active_insn (insn) != succ)
1850 all_adjacent = false;
1852 else if (next_active_insn (insn) != i3)
1853 all_adjacent = false;
1855 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1856 or a PARALLEL consisting of such a SET and CLOBBERs.
1858 If INSN has CLOBBER parallel parts, ignore them for our processing.
1859 By definition, these happen during the execution of the insn. When it
1860 is merged with another insn, all bets are off. If they are, in fact,
1861 needed and aren't also supplied in I3, they may be added by
1862 recog_for_combine. Otherwise, it won't match.
1864 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1865 note.
1867 Get the source and destination of INSN. If more than one, can't
1868 combine. */
1870 if (GET_CODE (PATTERN (insn)) == SET)
1871 set = PATTERN (insn);
1872 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1873 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1875 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1877 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1879 switch (GET_CODE (elt))
1881 /* This is important to combine floating point insns
1882 for the SH4 port. */
1883 case USE:
1884 /* Combining an isolated USE doesn't make sense.
1885 We depend here on combinable_i3pat to reject them. */
1886 /* The code below this loop only verifies that the inputs of
1887 the SET in INSN do not change. We call reg_set_between_p
1888 to verify that the REG in the USE does not change between
1889 I3 and INSN.
1890 If the USE in INSN was for a pseudo register, the matching
1891 insn pattern will likely match any register; combining this
1892 with any other USE would only be safe if we knew that the
1893 used registers have identical values, or if there was
1894 something to tell them apart, e.g. different modes. For
1895 now, we forgo such complicated tests and simply disallow
1896 combining of USES of pseudo registers with any other USE. */
1897 if (REG_P (XEXP (elt, 0))
1898 && GET_CODE (PATTERN (i3)) == PARALLEL)
1900 rtx i3pat = PATTERN (i3);
1901 int i = XVECLEN (i3pat, 0) - 1;
1902 unsigned int regno = REGNO (XEXP (elt, 0));
1904 do
1906 rtx i3elt = XVECEXP (i3pat, 0, i);
1908 if (GET_CODE (i3elt) == USE
1909 && REG_P (XEXP (i3elt, 0))
1910 && (REGNO (XEXP (i3elt, 0)) == regno
1911 ? reg_set_between_p (XEXP (elt, 0),
1912 PREV_INSN (insn), i3)
1913 : regno >= FIRST_PSEUDO_REGISTER))
1914 return 0;
1916 while (--i >= 0);
1918 break;
1920 /* We can ignore CLOBBERs. */
1921 case CLOBBER:
1922 break;
1924 case SET:
1925 /* Ignore SETs whose result isn't used, but not those that
1926 have side-effects. */
1927 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1928 && insn_nothrow_p (insn)
1929 && !side_effects_p (elt))
1930 break;
1932 /* If we have already found a SET, this is a second one and
1933 so we cannot combine with this insn. */
1934 if (set)
1935 return 0;
1937 set = elt;
1938 break;
1940 default:
1941 /* Anything else means we can't combine. */
1942 return 0;
1946 if (set == 0
1947 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1948 so don't do anything with it. */
1949 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1950 return 0;
1952 else
1953 return 0;
1955 if (set == 0)
1956 return 0;
1958 /* The simplification in expand_field_assignment may call back to
1959 get_last_value, so set safe guard here. */
1960 subst_low_luid = DF_INSN_LUID (insn);
1962 set = expand_field_assignment (set);
1963 src = SET_SRC (set), dest = SET_DEST (set);
1965 /* Do not eliminate a user-specified register if it is in an
1966 asm input, because we may break the register asm usage defined
1967 in the GCC manual if we allow it.
1968 Be aware that this may cover more cases than we expect, but this
1969 should be harmless. */
1970 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1971 && extract_asm_operands (PATTERN (i3)))
1972 return 0;
1974 /* Don't eliminate a store to the stack pointer. */
1975 if (dest == stack_pointer_rtx
1976 /* Don't combine with an insn that sets a register to itself if it has
1977 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1978 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1979 /* Can't merge an ASM_OPERANDS. */
1980 || GET_CODE (src) == ASM_OPERANDS
1981 /* Can't merge a function call. */
1982 || GET_CODE (src) == CALL
1983 /* Don't eliminate a function call argument. */
1984 || (CALL_P (i3)
1985 && (find_reg_fusage (i3, USE, dest)
1986 || (REG_P (dest)
1987 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1988 && global_regs[REGNO (dest)])))
1989 /* Don't substitute into an incremented register. */
1990 || FIND_REG_INC_NOTE (i3, dest)
1991 || (succ && FIND_REG_INC_NOTE (succ, dest))
1992 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1993 /* Don't substitute into a non-local goto; this confuses the CFG. */
1994 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1995 /* Make sure that DEST is not used after INSN but before SUCC, or
1996 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1997 || (!all_adjacent
1998 && ((succ2
1999 && (reg_used_between_p (dest, succ2, i3)
2000 || reg_used_between_p (dest, succ, succ2)))
2001 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2002 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2003 || (succ
2004 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2005 that case SUCC is not in the insn stream, so use SUCC2
2006 instead for this test. */
2007 && reg_used_between_p (dest, insn,
2008 succ2
2009 && INSN_UID (succ) == INSN_UID (succ2)
2010 ? succ2 : succ))))
2011 /* Make sure that the value that is to be substituted for the register
2012 does not use any registers whose values alter in between. However,
2013 if the insns are adjacent, a use can't cross a set even though we
2014 think it might (this can happen for a sequence of insns each setting
2015 the same destination; last_set of that register might point to
2016 a NOTE). If INSN has a REG_EQUIV note, the register is always
2017 equivalent to the memory so the substitution is valid even if there
2018 are intervening stores. Also, don't move a volatile asm or
2019 UNSPEC_VOLATILE across any other insns. */
2020 || (! all_adjacent
2021 && (((!MEM_P (src)
2022 || ! find_reg_note (insn, REG_EQUIV, src))
2023 && modified_between_p (src, insn, i3))
2024 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2025 || GET_CODE (src) == UNSPEC_VOLATILE))
2026 /* Don't combine across a CALL_INSN, because that would possibly
2027 change whether the life span of some REGs crosses calls or not,
2028 and it is a pain to update that information.
2029 Exception: if source is a constant, moving it later can't hurt.
2030 Accept that as a special case. */
2031 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2032 return 0;
2034 /* DEST must either be a REG or CC0. */
2035 if (REG_P (dest))
2037 /* If register alignment is being enforced for multi-word items in all
2038 cases except for parameters, it is possible to have a register copy
2039 insn referencing a hard register that is not allowed to contain the
2040 mode being copied and which would not be valid as an operand of most
2041 insns. Eliminate this problem by not combining with such an insn.
2043 Also, on some machines we don't want to extend the life of a hard
2044 register. */
2046 if (REG_P (src)
2047 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2048 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2049 /* Don't extend the life of a hard register unless it is
2050 user variable (if we have few registers) or it can't
2051 fit into the desired register (meaning something special
2052 is going on).
2053 Also avoid substituting a return register into I3, because
2054 reload can't handle a conflict with constraints of other
2055 inputs. */
2056 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2057 && !targetm.hard_regno_mode_ok (REGNO (src),
2058 GET_MODE (src)))))
2059 return 0;
2061 else if (GET_CODE (dest) != CC0)
2062 return 0;
2065 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2066 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2067 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2069 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2071 /* If the clobber represents an earlyclobber operand, we must not
2072 substitute an expression containing the clobbered register.
2073 As we do not analyze the constraint strings here, we have to
2074 make the conservative assumption. However, if the register is
2075 a fixed hard reg, the clobber cannot represent any operand;
2076 we leave it up to the machine description to either accept or
2077 reject use-and-clobber patterns. */
2078 if (!REG_P (reg)
2079 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2080 || !fixed_regs[REGNO (reg)])
2081 if (reg_overlap_mentioned_p (reg, src))
2082 return 0;
2085 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2086 or not), reject, unless nothing volatile comes between it and I3. */
2088 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2090 /* Make sure neither succ nor succ2 contains a volatile reference. */
2091 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2092 return 0;
2093 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2094 return 0;
2095 /* We'll check insns between INSN and I3 below. */
2098 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2099 to be an explicit register variable, and was chosen for a reason. */
2101 if (GET_CODE (src) == ASM_OPERANDS
2102 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2103 return 0;
2105 /* If INSN contains volatile references (specifically volatile MEMs),
2106 we cannot combine across any other volatile references.
2107 Even if INSN doesn't contain volatile references, any intervening
2108 volatile insn might affect machine state. */
2110 is_volatile_p = volatile_refs_p (PATTERN (insn))
2111 ? volatile_refs_p
2112 : volatile_insn_p;
2114 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2115 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2116 return 0;
2118 /* If INSN contains an autoincrement or autodecrement, make sure that
2119 register is not used between there and I3, and not already used in
2120 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2121 Also insist that I3 not be a jump if using LRA; if it were one
2122 and the incremented register were spilled, we would lose.
2123 Reload handles this correctly. */
2125 if (AUTO_INC_DEC)
2126 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2127 if (REG_NOTE_KIND (link) == REG_INC
2128 && ((JUMP_P (i3) && targetm.lra_p ())
2129 || reg_used_between_p (XEXP (link, 0), insn, i3)
2130 || (pred != NULL_RTX
2131 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2132 || (pred2 != NULL_RTX
2133 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2134 || (succ != NULL_RTX
2135 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2136 || (succ2 != NULL_RTX
2137 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2138 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2139 return 0;
2141 /* Don't combine an insn that follows a CC0-setting insn.
2142 An insn that uses CC0 must not be separated from the one that sets it.
2143 We do, however, allow I2 to follow a CC0-setting insn if that insn
2144 is passed as I1; in that case it will be deleted also.
2145 We also allow combining in this case if all the insns are adjacent
2146 because that would leave the two CC0 insns adjacent as well.
2147 It would be more logical to test whether CC0 occurs inside I1 or I2,
2148 but that would be much slower, and this ought to be equivalent. */
2150 if (HAVE_cc0)
2152 p = prev_nonnote_insn (insn);
2153 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2154 && ! all_adjacent)
2155 return 0;
2158 /* If we get here, we have passed all the tests and the combination is
2159 to be allowed. */
2161 *pdest = dest;
2162 *psrc = src;
2164 return 1;
2167 /* LOC is the location within I3 that contains its pattern or the component
2168 of a PARALLEL of the pattern. We validate that it is valid for combining.
2170 One problem is that if I3 modifies its output, as opposed to replacing it
2171 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST,
2172 as doing so would produce an insn that is not equivalent to the original insns.
2174 Consider:
2176 (set (reg:DI 101) (reg:DI 100))
2177 (set (subreg:SI (reg:DI 101) 0) <foo>)
2179 This is NOT equivalent to:
2181 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2182 (set (reg:DI 101) (reg:DI 100))])
2184 Not only does this modify 100 (in which case it might still be valid
2185 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2187 We can also run into a problem if I2 sets a register that I1
2188 uses and I1 gets directly substituted into I3 (not via I2). In that
2189 case, we would be getting the wrong value of I2DEST into I3, so we
2190 must reject the combination. This case occurs when I2 and I1 both
2191 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2192 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2193 of a SET must prevent combination from occurring. The same situation
2194 can occur for I0, in which case I0_NOT_IN_SRC is set.
2196 Before doing the above check, we first try to expand a field assignment
2197 into a set of logical operations.
2199 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2200 we place a register that is both set and used within I3. If more than one
2201 such register is detected, we fail.
2203 Return 1 if the combination is valid, zero otherwise. */
2205 static int
2206 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2207 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2209 rtx x = *loc;
2211 if (GET_CODE (x) == SET)
2213 rtx set = x;
2214 rtx dest = SET_DEST (set);
2215 rtx src = SET_SRC (set);
2216 rtx inner_dest = dest;
2217 rtx subdest;
2219 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2220 || GET_CODE (inner_dest) == SUBREG
2221 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2222 inner_dest = XEXP (inner_dest, 0);
2224 /* Check for the case where I3 modifies its output, as discussed
2225 above. We don't want to prevent pseudos from being combined
2226 into the address of a MEM, so only prevent the combination if
2227 i1 or i2 set the same MEM. */
2228 if ((inner_dest != dest
2229 && (!MEM_P (inner_dest)
2230 || rtx_equal_p (i2dest, inner_dest)
2231 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2232 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2233 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2234 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2235 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2237 /* This is the same test done in can_combine_p except we can't test
2238 all_adjacent; we don't have to, since this instruction will stay
2239 in place, thus we are not considering increasing the lifetime of
2240 INNER_DEST.
2242 Also, if this insn sets a function argument, combining it with
2243 something that might need a spill could clobber a previous
2244 function argument; the all_adjacent test in can_combine_p also
2245 checks this; here, we do a more specific test for this case. */
2247 || (REG_P (inner_dest)
2248 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2249 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2250 GET_MODE (inner_dest)))
2251 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2252 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2253 return 0;
2255 /* If DEST is used in I3, it is being killed in this insn, so
2256 record that for later. We have to consider paradoxical
2257 subregs here, since they kill the whole register, but we
2258 ignore partial subregs, STRICT_LOW_PART, etc.
2259 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2260 STACK_POINTER_REGNUM, since these are always considered to be
2261 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2262 subdest = dest;
2263 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2264 subdest = SUBREG_REG (subdest);
2265 if (pi3dest_killed
2266 && REG_P (subdest)
2267 && reg_referenced_p (subdest, PATTERN (i3))
2268 && REGNO (subdest) != FRAME_POINTER_REGNUM
2269 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2270 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2271 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2272 || (REGNO (subdest) != ARG_POINTER_REGNUM
2273 || ! fixed_regs [REGNO (subdest)]))
2274 && REGNO (subdest) != STACK_POINTER_REGNUM)
2276 if (*pi3dest_killed)
2277 return 0;
2279 *pi3dest_killed = subdest;
2283 else if (GET_CODE (x) == PARALLEL)
2285 int i;
2287 for (i = 0; i < XVECLEN (x, 0); i++)
2288 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2289 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2290 return 0;
2293 return 1;
2296 /* Return 1 if X is an arithmetic expression that contains a multiplication
2297 or division. We don't count multiplications by powers of two here. */
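/* For instance (our examples):
     (plus:SI (mult:SI (reg:SI 1) (reg:SI 2)) (reg:SI 3))   => 1
     (mult:SI (reg:SI 1) (const_int 8))                     => 0
   The second is really a shift, since 8 is a power of two.  */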
2299 static int
2300 contains_muldiv (rtx x)
2302 switch (GET_CODE (x))
2304 case MOD: case DIV: case UMOD: case UDIV:
2305 return 1;
2307 case MULT:
2308 return ! (CONST_INT_P (XEXP (x, 1))
2309 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2310 default:
2311 if (BINARY_P (x))
2312 return contains_muldiv (XEXP (x, 0))
2313 || contains_muldiv (XEXP (x, 1));
2315 if (UNARY_P (x))
2316 return contains_muldiv (XEXP (x, 0));
2318 return 0;
2322 /* Determine whether INSN can be used in a combination. Return nonzero if
2323 not. This is used in try_combine to detect early some cases where we
2324 can't perform combinations. */
2326 static int
2327 cant_combine_insn_p (rtx_insn *insn)
2329 rtx set;
2330 rtx src, dest;
2332 /* If this isn't really an insn, we can't do anything.
2333 This can occur when flow deletes an insn that it has merged into an
2334 auto-increment address. */
2335 if (!NONDEBUG_INSN_P (insn))
2336 return 1;
2338 /* Never combine loads and stores involving hard regs that are likely
2339 to be spilled. The register allocator can usually handle such
2340 reg-reg moves by tying. If we allow the combiner to make
2341 substitutions of likely-spilled regs, reload might die.
2342 As an exception, we allow combinations involving fixed regs; these are
2343 not available to the register allocator so there's no risk involved. */
2345 set = single_set (insn);
2346 if (! set)
2347 return 0;
2348 src = SET_SRC (set);
2349 dest = SET_DEST (set);
2350 if (GET_CODE (src) == SUBREG)
2351 src = SUBREG_REG (src);
2352 if (GET_CODE (dest) == SUBREG)
2353 dest = SUBREG_REG (dest);
2354 if (REG_P (src) && REG_P (dest)
2355 && ((HARD_REGISTER_P (src)
2356 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2357 #ifdef LEAF_REGISTERS
2358 && ! LEAF_REGISTERS [REGNO (src)])
2359 #else
2360 )
2361 #endif
2362 || (HARD_REGISTER_P (dest)
2363 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2364 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2365 return 1;
2367 return 0;
2370 struct likely_spilled_retval_info
2372 unsigned regno, nregs;
2373 unsigned mask;
2376 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2377 hard registers that are known to be written to / clobbered in full. */
2378 static void
2379 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2381 struct likely_spilled_retval_info *const info =
2382 (struct likely_spilled_retval_info *) data;
2383 unsigned regno, nregs;
2384 unsigned new_mask;
2386 if (!REG_P (XEXP (set, 0)))
2387 return;
2388 regno = REGNO (x);
2389 if (regno >= info->regno + info->nregs)
2390 return;
2391 nregs = REG_NREGS (x);
2392 if (regno + nregs <= info->regno)
2393 return;
2394 new_mask = (2U << (nregs - 1)) - 1;
2395 if (regno < info->regno)
2396 new_mask >>= info->regno - regno;
2397 else
2398 new_mask <<= regno - info->regno;
2399 info->mask &= ~new_mask;
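/* A worked example (ours): with info->regno == 10 and info->nregs == 4,
   the caller starts from mask 0b1111.  A store covering the two hard regs
   starting at reg 12 gives new_mask == 0b11, shifted left by 12 - 10 == 2
   into 0b1100, so the surviving mask is 0b0011: only regs 10 and 11 may
   still carry live parts of the return value.  */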
2402 /* Return nonzero iff part of the return value is live during INSN, and
2403 it is likely spilled. This can happen when more than one insn is needed
2404 to copy the return value, e.g. when we consider combining into the
2405 second copy insn for a complex value. */
2407 static int
2408 likely_spilled_retval_p (rtx_insn *insn)
2410 rtx_insn *use = BB_END (this_basic_block);
2411 rtx reg;
2412 rtx_insn *p;
2413 unsigned regno, nregs;
2414 /* We assume here that no machine mode needs more than
2415 32 hard registers when the value overlaps with a register
2416 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2417 unsigned mask;
2418 struct likely_spilled_retval_info info;
2420 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2421 return 0;
2422 reg = XEXP (PATTERN (use), 0);
2423 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2424 return 0;
2425 regno = REGNO (reg);
2426 nregs = REG_NREGS (reg);
2427 if (nregs == 1)
2428 return 0;
2429 mask = (2U << (nregs - 1)) - 1;
2431 /* Disregard parts of the return value that are set later. */
2432 info.regno = regno;
2433 info.nregs = nregs;
2434 info.mask = mask;
2435 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2436 if (INSN_P (p))
2437 note_stores (p, likely_spilled_retval_1, &info);
2438 mask = info.mask;
2440 /* Check if any of the (probably) live return value registers is
2441 likely spilled. */
2442 nregs--;
2443 do
2445 if ((mask & 1 << nregs)
2446 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2447 return 1;
2448 } while (nregs--);
2449 return 0;
2452 /* Adjust INSN after we made a change to its destination.
2454 Changing the destination can invalidate notes that say something about
2455 the results of the insn and a LOG_LINK pointing to the insn. */
2457 static void
2458 adjust_for_new_dest (rtx_insn *insn)
2460 /* For notes, be conservative and simply remove them. */
2461 remove_reg_equal_equiv_notes (insn);
2463 /* The new insn will have a destination that was previously the destination
2464 of an insn just above it. Call distribute_links to make a LOG_LINK from
2465 the next use of that destination. */
2467 rtx set = single_set (insn);
2468 gcc_assert (set);
2470 rtx reg = SET_DEST (set);
2472 while (GET_CODE (reg) == ZERO_EXTRACT
2473 || GET_CODE (reg) == STRICT_LOW_PART
2474 || GET_CODE (reg) == SUBREG)
2475 reg = XEXP (reg, 0);
2476 gcc_assert (REG_P (reg));
2478 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2480 df_insn_rescan (insn);
2483 /* Return TRUE if combine can reuse reg X in mode MODE.
2484 ADDED_SETS is nonzero if the original set is still required. */
2485 static bool
2486 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2488 unsigned int regno;
2490 if (!REG_P (x))
2491 return false;
2493 /* Don't change between modes with different underlying register sizes,
2494 since this could lead to invalid subregs. */
2495 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2496 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2497 return false;
2499 regno = REGNO (x);
2500 /* Allow hard registers if the new mode is legal, and occupies no more
2501 registers than the old mode. */
2502 if (regno < FIRST_PSEUDO_REGISTER)
2503 return (targetm.hard_regno_mode_ok (regno, mode)
2504 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2506 /* Or a pseudo that is only used once. */
2507 return (regno < reg_n_sets_max
2508 && REG_N_SETS (regno) == 1
2509 && !added_sets
2510 && !REG_USERVAR_P (x));
2514 /* Check whether X, the destination of a set, refers to part of
2515 the register specified by REG. */
2517 static bool
2518 reg_subword_p (rtx x, rtx reg)
2520 /* Check that reg is an integer mode register. */
2521 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2522 return false;
2524 if (GET_CODE (x) == STRICT_LOW_PART
2525 || GET_CODE (x) == ZERO_EXTRACT)
2526 x = XEXP (x, 0);
2528 return GET_CODE (x) == SUBREG
2529 && SUBREG_REG (x) == reg
2530 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
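/* E.g. (our illustration), with REG = (reg:DI 100) the destination
     (strict_low_part (subreg:SI (reg:DI 100) 0))
   satisfies reg_subword_p, while a plain (reg:DI 100) or a SUBREG of a
   different register does not.  */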
2533 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2534 Note that the INSN should be deleted *after* removing dead edges, so
2535 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2536 but not for a (set (pc) (label_ref FOO)). */
2538 static void
2539 update_cfg_for_uncondjump (rtx_insn *insn)
2541 basic_block bb = BLOCK_FOR_INSN (insn);
2542 gcc_assert (BB_END (bb) == insn);
2544 purge_dead_edges (bb);
2546 delete_insn (insn);
2547 if (EDGE_COUNT (bb->succs) == 1)
2549 rtx_insn *insn;
2551 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2553 /* Remove barriers from the footer if there are any. */
2554 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2555 if (BARRIER_P (insn))
2557 if (PREV_INSN (insn))
2558 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2559 else
2560 BB_FOOTER (bb) = NEXT_INSN (insn);
2561 if (NEXT_INSN (insn))
2562 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2564 else if (LABEL_P (insn))
2565 break;
2569 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2570 by an arbitrary number of CLOBBERs. */
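/* For example (an assumed target sketch), with N == 2 this accepts
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:SI 102) (reg:SI 103))
		(clobber (reg:CC 17))])
   but rejects the pattern if a trailing element is anything other than a
   CLOBBER, or if it is the special (clobber (const_int 0)).  */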
2571 static bool
2572 is_parallel_of_n_reg_sets (rtx pat, int n)
2574 if (GET_CODE (pat) != PARALLEL)
2575 return false;
2577 int len = XVECLEN (pat, 0);
2578 if (len < n)
2579 return false;
2581 int i;
2582 for (i = 0; i < n; i++)
2583 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2584 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2585 return false;
2586 for ( ; i < len; i++)
2587 switch (GET_CODE (XVECEXP (pat, 0, i)))
2589 case CLOBBER:
2590 if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2591 return false;
2592 break;
2593 default:
2594 return false;
2596 return true;
2599 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2600 CLOBBERs), can be split into individual SETs in that order, without
2601 changing semantics. */
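/* A sketch of the failure mode (our example): in
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:SI 102) (reg:SI 100))])
   the second SET reads the first SET's destination, so executing the SETs
   one after the other would change semantics and we return false.  */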
2602 static bool
2603 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2605 if (!insn_nothrow_p (insn))
2606 return false;
2608 rtx pat = PATTERN (insn);
2610 int i, j;
2611 for (i = 0; i < n; i++)
2613 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2614 return false;
2616 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2618 for (j = i + 1; j < n; j++)
2619 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2620 return false;
2623 return true;
2626 /* Return whether X is just a single set, with the source
2627 a general_operand. */
2628 static bool
2629 is_just_move (rtx x)
2631 if (INSN_P (x))
2632 x = PATTERN (x);
2634 return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
2637 /* Callback function to count autoincs. */
2639 static int
2640 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2642 (*((int *) arg))++;
2644 return 0;
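/* A usage sketch (ours): to count the auto-inc side effects of an insn,
     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_auto_inc, &n);
   after which N holds the number of PRE/POST_INC/DEC/MODIFY expressions;
   this is exactly how n_auto_inc is computed in try_combine below.  */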
2647 /* Try to combine the insns I0, I1 and I2 into I3.
2648 Here I0, I1 and I2 appear earlier than I3.
2649 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into I3.
2652 If we are combining more than two insns and the resulting insn is not
2653 recognized, try splitting it into two insns. If that happens, I2 and I3
2654 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2655 Otherwise, I0, I1 and I2 are pseudo-deleted.
2657 Return 0 if the combination does not work. Then nothing is changed.
2658 If we did the combination, return the insn at which combine should
2659 resume scanning.
2661 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2662 new direct jump instruction.
2664 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2665 been I3 passed to an earlier try_combine within the same basic
2666 block. */
2668 static rtx_insn *
2669 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2670 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2672 /* New patterns for I3 and I2, respectively. */
2673 rtx newpat, newi2pat = 0;
2674 rtvec newpat_vec_with_clobbers = 0;
2675 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2676 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2677 dead. */
2678 int added_sets_0, added_sets_1, added_sets_2;
2679 /* Total number of SETs to put into I3. */
2680 int total_sets;
2681 /* Nonzero if I2's or I1's body now appears in I3. */
2682 int i2_is_used = 0, i1_is_used = 0;
2683 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2684 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2685 /* Contains I3 if the destination of I3 is used in its source, which means
2686 that the old life of I3 is being killed. If that usage is placed into
2687 I2 and not in I3, a REG_DEAD note must be made. */
2688 rtx i3dest_killed = 0;
2689 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2690 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2691 /* Copy of SET_SRC of I1 and I0, if needed. */
2692 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2693 /* Set if I2DEST was reused as a scratch register. */
2694 bool i2scratch = false;
2695 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2696 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2697 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2698 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2699 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2700 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2701 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2702 /* Notes that must be added to REG_NOTES in I3 and I2. */
2703 rtx new_i3_notes, new_i2_notes;
2704 /* Notes that we substituted I3 into I2 instead of the normal case. */
2705 int i3_subst_into_i2 = 0;
2706 /* Notes that I1, I2 or I3 is a MULT operation. */
2707 int have_mult = 0;
2708 int swap_i2i3 = 0;
2709 int split_i2i3 = 0;
2710 int changed_i3_dest = 0;
2711 bool i2_was_move = false, i3_was_move = false;
2712 int n_auto_inc = 0;
2714 int maxreg;
2715 rtx_insn *temp_insn;
2716 rtx temp_expr;
2717 struct insn_link *link;
2718 rtx other_pat = 0;
2719 rtx new_other_notes;
2720 int i;
2721 scalar_int_mode dest_mode, temp_mode;
2723 /* Immediately return if any two of I0, I1 and I2 are the same insn
2724 (I3 can never be). */
2725 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2726 return 0;
2728 /* Only try four-insn combinations when there's high likelihood of
2729 success. Look for simple insns, such as loads of constants or
2730 binary operations involving a constant. */
2731 if (i0)
2733 int i;
2734 int ngood = 0;
2735 int nshift = 0;
2736 rtx set0, set3;
2738 if (!flag_expensive_optimizations)
2739 return 0;
2741 for (i = 0; i < 4; i++)
2743 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2744 rtx set = single_set (insn);
2745 rtx src;
2746 if (!set)
2747 continue;
2748 src = SET_SRC (set);
2749 if (CONSTANT_P (src))
2751 ngood += 2;
2752 break;
2754 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2755 ngood++;
2756 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2757 || GET_CODE (src) == LSHIFTRT)
2758 nshift++;
2761 /* If I0 loads from a memory location and I3 stores to the same one,
2762 then I1 and I2 are likely manipulating its value. Ideally we'll be
2763 able to combine all four insns into a bitfield insertion of some kind.
2765 Note the source in I0 might be inside a sign/zero extension and the
2766 memory modes in I0 and I3 might be different. So extract the address
2767 from the destination of I3 and search for it in the source of I0.
2769 In the event that there's a match but the source/dest do not actually
2770 refer to the same memory, the worst that happens is we try some
2771 combinations that we wouldn't have otherwise. */
2772 if ((set0 = single_set (i0))
2773 /* Ensure the source of SET0 is a MEM, possibly buried inside
2774 an extension. */
2775 && (GET_CODE (SET_SRC (set0)) == MEM
2776 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2777 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2778 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2779 && (set3 = single_set (i3))
2780 /* Ensure the destination of SET3 is a MEM. */
2781 && GET_CODE (SET_DEST (set3)) == MEM
2782 /* Would it be better to extract the base address for the MEM
2783 in SET3 and look for that? I don't have cases where it matters
2784 but I could envision such cases. */
2785 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2786 ngood += 2;
2788 if (ngood < 2 && nshift < 2)
2789 return 0;
2792 /* Exit early if one of the insns involved can't be used for
2793 combinations. */
2794 if (CALL_P (i2)
2795 || (i1 && CALL_P (i1))
2796 || (i0 && CALL_P (i0))
2797 || cant_combine_insn_p (i3)
2798 || cant_combine_insn_p (i2)
2799 || (i1 && cant_combine_insn_p (i1))
2800 || (i0 && cant_combine_insn_p (i0))
2801 || likely_spilled_retval_p (i3))
2802 return 0;
2804 combine_attempts++;
2805 undobuf.other_insn = 0;
2807 /* Reset the hard register usage information. */
2808 CLEAR_HARD_REG_SET (newpat_used_regs);
2810 if (dump_file && (dump_flags & TDF_DETAILS))
2812 if (i0)
2813 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2814 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2815 else if (i1)
2816 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2817 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2818 else
2819 fprintf (dump_file, "\nTrying %d -> %d:\n",
2820 INSN_UID (i2), INSN_UID (i3));
2822 if (i0)
2823 dump_insn_slim (dump_file, i0);
2824 if (i1)
2825 dump_insn_slim (dump_file, i1);
2826 dump_insn_slim (dump_file, i2);
2827 dump_insn_slim (dump_file, i3);
2830 /* If multiple insns feed into one of I2 or I3, they can be in any
2831 order. To simplify the code below, reorder them in sequence. */
2832 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2833 std::swap (i0, i2);
2834 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2835 std::swap (i0, i1);
2836 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2837 std::swap (i1, i2);
2839 added_links_insn = 0;
2840 added_notes_insn = 0;
2842 /* First check for one important special case that the code below will
2843 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2844 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2845 we may be able to replace that destination with the destination of I3.
2846 This occurs in the common code where we compute both a quotient and
2847 remainder into a structure, in which case we want to do the computation
2848 directly into the structure to avoid register-register copies.
2850 Note that this case handles both multiple sets in I2 and also cases
2851 where I2 has a number of CLOBBERs inside the PARALLEL.
2853 We make very conservative checks below and only try to handle the
2854 most common cases of this. For example, we only handle the case
2855 where I2 and I3 are adjacent to avoid making difficult register
2856 usage tests. */
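/* An assumed illustration of the quotient/remainder case just described:
     I2: (parallel [(set (reg:SI 100) (div:SI (reg:SI 98) (reg:SI 99)))
		    (set (reg:SI 101) (mod:SI (reg:SI 98) (reg:SI 99)))])
     I3: (set (mem:SI (reg:SI 97)) (reg:SI 101))
   If reg 101 dies in I3, the second SET of I2 can compute the remainder
   straight into I3's memory destination.  */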
2858 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2859 && REG_P (SET_SRC (PATTERN (i3)))
2860 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2861 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2862 && GET_CODE (PATTERN (i2)) == PARALLEL
2863 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2864 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2865 below would need to check what is inside (and reg_overlap_mentioned_p
2866 doesn't support those codes anyway). Don't allow those destinations;
2867 the resulting insn isn't likely to be recognized anyway. */
2868 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2869 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2870 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2871 SET_DEST (PATTERN (i3)))
2872 && next_active_insn (i2) == i3)
2874 rtx p2 = PATTERN (i2);
2876 /* Make sure that the destination of I3,
2877 which we are going to substitute into one output of I2,
2878 is not used within another output of I2. We must avoid making this:
2879 (parallel [(set (mem (reg 69)) ...)
2880 (set (reg 69) ...)])
2881 which is not well-defined as to order of actions.
2882 (Besides, reload can't handle output reloads for this.)
2884 The problem can also happen if the dest of I3 is a memory ref,
2885 and another dest in I2 is an indirect memory ref.
2887 Neither can this PARALLEL be an asm. We do not allow combining
2888 that usually (see can_combine_p), so do not here either. */
2889 bool ok = true;
2890 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2892 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2893 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2894 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2895 SET_DEST (XVECEXP (p2, 0, i))))
2896 ok = false;
2897 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2898 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2899 ok = false;
2902 if (ok)
2903 for (i = 0; i < XVECLEN (p2, 0); i++)
2904 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2905 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2907 combine_merges++;
2909 subst_insn = i3;
2910 subst_low_luid = DF_INSN_LUID (i2);
2912 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2913 i2src = SET_SRC (XVECEXP (p2, 0, i));
2914 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2915 i2dest_killed = dead_or_set_p (i2, i2dest);
2917 /* Replace the dest in I2 with our dest and make the resulting
2918 insn the new pattern for I3. Then skip to where we validate
2919 the pattern. Everything was set up above. */
2920 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2921 newpat = p2;
2922 i3_subst_into_i2 = 1;
2923 goto validate_replacement;
2927 /* If I2 is setting a pseudo to a constant and I3 is setting some
2928 sub-part of it to another constant, merge them by making a new
2929 constant. */
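/* For instance (our sketch), with
     I2: (set (reg:DI 100) (const_int 0))
     I3: (set (subreg:SI (reg:DI 100) 0) (const_int 5))
   the SUBREG passes reg_subword_p, wi::insert below merges 5 into the low
   32 bits of the DImode zero, and the pair collapses into a single
   (set (reg:DI 100) (const_int 5)).  */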
2930 if (i1 == 0
2931 && (temp_expr = single_set (i2)) != 0
2932 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2933 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2934 && GET_CODE (PATTERN (i3)) == SET
2935 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2936 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2938 rtx dest = SET_DEST (PATTERN (i3));
2939 rtx temp_dest = SET_DEST (temp_expr);
2940 int offset = -1;
2941 int width = 0;
2943 if (GET_CODE (dest) == ZERO_EXTRACT)
2945 if (CONST_INT_P (XEXP (dest, 1))
2946 && CONST_INT_P (XEXP (dest, 2))
2947 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2948 &dest_mode))
2950 width = INTVAL (XEXP (dest, 1));
2951 offset = INTVAL (XEXP (dest, 2));
2952 dest = XEXP (dest, 0);
2953 if (BITS_BIG_ENDIAN)
2954 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2957 else
2959 if (GET_CODE (dest) == STRICT_LOW_PART)
2960 dest = XEXP (dest, 0);
2961 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2963 width = GET_MODE_PRECISION (dest_mode);
2964 offset = 0;
2968 if (offset >= 0)
2970 /* If this is the low part, we're done. */
2971 if (subreg_lowpart_p (dest))
2973 /* Handle the case where inner is twice the size of outer. */
2974 else if (GET_MODE_PRECISION (temp_mode)
2975 == 2 * GET_MODE_PRECISION (dest_mode))
2976 offset += GET_MODE_PRECISION (dest_mode);
2977 /* Otherwise give up for now. */
2978 else
2979 offset = -1;
2982 if (offset >= 0)
2984 rtx inner = SET_SRC (PATTERN (i3));
2985 rtx outer = SET_SRC (temp_expr);
2987 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2988 rtx_mode_t (inner, dest_mode),
2989 offset, width);
2991 combine_merges++;
2992 subst_insn = i3;
2993 subst_low_luid = DF_INSN_LUID (i2);
2994 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2995 i2dest = temp_dest;
2996 i2dest_killed = dead_or_set_p (i2, i2dest);
2998 /* Replace the source in I2 with the new constant and make the
2999 resulting insn the new pattern for I3. Then skip to where we
3000 validate the pattern. Everything was set up above. */
3001 SUBST (SET_SRC (temp_expr),
3002 immed_wide_int_const (o, temp_mode));
3004 newpat = PATTERN (i2);
3006 /* The dest of I3 has been replaced with the dest of I2. */
3007 changed_i3_dest = 1;
3008 goto validate_replacement;
3012 /* If we have no I1 and I2 looks like:
3013 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
3014 (set Y OP)])
3015 make up a dummy I1 that is
3016 (set Y OP)
3017 and change I2 to be
3018 (set (reg:CC X) (compare:CC Y (const_int 0)))
3020 (We can ignore any trailing CLOBBERs.)
3022 This undoes a previous combination and allows us to match a branch-and-
3023 decrement insn. */
3025 if (!HAVE_cc0 && i1 == 0
3026 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3027 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
3028 == MODE_CC)
3029 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
3030 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
3031 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
3032 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
3033 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3034 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3036 /* We make I1 with the same INSN_UID as I2. This gives it
3037 the same DF_INSN_LUID for value tracking. Our fake I1 will
3038 never appear in the insn stream so giving it the same INSN_UID
3039 as I2 will not cause a problem. */
3041 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3042 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3043 -1, NULL_RTX);
3044 INSN_UID (i1) = INSN_UID (i2);
3046 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3047 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3048 SET_DEST (PATTERN (i1)));
3049 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3050 SUBST_LINK (LOG_LINKS (i2),
3051 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3054 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3055 make those two SETs separate I1 and I2 insns, and make an I0 that is
3056 the original I1. */
3057 if (!HAVE_cc0 && i0 == 0
3058 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3059 && can_split_parallel_of_n_reg_sets (i2, 2)
3060 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3061 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3062 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3063 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3065 /* If there is no I1, there is no I0 either. */
3066 i0 = i1;
3068 /* We make I1 with the same INSN_UID as I2. This gives it
3069 the same DF_INSN_LUID for value tracking. Our fake I1 will
3070 never appear in the insn stream so giving it the same INSN_UID
3071 as I2 will not cause a problem. */
3073 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3074 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3075 -1, NULL_RTX);
3076 INSN_UID (i1) = INSN_UID (i2);
3078 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3081 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
3082 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3084 if (dump_file && (dump_flags & TDF_DETAILS))
3085 fprintf (dump_file, "Can't combine i2 into i3\n");
3086 undo_all ();
3087 return 0;
3089 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3091 if (dump_file && (dump_flags & TDF_DETAILS))
3092 fprintf (dump_file, "Can't combine i1 into i3\n");
3093 undo_all ();
3094 return 0;
3096 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3098 if (dump_file && (dump_flags & TDF_DETAILS))
3099 fprintf (dump_file, "Can't combine i0 into i3\n");
3100 undo_all ();
3101 return 0;
3104 /* Record whether I2 and I3 are trivial moves. */
3105 i2_was_move = is_just_move (i2);
3106 i3_was_move = is_just_move (i3);
3108 /* Record whether I2DEST is used in I2SRC and similarly for the other
3109 cases. Knowing this will help in register status updating below. */
3110 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3111 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3112 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3113 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3114 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3115 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3116 i2dest_killed = dead_or_set_p (i2, i2dest);
3117 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3118 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3120 /* For the earlier insns, determine which of the subsequent ones they
3121 feed. */
3122 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3123 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3124 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3125 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3126 && reg_overlap_mentioned_p (i0dest, i2src))));
3128 /* Ensure that I3's pattern can be the destination of combines. */
3129 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3130 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3131 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3132 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3133 &i3dest_killed))
3135 undo_all ();
3136 return 0;
3139 /* See if any of the insns is a MULT operation. Unless one is, we will
3140 reject a combined insn that is one, since it must be slower. Be
3141 conservative here. */
3142 if (GET_CODE (i2src) == MULT
3143 || (i1 != 0 && GET_CODE (i1src) == MULT)
3144 || (i0 != 0 && GET_CODE (i0src) == MULT)
3145 || (GET_CODE (PATTERN (i3)) == SET
3146 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3147 have_mult = 1;
3149 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3150 We used to do this EXCEPT in one case: I3 has a post-inc in an
3151 output operand. However, that exception can give rise to insns like
3152 mov r3,(r3)+
3153 which is a famous insn on the PDP-11 where the value of r3 used as the
3154 source was model-dependent. Avoid this sort of thing. */
3156 #if 0
3157 if (!(GET_CODE (PATTERN (i3)) == SET
3158 && REG_P (SET_SRC (PATTERN (i3)))
3159 && MEM_P (SET_DEST (PATTERN (i3)))
3160 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3161 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3162 /* It's not the exception. */
3163 #endif
3164 if (AUTO_INC_DEC)
3166 rtx link;
3167 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3168 if (REG_NOTE_KIND (link) == REG_INC
3169 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3170 || (i1 != 0
3171 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3173 undo_all ();
3174 return 0;
3178 /* See if the SETs in I1 or I2 need to be kept around in the merged
3179 instruction: whenever the value set there is still needed past I3.
3180 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3182 For the SET in I1, we have two cases: if I1 and I2 independently feed
3183 into I3, the set in I1 needs to be kept around unless I1DEST dies
3184 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3185 in I1 needs to be kept around unless I1DEST dies or is set in either
3186 I2 or I3. The same considerations apply to I0. */
3188 added_sets_2 = !dead_or_set_p (i3, i2dest);
3190 if (i1)
3191 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3192 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3193 else
3194 added_sets_1 = 0;
3196 if (i0)
3197 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3198 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3199 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3200 && dead_or_set_p (i2, i0dest)));
3201 else
3202 added_sets_0 = 0;
3204 /* We are about to copy insns for the case where they need to be kept
3205 around. Check that they can be copied in the merged instruction. */
3207 if (targetm.cannot_copy_insn_p
3208 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3209 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3210 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3212 undo_all ();
3213 return 0;
3216 /* Count how many auto_inc expressions there were in the original insns;
3217 we need to have the same number in the resulting patterns. */
3219 if (i0)
3220 for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3221 if (i1)
3222 for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3223 for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3224 for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3226 /* If the set in I2 needs to be kept around, we must make a copy of
3227 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3228 PATTERN (I2), we are only substituting for the original I1DEST, not into
3229 an already-substituted copy. This also prevents making self-referential
3230 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3231 I2DEST. */
3233 if (added_sets_2)
3235 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3236 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3237 else
3238 i2pat = copy_rtx (PATTERN (i2));
3241 if (added_sets_1)
3243 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3244 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3245 else
3246 i1pat = copy_rtx (PATTERN (i1));
3249 if (added_sets_0)
3251 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3252 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3253 else
3254 i0pat = copy_rtx (PATTERN (i0));
3257 combine_merges++;
3259 /* Substitute in the latest insn for the regs set by the earlier ones. */
3261 maxreg = max_reg_num ();
3263 subst_insn = i3;
3265 /* Many machines that don't use CC0 have insns that can both perform an
3266 arithmetic operation and set the condition code. These operations will
3267 be represented as a PARALLEL with the first element of the vector
3268 being a COMPARE of an arithmetic operation with the constant zero.
3269 The second element of the vector will set some pseudo to the result
3270 of the same arithmetic operation. If we simplify the COMPARE, we won't
3271 match such a pattern and so will generate an extra insn. Here we test
3272 for this case, where both the comparison and the operation result are
3273 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3274 I2SRC. Later we will make the PARALLEL that contains I2. */
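/* A sketch of the shape being preserved (assumed illustration):
     I2: (set (reg:SI 100) (plus:SI (reg:SI 98) (reg:SI 99)))
     I3: (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
   which we want to become
     (parallel [(set (reg:CC 17)
		     (compare:CC (plus:SI (reg:SI 98) (reg:SI 99))
				 (const_int 0)))
		(set (reg:SI 100) (plus:SI (reg:SI 98) (reg:SI 99)))])
   rather than an insn that loses either the comparison or the sum.  */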
3276 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3277 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3278 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3279 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3281 rtx newpat_dest;
3282 rtx *cc_use_loc = NULL;
3283 rtx_insn *cc_use_insn = NULL;
3284 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3285 machine_mode compare_mode, orig_compare_mode;
3286 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3287 scalar_int_mode mode;
3289 newpat = PATTERN (i3);
3290 newpat_dest = SET_DEST (newpat);
3291 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3293 if (undobuf.other_insn == 0
3294 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3295 &cc_use_insn)))
3297 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3298 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3299 compare_code = simplify_compare_const (compare_code, mode,
3300 op0, &op1);
3301 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3304 /* Do the rest only if op1 is const0_rtx, which may be the
3305 result of simplification. */
3306 if (op1 == const0_rtx)
3308 /* If a single use of the CC is found, prepare to modify it
3309 when SELECT_CC_MODE returns a new CC-class mode, or when
3310 the above simplify_compare_const() returned a new comparison
3311 operator. undobuf.other_insn is assigned the CC use insn
3312 when modifying it. */
3313 if (cc_use_loc)
3315 #ifdef SELECT_CC_MODE
3316 machine_mode new_mode
3317 = SELECT_CC_MODE (compare_code, op0, op1);
3318 if (new_mode != orig_compare_mode
3319 && can_change_dest_mode (SET_DEST (newpat),
3320 added_sets_2, new_mode))
3322 unsigned int regno = REGNO (newpat_dest);
3323 compare_mode = new_mode;
3324 if (regno < FIRST_PSEUDO_REGISTER)
3325 newpat_dest = gen_rtx_REG (compare_mode, regno);
3326 else
3328 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3329 newpat_dest = regno_reg_rtx[regno];
3332 #endif
3333 /* Cases for modifying the CC-using comparison. */
3334 if (compare_code != orig_compare_code
3335 /* ??? Do we need to verify the zero rtx? */
3336 && XEXP (*cc_use_loc, 1) == const0_rtx)
3338 /* Replace cc_use_loc with entire new RTX. */
3339 SUBST (*cc_use_loc,
3340 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3341 newpat_dest, const0_rtx));
3342 undobuf.other_insn = cc_use_insn;
3344 else if (compare_mode != orig_compare_mode)
3346 /* Just replace the CC reg with a new mode. */
3347 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3348 undobuf.other_insn = cc_use_insn;
3352 /* Now we modify the current newpat:
3353 First, SET_DEST(newpat) is updated if the CC mode has been
3354 altered. For targets without SELECT_CC_MODE, this should be
3355 optimized away. */
3356 if (compare_mode != orig_compare_mode)
3357 SUBST (SET_DEST (newpat), newpat_dest);
3358 /* This is always done to propagate i2src into newpat. */
3359 SUBST (SET_SRC (newpat),
3360 gen_rtx_COMPARE (compare_mode, op0, op1));
3361 /* Create new version of i2pat if needed; the below PARALLEL
3362 creation needs this to work correctly. */
3363 if (! rtx_equal_p (i2src, op0))
3364 i2pat = gen_rtx_SET (i2dest, op0);
3365 i2_is_used = 1;
3369 if (i2_is_used == 0)
3371 /* It is possible that the source of I2 or I1 may be performing
3372 an unneeded operation, such as a ZERO_EXTEND of something
3373 that is known to have the high part zero. Handle that case
3374 by letting subst look at the inner insns.
3376 Another way to do this would be to have a function that tries
3377 to simplify a single insn instead of merging two or more
3378 insns. We don't do this because of the potential of infinite
3379 loops and because of the potential extra memory required.
3380 However, doing it the way we are is a bit of a kludge and
3381 doesn't catch all cases.
3383 But only do this if -fexpensive-optimizations since it slows
3384 things down and doesn't usually win.
3386 This is not done in the COMPARE case above because the
3387 unmodified I2PAT is used in the PARALLEL and so a pattern
3388 with a modified I2SRC would not match. */
3390 if (flag_expensive_optimizations)
3392 /* Pass pc_rtx so no substitutions are done, just
3393 simplifications. */
3394 if (i1)
3396 subst_low_luid = DF_INSN_LUID (i1);
3397 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3400 subst_low_luid = DF_INSN_LUID (i2);
3401 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3404 n_occurrences = 0; /* `subst' counts here */
3405 subst_low_luid = DF_INSN_LUID (i2);
3407 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3408 copy of I2SRC each time we substitute it, in order to avoid creating
3409 self-referential RTL when we will be substituting I1SRC for I1DEST
3410 later. Likewise if I0 feeds into I2, either directly or indirectly
3411 through I1, and I0DEST is in I0SRC. */
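/* Sketch: if i1 is (set (reg 100) (plus (reg 100) (const_int 4)))
   and (reg 100) also occurs in i2src, substituting one shared copy
   of i2src into i3 and later replacing (reg 100) by i1src inside it
   would make the PLUS contain itself; the final "unique copy"
   argument to subst below guards against that.  */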
3412 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3413 (i1_feeds_i2_n && i1dest_in_i1src)
3414 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3415 && i0dest_in_i0src));
3416 substed_i2 = 1;
3418 /* Record whether I2's body now appears within I3's body. */
3419 i2_is_used = n_occurrences;
3422 /* If we already got a failure, don't try to do more. Otherwise, try to
3423 substitute I1 if we have it. */
3425 if (i1 && GET_CODE (newpat) != CLOBBER)
3427 /* Before we can do this substitution, we must redo the test done
3428 above (see detailed comments there) that ensures I1DEST isn't
3429 mentioned in any SETs in NEWPAT that are field assignments. */
3430 if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3431 0, 0, 0))
3433 undo_all ();
3434 return 0;
3437 n_occurrences = 0;
3438 subst_low_luid = DF_INSN_LUID (i1);
3440 /* If the following substitution will modify I1SRC, make a copy of it
3441 for the case where it is substituted for I1DEST in I2PAT later. */
3442 if (added_sets_2 && i1_feeds_i2_n)
3443 i1src_copy = copy_rtx (i1src);
3445 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3446 copy of I1SRC each time we substitute it, in order to avoid creating
3447 self-referential RTL when we will be substituting I0SRC for I0DEST
3448 later. */
3449 newpat = subst (newpat, i1dest, i1src, 0, 0,
3450 i0_feeds_i1_n && i0dest_in_i0src);
3451 substed_i1 = 1;
3453 /* Record whether I1's body now appears within I3's body. */
3454 i1_is_used = n_occurrences;
3457 /* Likewise for I0 if we have it. */
3459 if (i0 && GET_CODE (newpat) != CLOBBER)
3461 if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3462 0, 0, 0))
3464 undo_all ();
3465 return 0;
3468 /* If the following substitution will modify I0SRC, make a copy of it
3469 for the case where it is substituted for I0DEST in I1PAT later. */
3470 if (added_sets_1 && i0_feeds_i1_n)
3471 i0src_copy = copy_rtx (i0src);
3472 /* And a copy for I0DEST in I2PAT substitution. */
3473 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3474 || (i0_feeds_i2_n)))
3475 i0src_copy2 = copy_rtx (i0src);
3477 n_occurrences = 0;
3478 subst_low_luid = DF_INSN_LUID (i0);
3479 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3480 substed_i0 = 1;
3483 if (n_auto_inc)
3485 int new_n_auto_inc = 0;
3486 for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3488 if (n_auto_inc != new_n_auto_inc)
3490 if (dump_file && (dump_flags & TDF_DETAILS))
3491 fprintf (dump_file, "Number of auto_inc expressions changed\n");
3492 undo_all ();
3493 return 0;
3497 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3498 to count all the ways that I2SRC and I1SRC can be used. */
3499 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3500 && i2_is_used + added_sets_2 > 1)
3501 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3502 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3503 > 1))
3504 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3505 && (n_occurrences + added_sets_0
3506 + (added_sets_1 && i0_feeds_i1_n)
3507 + (added_sets_2 && i0_feeds_i2_n)
3508 > 1))
3509 /* Fail if we tried to make a new register. */
3510 || max_reg_num () != maxreg
3511 /* Fail if we couldn't do something and have a CLOBBER. */
3512 || GET_CODE (newpat) == CLOBBER
3513 /* Fail if this new pattern is a MULT and we didn't have one before
3514 at the outer level. */
3515 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3516 && ! have_mult))
3518 undo_all ();
3519 return 0;
3522 /* If the actions of the earlier insns must be kept
3523 in addition to substituting them into the latest one,
3524 we must make a new PARALLEL for the latest insn
3525 to hold the additional SETs. */
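/* Shape sketch: if newpat is (set A X) and i2's set (set B Y) must
   be kept, the result is (parallel [(set A X) (set B Y)]), with the
   kept sets filled in from the end of the vector below.  */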
3527 if (added_sets_0 || added_sets_1 || added_sets_2)
3529 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3530 combine_extras++;
3532 if (GET_CODE (newpat) == PARALLEL)
3534 rtvec old = XVEC (newpat, 0);
3535 total_sets = XVECLEN (newpat, 0) + extra_sets;
3536 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3537 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3538 sizeof (old->elem[0]) * old->num_elem);
3540 else
3542 rtx old = newpat;
3543 total_sets = 1 + extra_sets;
3544 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3545 XVECEXP (newpat, 0, 0) = old;
3548 if (added_sets_0)
3549 XVECEXP (newpat, 0, --total_sets) = i0pat;
3551 if (added_sets_1)
3553 rtx t = i1pat;
3554 if (i0_feeds_i1_n)
3555 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3557 XVECEXP (newpat, 0, --total_sets) = t;
3559 if (added_sets_2)
3561 rtx t = i2pat;
3562 if (i1_feeds_i2_n)
3563 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3564 i0_feeds_i1_n && i0dest_in_i0src);
3565 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3566 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3568 XVECEXP (newpat, 0, --total_sets) = t;
3572 validate_replacement:
3574 /* Note which hard regs this insn has as inputs. */
3575 mark_used_regs_combine (newpat);
3577 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3578 consider splitting this pattern, we might need these clobbers. */
3579 if (i1 && GET_CODE (newpat) == PARALLEL
3580 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3582 int len = XVECLEN (newpat, 0);
3584 newpat_vec_with_clobbers = rtvec_alloc (len);
3585 for (i = 0; i < len; i++)
3586 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3589 /* We have recognized nothing yet. */
3590 insn_code_number = -1;
3592 /* See if this is a PARALLEL of two SETs where one SET's destination is
3593 a register that is unused and this isn't marked as an instruction that
3594 might trap in an EH region. In that case, we just need the other SET.
3595 We prefer this over the PARALLEL.
3597 This can occur when simplifying a divmod insn. We *must* test for this
3598 case here because the code below that splits two independent SETs doesn't
3599 handle this case correctly when it updates the register status.
3601 It's pointless doing this if we originally had two sets, one from
3602 i3, and one from i2. Combining then splitting the parallel results
3603 in the original i2 again plus an invalid insn (which we delete).
3604 The net effect is only to move instructions around, which makes
3605 debug info less accurate.
3607 If the remaining SET came from I2, its destination should not be used
3608 between I2 and I3. See PR82024. */
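/* Divmod sketch with made-up pseudos:
   (parallel [(set (reg 100) (div:SI (reg 102) (reg 103)))
	      (set (reg 101) (mod:SI (reg 102) (reg 103)))])
   where (reg 101) carries a REG_UNUSED note on i3 is reduced to
   the DIV set alone.  */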
3610 if (!(added_sets_2 && i1 == 0)
3611 && is_parallel_of_n_reg_sets (newpat, 2)
3612 && asm_noperands (newpat) < 0)
3614 rtx set0 = XVECEXP (newpat, 0, 0);
3615 rtx set1 = XVECEXP (newpat, 0, 1);
3616 rtx oldpat = newpat;
3618 if (((REG_P (SET_DEST (set1))
3619 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3620 || (GET_CODE (SET_DEST (set1)) == SUBREG
3621 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3622 && insn_nothrow_p (i3)
3623 && !side_effects_p (SET_SRC (set1)))
3625 newpat = set0;
3626 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3629 else if (((REG_P (SET_DEST (set0))
3630 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3631 || (GET_CODE (SET_DEST (set0)) == SUBREG
3632 && find_reg_note (i3, REG_UNUSED,
3633 SUBREG_REG (SET_DEST (set0)))))
3634 && insn_nothrow_p (i3)
3635 && !side_effects_p (SET_SRC (set0)))
3637 rtx dest = SET_DEST (set1);
3638 if (GET_CODE (dest) == SUBREG)
3639 dest = SUBREG_REG (dest);
3640 if (!reg_used_between_p (dest, i2, i3))
3642 newpat = set1;
3643 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3645 if (insn_code_number >= 0)
3646 changed_i3_dest = 1;
3650 if (insn_code_number < 0)
3651 newpat = oldpat;
3654 /* Is the result of combination a valid instruction? */
3655 if (insn_code_number < 0)
3656 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3658 /* If we were combining three insns and the result is a simple SET
3659 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3660 insns. There are two ways to do this. It can be split using a
3661 machine-specific method (like when you have an addition of a large
3662 constant) or by combine in the function find_split_point. */
3664 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3665 && asm_noperands (newpat) < 0)
3667 rtx parallel, *split;
3668 rtx_insn *m_split_insn;
3670 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3671 use I2DEST as a scratch register will help. In the latter case,
3672 convert I2DEST to the mode of the source of NEWPAT if we can. */
3674 m_split_insn = combine_split_insns (newpat, i3);
3676 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3677 inputs of NEWPAT. */
3679 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3680 possible to try that as a scratch reg. This would require adding
3681 more code to make it work though. */
3683 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3685 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3687 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3688 (temporarily, until we are committed to this instruction
3689 combination) does not work: for example, any call to nonzero_bits
3690 on the register (from a splitter in the MD file, for example)
3691 will get the old information, which is invalid.
3693 Since nowadays we can create registers during combine just fine,
3694 we should just create a new one here, not reuse i2dest. */
3696 /* First try to split using the original register as a
3697 scratch register. */
3698 parallel = gen_rtx_PARALLEL (VOIDmode,
3699 gen_rtvec (2, newpat,
3700 gen_rtx_CLOBBER (VOIDmode,
3701 i2dest)));
3702 m_split_insn = combine_split_insns (parallel, i3);
3704 /* If that didn't work, try changing the mode of I2DEST if
3705 we can. */
3706 if (m_split_insn == 0
3707 && new_mode != GET_MODE (i2dest)
3708 && new_mode != VOIDmode
3709 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3711 machine_mode old_mode = GET_MODE (i2dest);
3712 rtx ni2dest;
3714 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3715 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3716 else
3718 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3719 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3722 parallel = (gen_rtx_PARALLEL
3723 (VOIDmode,
3724 gen_rtvec (2, newpat,
3725 gen_rtx_CLOBBER (VOIDmode,
3726 ni2dest))));
3727 m_split_insn = combine_split_insns (parallel, i3);
3729 if (m_split_insn == 0
3730 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3732 struct undo *buf;
3734 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3735 buf = undobuf.undos;
3736 undobuf.undos = buf->next;
3737 buf->next = undobuf.frees;
3738 undobuf.frees = buf;
3742 i2scratch = m_split_insn != 0;
3745 /* If recog_for_combine has discarded clobbers, try to use them
3746 again for the split. */
3747 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3749 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3750 m_split_insn = combine_split_insns (parallel, i3);
3753 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3755 rtx m_split_pat = PATTERN (m_split_insn);
3756 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3757 if (insn_code_number >= 0)
3758 newpat = m_split_pat;
3760 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3761 && (next_nonnote_nondebug_insn (i2) == i3
3762 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3764 rtx i2set, i3set;
3765 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3766 newi2pat = PATTERN (m_split_insn);
3768 i3set = single_set (NEXT_INSN (m_split_insn));
3769 i2set = single_set (m_split_insn);
3771 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3773 /* If I2 or I3 has multiple SETs, we won't know how to track
3774 register status, so don't use these insns. If I2's destination
3775 is used between I2 and I3, we also can't use these insns. */
3777 if (i2_code_number >= 0 && i2set && i3set
3778 && (next_nonnote_nondebug_insn (i2) == i3
3779 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3780 insn_code_number = recog_for_combine (&newi3pat, i3,
3781 &new_i3_notes);
3782 if (insn_code_number >= 0)
3783 newpat = newi3pat;
3785 /* It is possible that both insns now set the destination of I3.
3786 If so, we must show an extra use of it. */
3788 if (insn_code_number >= 0)
3790 rtx new_i3_dest = SET_DEST (i3set);
3791 rtx new_i2_dest = SET_DEST (i2set);
3793 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3794 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3795 || GET_CODE (new_i3_dest) == SUBREG)
3796 new_i3_dest = XEXP (new_i3_dest, 0);
3798 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3799 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3800 || GET_CODE (new_i2_dest) == SUBREG)
3801 new_i2_dest = XEXP (new_i2_dest, 0);
3803 if (REG_P (new_i3_dest)
3804 && REG_P (new_i2_dest)
3805 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3806 && REGNO (new_i2_dest) < reg_n_sets_max)
3807 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3811 /* If we can split it and use I2DEST, go ahead and see if that
3812 helps things be recognized. Verify that none of the registers
3813 are set between I2 and I3. */
3814 if (insn_code_number < 0
3815 && (split = find_split_point (&newpat, i3, false)) != 0
3816 && (!HAVE_cc0 || REG_P (i2dest))
3817 /* We need I2DEST in the proper mode. If it is a hard register
3818 or the only use of a pseudo, we can change its mode.
3819 Make sure we don't change a hard register to have a mode that
3820 isn't valid for it, or change the number of registers. */
3821 && (GET_MODE (*split) == GET_MODE (i2dest)
3822 || GET_MODE (*split) == VOIDmode
3823 || can_change_dest_mode (i2dest, added_sets_2,
3824 GET_MODE (*split)))
3825 && (next_nonnote_nondebug_insn (i2) == i3
3826 || !modified_between_p (*split, i2, i3))
3827 /* We can't overwrite I2DEST if its value is still used by
3828 NEWPAT. */
3829 && ! reg_referenced_p (i2dest, newpat))
3831 rtx newdest = i2dest;
3832 enum rtx_code split_code = GET_CODE (*split);
3833 machine_mode split_mode = GET_MODE (*split);
3834 bool subst_done = false;
3835 newi2pat = NULL_RTX;
3837 i2scratch = true;
3839 /* *SPLIT may be part of I2SRC, so make sure we have the
3840 original expression around for later debug processing.
3841 We should not need I2SRC any more in other cases. */
3842 if (MAY_HAVE_DEBUG_BIND_INSNS)
3843 i2src = copy_rtx (i2src);
3844 else
3845 i2src = NULL;
3847 /* Get NEWDEST as a register in the proper mode. We have already
3848 validated that we can do this. */
3849 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3851 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3852 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3853 else
3855 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3856 newdest = regno_reg_rtx[REGNO (i2dest)];
3860 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3861 an ASHIFT. This can occur if it was inside a PLUS and hence
3862 appeared to be a memory address. This is a kludge. */
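/* Sketch with a made-up pseudo: (mult (reg 100) (const_int 8))
   is rewritten here as (ashift (reg 100) (const_int 3)).  */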
3863 if (split_code == MULT
3864 && CONST_INT_P (XEXP (*split, 1))
3865 && INTVAL (XEXP (*split, 1)) > 0
3866 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3868 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3869 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3870 XEXP (*split, 0), i_rtx));
3871 /* Update split_code because we may not have a multiply
3872 anymore. */
3873 split_code = GET_CODE (*split);
3876 /* Similarly for (plus (mult FOO (const_int pow2))). */
3877 if (split_code == PLUS
3878 && GET_CODE (XEXP (*split, 0)) == MULT
3879 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3880 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3881 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3883 rtx nsplit = XEXP (*split, 0);
3884 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3885 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3886 XEXP (nsplit, 0),
3887 i_rtx));
3888 /* Update split_code because we may not have a multiply
3889 anymore. */
3890 split_code = GET_CODE (*split);
3893 #ifdef INSN_SCHEDULING
3894 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3895 be written as a ZERO_EXTEND. */
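/* Sketch: (subreg:SI (mem:QI ...) 0) loaded on a target whose
   loads zero-extend becomes (zero_extend:SI (mem:QI ...)), or a
   SIGN_EXTEND when load_extend_op says so.  */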
3896 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3898 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3899 what it really is. */
3900 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3901 == SIGN_EXTEND)
3902 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3903 SUBREG_REG (*split)));
3904 else
3905 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3906 SUBREG_REG (*split)));
3908 #endif
3910 /* Attempt to split binary operators using arithmetic identities. */
3911 if (BINARY_P (SET_SRC (newpat))
3912 && split_mode == GET_MODE (SET_SRC (newpat))
3913 && ! side_effects_p (SET_SRC (newpat)))
3915 rtx setsrc = SET_SRC (newpat);
3916 machine_mode mode = GET_MODE (setsrc);
3917 enum rtx_code code = GET_CODE (setsrc);
3918 rtx src_op0 = XEXP (setsrc, 0);
3919 rtx src_op1 = XEXP (setsrc, 1);
3921 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3922 if (rtx_equal_p (src_op0, src_op1))
3924 newi2pat = gen_rtx_SET (newdest, src_op0);
3925 SUBST (XEXP (setsrc, 0), newdest);
3926 SUBST (XEXP (setsrc, 1), newdest);
3927 subst_done = true;
3929 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3930 else if ((code == PLUS || code == MULT)
3931 && GET_CODE (src_op0) == code
3932 && GET_CODE (XEXP (src_op0, 0)) == code
3933 && (INTEGRAL_MODE_P (mode)
3934 || (FLOAT_MODE_P (mode)
3935 && flag_unsafe_math_optimizations)))
3937 rtx p = XEXP (XEXP (src_op0, 0), 0);
3938 rtx q = XEXP (XEXP (src_op0, 0), 1);
3939 rtx r = XEXP (src_op0, 1);
3940 rtx s = src_op1;
3942 /* Split both "((X op Y) op X) op Y" and
3943 "((X op Y) op Y) op X" as "T op T" where T is
3944 "X op Y". */
3945 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3946 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3948 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3949 SUBST (XEXP (setsrc, 0), newdest);
3950 SUBST (XEXP (setsrc, 1), newdest);
3951 subst_done = true;
3953 /* Split "((X op X) op Y) op Y" as "T op T" where
3954 T is "X op Y". */
3955 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3957 rtx tmp = simplify_gen_binary (code, mode, p, r);
3958 newi2pat = gen_rtx_SET (newdest, tmp);
3959 SUBST (XEXP (setsrc, 0), newdest);
3960 SUBST (XEXP (setsrc, 1), newdest);
3961 subst_done = true;
3966 if (!subst_done)
3968 newi2pat = gen_rtx_SET (newdest, *split);
3969 SUBST (*split, newdest);
3972 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3974 /* recog_for_combine might have added CLOBBERs to newi2pat.
3975 Make sure NEWPAT does not depend on the clobbered regs. */
3976 if (GET_CODE (newi2pat) == PARALLEL)
3977 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3978 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3980 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3981 if (reg_overlap_mentioned_p (reg, newpat))
3983 undo_all ();
3984 return 0;
3988 /* If the split point was a MULT and we didn't have one before,
3989 don't use one now. */
3990 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3991 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3995 /* Check for a case where we loaded from memory in a narrow mode and
3996 then sign extended it, but we need both registers. In that case,
3997 we have a PARALLEL with both loads from the same memory location.
3998 We can split this into a load from memory followed by a register-register
3999 copy. This saves at least one insn, more if register allocation can
4000 eliminate the copy.
4002 We cannot do this if the destination of the first assignment is a
4003 condition code register or cc0. We eliminate this case by making sure
4004 the SET_DEST and SET_SRC have the same mode.
4006 We cannot do this if the destination of the second assignment is
4007 a register that we have already assumed is zero-extended. Similarly
4008 for a SUBREG of such a register. */
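/* Sketch with made-up pseudos:
   (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI A)))
	      (set (reg:HI 101) (mem:HI A))])
   becomes the extending load as newi2pat plus
   (set (reg:HI 101) (lowpart of (reg:SI 100))),
   so the second memory access turns into a register copy.  */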
4010 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
4011 && GET_CODE (newpat) == PARALLEL
4012 && XVECLEN (newpat, 0) == 2
4013 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4014 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
4015 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
4016 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
4017 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4018 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
4019 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
4020 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
4021 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4022 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4023 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
4024 (REG_P (temp_expr)
4025 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4026 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4027 BITS_PER_WORD)
4028 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4029 HOST_BITS_PER_INT)
4030 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4031 != GET_MODE_MASK (word_mode))))
4032 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
4033 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4034 (REG_P (temp_expr)
4035 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4036 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4037 BITS_PER_WORD)
4038 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4039 HOST_BITS_PER_INT)
4040 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4041 != GET_MODE_MASK (word_mode)))))
4042 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4043 SET_SRC (XVECEXP (newpat, 0, 1)))
4044 && ! find_reg_note (i3, REG_UNUSED,
4045 SET_DEST (XVECEXP (newpat, 0, 0))))
4047 rtx ni2dest;
4049 newi2pat = XVECEXP (newpat, 0, 0);
4050 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4051 newpat = XVECEXP (newpat, 0, 1);
4052 SUBST (SET_SRC (newpat),
4053 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4054 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4056 if (i2_code_number >= 0)
4057 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4059 if (insn_code_number >= 0)
4060 swap_i2i3 = 1;
4063 /* Similarly, check for a case where we have a PARALLEL of two independent
4064 SETs but we started with three insns. In this case, we can do the sets
4065 as two separate insns. This case occurs when some SET allows two
4066 other insns to combine, but the destination of that SET is still live.
4068 Also do this if we started with two insns and (at least) one of the
4069 resulting sets is a noop; this noop will be deleted later.
4071 Also do this if we started with two insns neither of which was a simple
4072 move. */
4074 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4075 && GET_CODE (newpat) == PARALLEL
4076 && XVECLEN (newpat, 0) == 2
4077 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4078 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4079 && (i1
4080 || set_noop_p (XVECEXP (newpat, 0, 0))
4081 || set_noop_p (XVECEXP (newpat, 0, 1))
4082 || (!i2_was_move && !i3_was_move))
4083 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4084 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4085 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4086 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4087 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4088 XVECEXP (newpat, 0, 0))
4089 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4090 XVECEXP (newpat, 0, 1))
4091 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4092 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4094 rtx set0 = XVECEXP (newpat, 0, 0);
4095 rtx set1 = XVECEXP (newpat, 0, 1);
4097 /* Normally, it doesn't matter which of the two is done first,
4098 but the one that references cc0 can't be the second, and
4099 one which uses any regs/memory set in between i2 and i3 can't
4100 be first. The PARALLEL might also have been pre-existing in i3,
4101 so we need to make sure that we won't wrongly hoist a SET to i2
4102 that would conflict with a death note present in there, or would
4103 have its dest modified between i2 and i3. */
4104 if (!modified_between_p (SET_SRC (set1), i2, i3)
4105 && !(REG_P (SET_DEST (set1))
4106 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4107 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4108 && find_reg_note (i2, REG_DEAD,
4109 SUBREG_REG (SET_DEST (set1))))
4110 && !modified_between_p (SET_DEST (set1), i2, i3)
4111 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4112 /* If I3 is a jump, ensure that set0 is a jump so that
4113 we do not create invalid RTL. */
4114 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4117 newi2pat = set1;
4118 newpat = set0;
4120 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4121 && !(REG_P (SET_DEST (set0))
4122 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4123 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4124 && find_reg_note (i2, REG_DEAD,
4125 SUBREG_REG (SET_DEST (set0))))
4126 && !modified_between_p (SET_DEST (set0), i2, i3)
4127 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4128 /* If I3 is a jump, ensure that set1 is a jump so that
4129 we do not create invalid RTL. */
4130 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4133 newi2pat = set0;
4134 newpat = set1;
4136 else
4138 undo_all ();
4139 return 0;
4142 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4144 if (i2_code_number >= 0)
4146 /* recog_for_combine might have added CLOBBERs to newi2pat.
4147 Make sure NEWPAT does not depend on the clobbered regs. */
4148 if (GET_CODE (newi2pat) == PARALLEL)
4150 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4151 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4153 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4154 if (reg_overlap_mentioned_p (reg, newpat))
4156 undo_all ();
4157 return 0;
4162 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4164 if (insn_code_number >= 0)
4165 split_i2i3 = 1;
4169 /* If it still isn't recognized, fail and change things back the way they
4170 were. */
4171 if ((insn_code_number < 0
4172 /* Is the result a reasonable ASM_OPERANDS? */
4173 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4175 undo_all ();
4176 return 0;
4179 /* If we had to change another insn, make sure it is valid also. */
4180 if (undobuf.other_insn)
4182 CLEAR_HARD_REG_SET (newpat_used_regs);
4184 other_pat = PATTERN (undobuf.other_insn);
4185 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4186 &new_other_notes);
4188 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4190 undo_all ();
4191 return 0;
4195 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4196 they are adjacent to each other or not. */
4197 if (HAVE_cc0)
4199 rtx_insn *p = prev_nonnote_insn (i3);
4200 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4201 && sets_cc0_p (newi2pat))
4203 undo_all ();
4204 return 0;
4208 /* Only allow this combination if insn_cost reports that the
4209 replacement instructions are cheaper than the originals. */
4210 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4212 undo_all ();
4213 return 0;
4216 if (MAY_HAVE_DEBUG_BIND_INSNS)
4218 struct undo *undo;
4220 for (undo = undobuf.undos; undo; undo = undo->next)
4221 if (undo->kind == UNDO_MODE)
4223 rtx reg = *undo->where.r;
4224 machine_mode new_mode = GET_MODE (reg);
4225 machine_mode old_mode = undo->old_contents.m;
4227 /* Temporarily revert mode back. */
4228 adjust_reg_mode (reg, old_mode);
4230 if (reg == i2dest && i2scratch)
4232 /* If we used i2dest as a scratch register with a
4233 different mode, substitute it for the original
4234 i2src while its original mode is temporarily
4235 restored, and then clear i2scratch so that we don't
4236 do it again later. */
4237 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4238 this_basic_block);
4239 i2scratch = false;
4240 /* Put back the new mode. */
4241 adjust_reg_mode (reg, new_mode);
4243 else
4245 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4246 rtx_insn *first, *last;
4248 if (reg == i2dest)
4250 first = i2;
4251 last = last_combined_insn;
4253 else
4255 first = i3;
4256 last = undobuf.other_insn;
4257 gcc_assert (last);
4258 if (DF_INSN_LUID (last)
4259 < DF_INSN_LUID (last_combined_insn))
4260 last = last_combined_insn;
4263 /* We're dealing with a reg that changed mode but not
4264 meaning, so we want to turn it into a subreg for
4265 the new mode. However, because of REG sharing and
4266 because its mode had already changed, we have to do
4267 it in two steps. First, replace any debug uses of
4268 reg, with its original mode temporarily restored,
4269 with this copy we have created; then, replace the
4270 copy with the SUBREG of the original shared reg,
4271 once again changed to the new mode. */
4272 propagate_for_debug (first, last, reg, tempreg,
4273 this_basic_block);
4274 adjust_reg_mode (reg, new_mode);
4275 propagate_for_debug (first, last, tempreg,
4276 lowpart_subreg (old_mode, reg, new_mode),
4277 this_basic_block);
4282 /* If we will be able to accept this, we have made a
4283 change to the destination of I3. This requires us to
4284 do a few adjustments. */
4286 if (changed_i3_dest)
4288 PATTERN (i3) = newpat;
4289 adjust_for_new_dest (i3);
4292 /* We now know that we can do this combination. Merge the insns and
4293 update the status of registers and LOG_LINKS. */
4295 if (undobuf.other_insn)
4297 rtx note, next;
4299 PATTERN (undobuf.other_insn) = other_pat;
4301 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4302 ensure that they are still valid. Then add any non-duplicate
4303 notes added by recog_for_combine. */
4304 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4306 next = XEXP (note, 1);
4308 if ((REG_NOTE_KIND (note) == REG_DEAD
4309 && !reg_referenced_p (XEXP (note, 0),
4310 PATTERN (undobuf.other_insn)))
4311 || (REG_NOTE_KIND (note) == REG_UNUSED
4312 && !reg_set_p (XEXP (note, 0),
4313 PATTERN (undobuf.other_insn)))
4314 /* Simply drop REG_EQUAL and REG_EQUIV notes, since they may
4315 no longer be valid for other_insn. It would be possible to
4316 record that the CC register changed and discard only those
4317 notes, but in practice that is an unnecessary complication
4318 that gives no meaningful improvement.
4320 See PR78559. */
4321 || REG_NOTE_KIND (note) == REG_EQUAL
4322 || REG_NOTE_KIND (note) == REG_EQUIV)
4323 remove_note (undobuf.other_insn, note);
4326 distribute_notes (new_other_notes, undobuf.other_insn,
4327 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4328 NULL_RTX);
4331 if (swap_i2i3)
4333 /* I3 now uses what used to be its destination and which is now
4334 I2's destination. This requires us to do a few adjustments. */
4335 PATTERN (i3) = newpat;
4336 adjust_for_new_dest (i3);
4339 if (swap_i2i3 || split_i2i3)
4341 /* We might need a LOG_LINK from I3 to I2. But then we used to
4342 have one, so we still will.
4344 However, some later insn might be using I2's dest and have
4345 a LOG_LINK pointing at I3. We should change it to point at
4346 I2 instead. */
4348 /* newi2pat is usually a SET here; however, recog_for_combine might
4349 have added some clobbers. */
4350 rtx x = newi2pat;
4351 if (GET_CODE (x) == PARALLEL)
4352 x = XVECEXP (newi2pat, 0, 0);
4354 if (REG_P (SET_DEST (x))
4355 || (GET_CODE (SET_DEST (x)) == SUBREG
4356 && REG_P (SUBREG_REG (SET_DEST (x)))))
4358 unsigned int regno = reg_or_subregno (SET_DEST (x));
4360 bool done = false;
4361 for (rtx_insn *insn = NEXT_INSN (i3);
4362 !done
4363 && insn
4364 && NONDEBUG_INSN_P (insn)
4365 && BLOCK_FOR_INSN (insn) == this_basic_block;
4366 insn = NEXT_INSN (insn))
4368 struct insn_link *link;
4369 FOR_EACH_LOG_LINK (link, insn)
4370 if (link->insn == i3 && link->regno == regno)
4372 link->insn = i2;
4373 done = true;
4374 break;
4381 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4382 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4383 rtx midnotes = 0;
4384 int from_luid;
4385 /* Compute which registers we expect to eliminate. newi2pat may be setting
4386 either i3dest or i2dest, so we must check it. */
4387 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4388 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4389 || !i2dest_killed
4390 ? 0 : i2dest);
4391 /* For i1, we need to compute both local elimination and global
4392 elimination information with respect to newi2pat because i1dest
4393 may be the same as i3dest, in which case newi2pat may be setting
4394 i1dest. Global information is used when distributing REG_DEAD
4395 note for i2 and i3, in which case it does matter if newi2pat sets
4396 i1dest or not.
4398 Local information is used when distributing REG_DEAD note for i1,
4399 in which case it doesn't matter if newi2pat sets i1dest or not.
4400 See PR62151, if we have four insns combination:
4401 i0: r0 <- i0src
4402 i1: r1 <- i1src (using r0)
4403 REG_DEAD (r0)
4404 i2: r0 <- i2src (using r1)
4405 i3: r3 <- i3src (using r0)
4406 ix: using r0
4407 From i1's point of view, r0 is eliminated, no matter if it is set
4408 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4409 should be discarded.
4411 Note local information only affects cases in forms like "I1->I2->I3",
4412 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4413 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4414 i0dest anyway. */
4415 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4416 || !i1dest_killed
4417 ? 0 : i1dest);
4418 rtx elim_i1 = (local_elim_i1 == 0
4419 || (newi2pat && reg_set_p (i1dest, newi2pat))
4420 ? 0 : i1dest);
4421 /* Same case as i1. */
4422 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4423 ? 0 : i0dest);
4424 rtx elim_i0 = (local_elim_i0 == 0
4425 || (newi2pat && reg_set_p (i0dest, newi2pat))
4426 ? 0 : i0dest);
4428 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4429 clear them. */
4430 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4431 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4432 if (i1)
4433 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4434 if (i0)
4435 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4437 /* Ensure that we do not have something that should not be shared but
4438 occurs multiple times in the new insns. Check this by first
4439 resetting all the `used' flags and then copying anything that is shared. */
4441 reset_used_flags (i3notes);
4442 reset_used_flags (i2notes);
4443 reset_used_flags (i1notes);
4444 reset_used_flags (i0notes);
4445 reset_used_flags (newpat);
4446 reset_used_flags (newi2pat);
4447 if (undobuf.other_insn)
4448 reset_used_flags (PATTERN (undobuf.other_insn));
4450 i3notes = copy_rtx_if_shared (i3notes);
4451 i2notes = copy_rtx_if_shared (i2notes);
4452 i1notes = copy_rtx_if_shared (i1notes);
4453 i0notes = copy_rtx_if_shared (i0notes);
4454 newpat = copy_rtx_if_shared (newpat);
4455 newi2pat = copy_rtx_if_shared (newi2pat);
4456 if (undobuf.other_insn)
4457 reset_used_flags (PATTERN (undobuf.other_insn));
4459 INSN_CODE (i3) = insn_code_number;
4460 PATTERN (i3) = newpat;
4462 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4464 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4465 link = XEXP (link, 1))
4467 if (substed_i2)
4469 /* I2SRC must still be meaningful at this point. Some
4470 splitting operations can invalidate I2SRC, but those
4471 operations do not apply to calls. */
4472 gcc_assert (i2src);
4473 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4474 i2dest, i2src);
4476 if (substed_i1)
4477 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4478 i1dest, i1src);
4479 if (substed_i0)
4480 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4481 i0dest, i0src);
4485 if (undobuf.other_insn)
4486 INSN_CODE (undobuf.other_insn) = other_code_number;
4488 /* We had one special case above where I2 had more than one set and
4489 we replaced a destination of one of those sets with the destination
4490 of I3. In that case, we have to update LOG_LINKS of insns later
4491 in this basic block. Note that this (expensive) case is rare.
4493 Also, in this case, we must pretend that all REG_NOTEs for I2
4494 actually came from I3, so that REG_UNUSED notes from I2 will be
4495 properly handled. */
4497 if (i3_subst_into_i2)
4499 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4500 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4501 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4502 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4503 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4504 && ! find_reg_note (i2, REG_UNUSED,
4505 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4506 for (temp_insn = NEXT_INSN (i2);
4507 temp_insn
4508 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4509 || BB_HEAD (this_basic_block) != temp_insn);
4510 temp_insn = NEXT_INSN (temp_insn))
4511 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4512 FOR_EACH_LOG_LINK (link, temp_insn)
4513 if (link->insn == i2)
4514 link->insn = i3;
4516 if (i3notes)
4518 rtx link = i3notes;
4519 while (XEXP (link, 1))
4520 link = XEXP (link, 1);
4521 XEXP (link, 1) = i2notes;
4523 else
4524 i3notes = i2notes;
4525 i2notes = 0;
4528 LOG_LINKS (i3) = NULL;
4529 REG_NOTES (i3) = 0;
4530 LOG_LINKS (i2) = NULL;
4531 REG_NOTES (i2) = 0;
4533 if (newi2pat)
4535 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4536 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4537 this_basic_block);
4538 INSN_CODE (i2) = i2_code_number;
4539 PATTERN (i2) = newi2pat;
4541 else
4543 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4544 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4545 this_basic_block);
4546 SET_INSN_DELETED (i2);
4549 if (i1)
4551 LOG_LINKS (i1) = NULL;
4552 REG_NOTES (i1) = 0;
4553 if (MAY_HAVE_DEBUG_BIND_INSNS)
4554 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4555 this_basic_block);
4556 SET_INSN_DELETED (i1);
4559 if (i0)
4561 LOG_LINKS (i0) = NULL;
4562 REG_NOTES (i0) = 0;
4563 if (MAY_HAVE_DEBUG_BIND_INSNS)
4564 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4565 this_basic_block);
4566 SET_INSN_DELETED (i0);
4569 /* Get death notes for everything that is now used in either I3 or
4570 I2 and used to die in a previous insn. If we built two new
4571 patterns, move from I1 to I2 then I2 to I3 so that we get the
4572 proper movement on registers that I2 modifies. */
4574 if (i0)
4575 from_luid = DF_INSN_LUID (i0);
4576 else if (i1)
4577 from_luid = DF_INSN_LUID (i1);
4578 else
4579 from_luid = DF_INSN_LUID (i2);
4580 if (newi2pat)
4581 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4582 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4584 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4585 if (i3notes)
4586 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4587 elim_i2, elim_i1, elim_i0);
4588 if (i2notes)
4589 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4590 elim_i2, elim_i1, elim_i0);
4591 if (i1notes)
4592 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4593 elim_i2, local_elim_i1, local_elim_i0);
4594 if (i0notes)
4595 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4596 elim_i2, elim_i1, local_elim_i0);
4597 if (midnotes)
4598 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4599 elim_i2, elim_i1, elim_i0);
4601 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4602 know these are REG_UNUSED and want them to go to the desired insn,
4603 so we always pass it as i3. */
4605 if (newi2pat && new_i2_notes)
4606 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4607 NULL_RTX);
4609 if (new_i3_notes)
4610 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4611 NULL_RTX);
4613 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4614 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4615 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4616 in that case, it might delete I2. Similarly for I2 and I1.
4617 Show an additional death due to the REG_DEAD note we make here. If
4618 we discard it in distribute_notes, we will decrement it again. */
4620 if (i3dest_killed)
4622 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4623 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4624 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4625 elim_i1, elim_i0);
4626 else
4627 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4628 elim_i2, elim_i1, elim_i0);
4631 if (i2dest_in_i2src)
4633 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4634 if (newi2pat && reg_set_p (i2dest, newi2pat))
4635 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4636 NULL_RTX, NULL_RTX);
4637 else
4638 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4639 NULL_RTX, NULL_RTX, NULL_RTX);
4642 if (i1dest_in_i1src)
4644 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4645 if (newi2pat && reg_set_p (i1dest, newi2pat))
4646 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4647 NULL_RTX, NULL_RTX);
4648 else
4649 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4650 NULL_RTX, NULL_RTX, NULL_RTX);
4653 if (i0dest_in_i0src)
4655 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4656 if (newi2pat && reg_set_p (i0dest, newi2pat))
4657 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4658 NULL_RTX, NULL_RTX);
4659 else
4660 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4661 NULL_RTX, NULL_RTX, NULL_RTX);
4664 distribute_links (i3links);
4665 distribute_links (i2links);
4666 distribute_links (i1links);
4667 distribute_links (i0links);
4669 if (REG_P (i2dest))
4671 struct insn_link *link;
4672 rtx_insn *i2_insn = 0;
4673 rtx i2_val = 0, set;
4675 /* The insn that used to set this register doesn't exist, and
4676 this life of the register may not exist either. See if one of
4677 I3's links points to an insn that sets I2DEST. If it does,
4678 that is now the last known value for I2DEST. If we don't update
4679 this and I2 set the register to a value that depended on its old
4680 contents, we will get confused. If this insn is used, things
4681 will be set correctly in combine_instructions. */
4682 FOR_EACH_LOG_LINK (link, i3)
4683 if ((set = single_set (link->insn)) != 0
4684 && rtx_equal_p (i2dest, SET_DEST (set)))
4685 i2_insn = link->insn, i2_val = SET_SRC (set);
4687 record_value_for_reg (i2dest, i2_insn, i2_val);
4689 /* If the reg formerly set in I2 died only once and that was in I3,
4690 zero its use count so it won't make `reload' do any work. */
4691 if (! added_sets_2
4692 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4693 && ! i2dest_in_i2src
4694 && REGNO (i2dest) < reg_n_sets_max)
4695 INC_REG_N_SETS (REGNO (i2dest), -1);
4698 if (i1 && REG_P (i1dest))
4700 struct insn_link *link;
4701 rtx_insn *i1_insn = 0;
4702 rtx i1_val = 0, set;
4704 FOR_EACH_LOG_LINK (link, i3)
4705 if ((set = single_set (link->insn)) != 0
4706 && rtx_equal_p (i1dest, SET_DEST (set)))
4707 i1_insn = link->insn, i1_val = SET_SRC (set);
4709 record_value_for_reg (i1dest, i1_insn, i1_val);
4711 if (! added_sets_1
4712 && ! i1dest_in_i1src
4713 && REGNO (i1dest) < reg_n_sets_max)
4714 INC_REG_N_SETS (REGNO (i1dest), -1);
4717 if (i0 && REG_P (i0dest))
4719 struct insn_link *link;
4720 rtx_insn *i0_insn = 0;
4721 rtx i0_val = 0, set;
4723 FOR_EACH_LOG_LINK (link, i3)
4724 if ((set = single_set (link->insn)) != 0
4725 && rtx_equal_p (i0dest, SET_DEST (set)))
4726 i0_insn = link->insn, i0_val = SET_SRC (set);
4728 record_value_for_reg (i0dest, i0_insn, i0_val);
4730 if (! added_sets_0
4731 && ! i0dest_in_i0src
4732 && REGNO (i0dest) < reg_n_sets_max)
4733 INC_REG_N_SETS (REGNO (i0dest), -1);
4736 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4737 been made to this insn. The order is important, because newi2pat
4738 can affect nonzero_bits of newpat. */
4739 if (newi2pat)
4740 note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4741 note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4744 if (undobuf.other_insn != NULL_RTX)
4746 if (dump_file)
4748 fprintf (dump_file, "modifying other_insn ");
4749 dump_insn_slim (dump_file, undobuf.other_insn);
4751 df_insn_rescan (undobuf.other_insn);
4754 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4756 if (dump_file)
4758 fprintf (dump_file, "modifying insn i0 ");
4759 dump_insn_slim (dump_file, i0);
4761 df_insn_rescan (i0);
4764 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4766 if (dump_file)
4768 fprintf (dump_file, "modifying insn i1 ");
4769 dump_insn_slim (dump_file, i1);
4771 df_insn_rescan (i1);
4774 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4776 if (dump_file)
4778 fprintf (dump_file, "modifying insn i2 ");
4779 dump_insn_slim (dump_file, i2);
4781 df_insn_rescan (i2);
4784 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4786 if (dump_file)
4788 fprintf (dump_file, "modifying insn i3 ");
4789 dump_insn_slim (dump_file, i3);
4791 df_insn_rescan (i3);
4794 /* Set new_direct_jump_p if a new return or simple jump instruction
4795 has been created. Adjust the CFG accordingly. */
4796 if (returnjump_p (i3) || any_uncondjump_p (i3))
4798 *new_direct_jump_p = 1;
4799 mark_jump_label (PATTERN (i3), i3, 0);
4800 update_cfg_for_uncondjump (i3);
4803 if (undobuf.other_insn != NULL_RTX
4804 && (returnjump_p (undobuf.other_insn)
4805 || any_uncondjump_p (undobuf.other_insn)))
4807 *new_direct_jump_p = 1;
4808 update_cfg_for_uncondjump (undobuf.other_insn);
4811 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4812 && XEXP (PATTERN (i3), 0) == const1_rtx)
4814 basic_block bb = BLOCK_FOR_INSN (i3);
4815 gcc_assert (bb);
4816 remove_edge (split_block (bb, i3));
4817 emit_barrier_after_bb (bb);
4818 *new_direct_jump_p = 1;
4821 if (undobuf.other_insn
4822 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4823 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4825 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4826 gcc_assert (bb);
4827 remove_edge (split_block (bb, undobuf.other_insn));
4828 emit_barrier_after_bb (bb);
4829 *new_direct_jump_p = 1;
4832 /* A noop might also need cleaning up of the CFG, if it comes from the
4833 simplification of a jump. */
4834 if (JUMP_P (i3)
4835 && GET_CODE (newpat) == SET
4836 && SET_SRC (newpat) == pc_rtx
4837 && SET_DEST (newpat) == pc_rtx)
4839 *new_direct_jump_p = 1;
4840 update_cfg_for_uncondjump (i3);
4843 if (undobuf.other_insn != NULL_RTX
4844 && JUMP_P (undobuf.other_insn)
4845 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4846 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4847 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4849 *new_direct_jump_p = 1;
4850 update_cfg_for_uncondjump (undobuf.other_insn);
4853 combine_successes++;
4854 undo_commit ();
4856 rtx_insn *ret = newi2pat ? i2 : i3;
4857 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4858 ret = added_links_insn;
4859 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4860 ret = added_notes_insn;
4862 return ret;
4865 /* Get a marker for undoing to the current state. */
4867 static void *
4868 get_undo_marker (void)
4870 return undobuf.undos;
4873 /* Undo the modifications up to the marker. */
4875 static void
4876 undo_to_marker (void *marker)
4878 struct undo *undo, *next;
4880 for (undo = undobuf.undos; undo != marker; undo = next)
4882 gcc_assert (undo);
4884 next = undo->next;
4885 switch (undo->kind)
4887 case UNDO_RTX:
4888 *undo->where.r = undo->old_contents.r;
4889 break;
4890 case UNDO_INT:
4891 *undo->where.i = undo->old_contents.i;
4892 break;
4893 case UNDO_MODE:
4894 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4895 break;
4896 case UNDO_LINKS:
4897 *undo->where.l = undo->old_contents.l;
4898 break;
4899 default:
4900 gcc_unreachable ();
4903 undo->next = undobuf.frees;
4904 undobuf.frees = undo;
4907 undobuf.undos = (struct undo *) marker;
4910 /* Undo all the modifications recorded in undobuf. */
4912 static void
4913 undo_all (void)
4915 undo_to_marker (0);
4918 /* We've committed to accepting the changes we made. Move all
4919 of the undos to the free list. */
4921 static void
4922 undo_commit (void)
4924 struct undo *undo, *next;
4926 for (undo = undobuf.undos; undo; undo = next)
4928 next = undo->next;
4929 undo->next = undobuf.frees;
4930 undobuf.frees = undo;
4932 undobuf.undos = 0;
4935 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4936 where we have an arithmetic expression and return that point. LOC will
4937 be inside INSN.
4939 try_combine will call this function to see if an insn can be split into
4940 two insns. */
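/* Sketch: given
   (set (reg 103) (plus (mult (reg 101) (reg 102)) (reg 100))),
   a plausible split point is the inner MULT, letting try_combine
   compute the product in a separate insn when the combined
   pattern does not match as a whole.  */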
4942 static rtx *
4943 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4945 rtx x = *loc;
4946 enum rtx_code code = GET_CODE (x);
4947 rtx *split;
4948 unsigned HOST_WIDE_INT len = 0;
4949 HOST_WIDE_INT pos = 0;
4950 int unsignedp = 0;
4951 rtx inner = NULL_RTX;
4952 scalar_int_mode mode, inner_mode;
4954 /* First special-case some codes. */
4955 switch (code)
4957 case SUBREG:
4958 #ifdef INSN_SCHEDULING
4959 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4960 point. */
4961 if (MEM_P (SUBREG_REG (x)))
4962 return loc;
4963 #endif
4964 return find_split_point (&SUBREG_REG (x), insn, false);
4966 case MEM:
4967 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4968 using LO_SUM and HIGH. */
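/* Sketch: (mem (symbol_ref "x")) is rewritten as
   (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))),
   so the HIGH part can be set by a separate insn, as on typical
   RISC ports.  */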
4969 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4970 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4972 machine_mode address_mode = get_address_mode (x);
4974 SUBST (XEXP (x, 0),
4975 gen_rtx_LO_SUM (address_mode,
4976 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4977 XEXP (x, 0)));
4978 return &XEXP (XEXP (x, 0), 0);
4981 /* If we have a PLUS whose second operand is a constant and the
4982 address is not valid, perhaps we can split it up using
4983 the machine-specific way to split large constants. We use
4984 the first pseudo-reg (one of the virtual regs) as a placeholder;
4985 it will not remain in the result. */
4986 if (GET_CODE (XEXP (x, 0)) == PLUS
4987 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4988 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4989 MEM_ADDR_SPACE (x)))
4991 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4992 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4993 subst_insn);
4995 /* This should have produced two insns, each of which sets our
4996 placeholder. If the source of the second is a valid address,
4997 we can put both sources together and make a split point
4998 in the middle. */
5000 if (seq
5001 && NEXT_INSN (seq) != NULL_RTX
5002 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
5003 && NONJUMP_INSN_P (seq)
5004 && GET_CODE (PATTERN (seq)) == SET
5005 && SET_DEST (PATTERN (seq)) == reg
5006 && ! reg_mentioned_p (reg,
5007 SET_SRC (PATTERN (seq)))
5008 && NONJUMP_INSN_P (NEXT_INSN (seq))
5009 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
5010 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
5011 && memory_address_addr_space_p
5012 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
5013 MEM_ADDR_SPACE (x)))
5015 rtx src1 = SET_SRC (PATTERN (seq));
5016 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
5018 /* Replace the placeholder in SRC2 with SRC1. If we can
5019 find where in SRC2 it was placed, that can become our
5020 split point and we can replace this address with SRC2.
5021 Just try two obvious places. */
5023 src2 = replace_rtx (src2, reg, src1);
5024 split = 0;
5025 if (XEXP (src2, 0) == src1)
5026 split = &XEXP (src2, 0);
5027 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
5028 && XEXP (XEXP (src2, 0), 0) == src1)
5029 split = &XEXP (XEXP (src2, 0), 0);
5031 if (split)
5033 SUBST (XEXP (x, 0), src2);
5034 return split;
5038 /* If that didn't work and we have a nested plus, like:
5039 ((REG1 * CONST1) + REG2) + CONST2, where (REG1 + REG2) + CONST2
5040 is a valid address, try to split (REG1 * CONST1). */
5041 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5042 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5043 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5044 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5045 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5046 0), 0)))))
5048 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5049 XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5050 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5051 MEM_ADDR_SPACE (x)))
5053 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5054 return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5056 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5058 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5059 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5060 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5061 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5062 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5063 0), 1)))))
5065 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5066 XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5067 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5068 MEM_ADDR_SPACE (x)))
5070 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5071 return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5073 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5076 /* If that didn't work, perhaps the first operand is complex and
5077 needs to be computed separately, so make a split point there.
5078 This will occur on machines that just support REG + CONST
5079 and have a constant moved through some previous computation. */
5080 if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5081 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5082 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5083 return &XEXP (XEXP (x, 0), 0);
5086 /* If we have a PLUS whose first operand is complex, try computing it
5087 separately by making a split there. */
5088 if (GET_CODE (XEXP (x, 0)) == PLUS
5089 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5090 MEM_ADDR_SPACE (x))
5091 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5092 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5093 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5094 return &XEXP (XEXP (x, 0), 0);
5095 break;
5097 case SET:
5098 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5099 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5100 we need to put the operand into a register. So split at that
5101 point. */
5103 if (SET_DEST (x) == cc0_rtx
5104 && GET_CODE (SET_SRC (x)) != COMPARE
5105 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5106 && !OBJECT_P (SET_SRC (x))
5107 && ! (GET_CODE (SET_SRC (x)) == SUBREG
5108 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5109 return &SET_SRC (x);
5111 /* See if we can split SET_SRC as it stands. */
5112 split = find_split_point (&SET_SRC (x), insn, true);
5113 if (split && split != &SET_SRC (x))
5114 return split;
5116 /* See if we can split SET_DEST as it stands. */
5117 split = find_split_point (&SET_DEST (x), insn, false);
5118 if (split && split != &SET_DEST (x))
5119 return split;
5121 /* See if this is a bitfield assignment with everything constant. If
5122 so, this is an IOR of an AND, so split it into that. */
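/* Sketch with a made-up SImode pseudo:
   (set (zero_extract:SI (reg 100) (const_int 4) (const_int 8))
	(const_int 5))
   becomes
   (set (reg 100) (ior:SI (and:SI (reg 100) (const_int -3841))
			  (const_int 1280))),
   i.e. clear the 4-bit field at bit 8 and IOR in 5 << 8.  */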
5123 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5124 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5125 &inner_mode)
5126 && HWI_COMPUTABLE_MODE_P (inner_mode)
5127 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5128 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5129 && CONST_INT_P (SET_SRC (x))
5130 && ((INTVAL (XEXP (SET_DEST (x), 1))
5131 + INTVAL (XEXP (SET_DEST (x), 2)))
5132 <= GET_MODE_PRECISION (inner_mode))
5133 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5135 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5136 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5137 rtx dest = XEXP (SET_DEST (x), 0);
5138 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
5139 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
5140 rtx or_mask;
5142 if (BITS_BIG_ENDIAN)
5143 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5145 or_mask = gen_int_mode (src << pos, inner_mode);
5146 if (src == mask)
5147 SUBST (SET_SRC (x),
5148 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5149 else
5151 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5152 SUBST (SET_SRC (x),
5153 simplify_gen_binary (IOR, inner_mode,
5154 simplify_gen_binary (AND, inner_mode,
5155 dest, negmask),
5156 or_mask));
5159 SUBST (SET_DEST (x), dest);
5161 split = find_split_point (&SET_SRC (x), insn, true);
5162 if (split && split != &SET_SRC (x))
5163 return split;
5166 /* Otherwise, see if this is an operation that we can split into two.
5167 If so, try to split that. */
5168 code = GET_CODE (SET_SRC (x));
5170 switch (code)
5172 case AND:
5173 /* If we are AND'ing with a large constant that is only a single
5174 bit and the result is only being used in a context where we
5175 need to know if it is zero or nonzero, replace it with a bit
5176 extraction. This will avoid the large constant, which might
5177 have taken more than one insn to make. If the constant were
5178 not a valid argument to the AND but took only one insn to make,
5179 this is no worse, but if it took more than one insn, it will
5180 be better. */
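/* For instance, if (set D (and X (const_int 0x1000))) feeds only
   (ne D (const_int 0)), the AND can be replaced by a one-bit
   extraction, roughly (zero_extract X 1 12), so the 0x1000 constant
   never needs to be built. */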
5182 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5183 && REG_P (XEXP (SET_SRC (x), 0))
5184 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5185 && REG_P (SET_DEST (x))
5186 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5187 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5188 && XEXP (*split, 0) == SET_DEST (x)
5189 && XEXP (*split, 1) == const0_rtx)
5191 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5192 XEXP (SET_SRC (x), 0),
5193 pos, NULL_RTX, 1, 1, 0, 0);
5194 if (extraction != 0)
5196 SUBST (SET_SRC (x), extraction);
5197 return find_split_point (loc, insn, false);
5200 break;
5202 case NE:
5203 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5204 is known to be on, this can be converted into a NEG of a shift. */
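/* For instance, if only bit 3 of X can be nonzero, (ne X 0) here
   evaluates to 0 or -1, and (neg (lshiftrt X 3)) computes exactly
   that. */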
5205 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5206 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5207 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5208 GET_MODE (XEXP (SET_SRC (x),
5209 0))))) >= 1))
5211 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5212 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5213 SUBST (SET_SRC (x),
5214 gen_rtx_NEG (mode,
5215 gen_rtx_LSHIFTRT (mode,
5216 XEXP (SET_SRC (x), 0),
5217 pos_rtx)));
5219 split = find_split_point (&SET_SRC (x), insn, true);
5220 if (split && split != &SET_SRC (x))
5221 return split;
5223 break;
5225 case SIGN_EXTEND:
5226 inner = XEXP (SET_SRC (x), 0);
5228 /* We can't optimize if either mode is a partial integer
5229 mode as we don't know how many bits are significant
5230 in those modes. */
5231 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5232 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5233 break;
5235 pos = 0;
5236 len = GET_MODE_PRECISION (inner_mode);
5237 unsignedp = 0;
5238 break;
5240 case SIGN_EXTRACT:
5241 case ZERO_EXTRACT:
5242 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5243 &inner_mode)
5244 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5245 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5247 inner = XEXP (SET_SRC (x), 0);
5248 len = INTVAL (XEXP (SET_SRC (x), 1));
5249 pos = INTVAL (XEXP (SET_SRC (x), 2));
5251 if (BITS_BIG_ENDIAN)
5252 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5253 unsignedp = (code == ZERO_EXTRACT);
5255 break;
5257 default:
5258 break;
5261 if (len
5262 && known_subrange_p (pos, len,
5263 0, GET_MODE_PRECISION (GET_MODE (inner)))
5264 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5266 /* For unsigned, we have a choice of a shift followed by an
5267 AND or two shifts. Use two shifts for field sizes where the
5268 constant might be too large. We assume here that we can
5269 always at least get 8-bit constants in an AND insn, which is
5270 true for every current RISC. */
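/* E.g. an 8-bit unsigned field at bit POS becomes
   (and (lshiftrt X POS) 0xff), while a wider field uses
   (lshiftrt (ashift X LEFT) RIGHT) so that no large AND constant
   is needed. */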
5272 if (unsignedp && len <= 8)
5274 unsigned HOST_WIDE_INT mask
5275 = (HOST_WIDE_INT_1U << len) - 1;
5276 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5277 SUBST (SET_SRC (x),
5278 gen_rtx_AND (mode,
5279 gen_rtx_LSHIFTRT
5280 (mode, gen_lowpart (mode, inner), pos_rtx),
5281 gen_int_mode (mask, mode)));
5283 split = find_split_point (&SET_SRC (x), insn, true);
5284 if (split && split != &SET_SRC (x))
5285 return split;
5287 else
5289 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5290 int right_bits = GET_MODE_PRECISION (mode) - len;
5291 SUBST (SET_SRC (x),
5292 gen_rtx_fmt_ee
5293 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5294 gen_rtx_ASHIFT (mode,
5295 gen_lowpart (mode, inner),
5296 gen_int_shift_amount (mode, left_bits)),
5297 gen_int_shift_amount (mode, right_bits)));
5299 split = find_split_point (&SET_SRC (x), insn, true);
5300 if (split && split != &SET_SRC (x))
5301 return split;
5305 /* See if this is a simple operation with a constant as the second
5306 operand. It might be that this constant is out of range and hence
5307 could be used as a split point. */
5308 if (BINARY_P (SET_SRC (x))
5309 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5310 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5311 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5312 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5313 return &XEXP (SET_SRC (x), 1);
5315 /* Finally, see if this is a simple operation with its first operand
5316 not in a register. The operation might require this operand in a
5317 register, so return it as a split point. We can always do this
5318 because if the first operand were another operation, we would have
5319 already found it as a split point. */
5320 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5321 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5322 return &XEXP (SET_SRC (x), 0);
5324 return 0;
5326 case AND:
5327 case IOR:
5328 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5329 it is better to write this as (not (ior A B)) so we can split it.
5330 Similarly for IOR. */
5331 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5333 SUBST (*loc,
5334 gen_rtx_NOT (GET_MODE (x),
5335 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5336 GET_MODE (x),
5337 XEXP (XEXP (x, 0), 0),
5338 XEXP (XEXP (x, 1), 0))));
5339 return find_split_point (loc, insn, set_src);
5342 /* Many RISC machines have a large set of logical insns. If the
5343 second operand is a NOT, put it first so we will try to split the
5344 other operand first. */
5345 if (GET_CODE (XEXP (x, 1)) == NOT)
5347 rtx tem = XEXP (x, 0);
5348 SUBST (XEXP (x, 0), XEXP (x, 1));
5349 SUBST (XEXP (x, 1), tem);
5351 break;
5353 case PLUS:
5354 case MINUS:
5355 /* Canonicalization can produce (minus A (mult B C)), where C is a
5356 constant. It may be better to try splitting (plus (mult B -C) A)
5357 instead if this isn't a multiply by a power of two. */
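/* E.g. (minus A (mult B (const_int 3))) is retried here as
   (plus (mult B (const_int -3)) A). */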
5358 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5359 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5360 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5362 machine_mode mode = GET_MODE (x);
5363 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5364 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5365 SUBST (*loc, gen_rtx_PLUS (mode,
5366 gen_rtx_MULT (mode,
5367 XEXP (XEXP (x, 1), 0),
5368 gen_int_mode (other_int,
5369 mode)),
5370 XEXP (x, 0)));
5371 return find_split_point (loc, insn, set_src);
5374 /* Split at a multiply-accumulate instruction. However if this is
5375 the SET_SRC, we likely do not have such an instruction and it's
5376 worthless to try this split. */
5377 if (!set_src
5378 && (GET_CODE (XEXP (x, 0)) == MULT
5379 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5380 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5381 return loc;
5383 default:
5384 break;
5387 /* Otherwise, select our actions depending on our rtx class. */
5388 switch (GET_RTX_CLASS (code))
5390 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5391 case RTX_TERNARY:
5392 split = find_split_point (&XEXP (x, 2), insn, false);
5393 if (split)
5394 return split;
5395 /* fall through */
5396 case RTX_BIN_ARITH:
5397 case RTX_COMM_ARITH:
5398 case RTX_COMPARE:
5399 case RTX_COMM_COMPARE:
5400 split = find_split_point (&XEXP (x, 1), insn, false);
5401 if (split)
5402 return split;
5403 /* fall through */
5404 case RTX_UNARY:
5405 /* Some machines have (and (shift ...) ...) insns. If X is not
5406 an AND, but XEXP (X, 0) is, use it as our split point. */
5407 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5408 return &XEXP (x, 0);
5410 split = find_split_point (&XEXP (x, 0), insn, false);
5411 if (split)
5412 return split;
5413 return loc;
5415 default:
5416 /* Otherwise, we don't have a split point. */
5417 return 0;
5421 /* Throughout X, replace FROM with TO, and return the result.
5422 The result is TO if X is FROM;
5423 otherwise the result is X, but its contents may have been modified.
5424 If they were modified, a record was made in undobuf so that
5425 undo_all will (among other things) return X to its original state.
5427 If the number of changes necessary is too much to record to undo,
5428 the excess changes are not made, so the result is invalid.
5429 The changes already made can still be undone.
5430 undobuf.num_undo is incremented for such changes, so the caller can
5431 test it to tell whether the result is valid.
5433 `n_occurrences' is incremented each time FROM is replaced.
5435 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5437 IN_COND is nonzero if we are at the top level of a condition.
5439 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5440 by copying if `n_occurrences' is nonzero. */
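/* For instance, try_combine uses this to substitute I2's SET_SRC for
   the uses of I2's destination throughout I3's pattern before trying
   to recognize the result. */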
5442 static rtx
5443 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5445 enum rtx_code code = GET_CODE (x);
5446 machine_mode op0_mode = VOIDmode;
5447 const char *fmt;
5448 int len, i;
5449 rtx new_rtx;
5451 /* Two expressions are equal if they are identical copies of a shared
5452 RTX or if they are both registers with the same register number
5453 and mode. */
5455 #define COMBINE_RTX_EQUAL_P(X,Y) \
5456 ((X) == (Y) \
5457 || (REG_P (X) && REG_P (Y) \
5458 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5460 /* Do not substitute into clobbers of regs -- this will never result in
5461 valid RTL. */
5462 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5463 return x;
5465 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5467 n_occurrences++;
5468 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5471 /* If X and FROM are the same register but different modes, they
5472 will not have been seen as equal above. However, the log links code
5473 will make a LOG_LINKS entry for that case. If we do nothing, we
5474 will try to rerecognize our original insn and, when it succeeds,
5475 we will delete the feeding insn, which is incorrect.
5477 So force this insn not to match in this (rare) case. */
5478 if (! in_dest && code == REG && REG_P (from)
5479 && reg_overlap_mentioned_p (x, from))
5480 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5482 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5483 of which may contain things that can be combined. */
5484 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5485 return x;
5487 /* It is possible to have a subexpression appear twice in the insn.
5488 Suppose that FROM is a register that appears within TO.
5489 Then, after that subexpression has been scanned once by `subst',
5490 the second time it is scanned, TO may be found. If we were
5491 to scan TO here, we would find FROM within it and create a
5492 self-referential rtl structure which is completely wrong. */
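/* Concretely, with FROM = (reg 100) and TO = (plus (reg 100)
   (const_int 4)), scanning inside TO would substitute again and
   build (plus (plus (reg 100) (const_int 4)) (const_int 4)), and so
   on without bound. */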
5493 if (COMBINE_RTX_EQUAL_P (x, to))
5494 return to;
5496 /* Parallel asm_operands need special attention because all of the
5497 inputs are shared across the arms. Furthermore, unsharing the
5498 rtl results in recognition failures. Failure to handle this case
5499 specially can result in circular rtl.
5501 Solve this by doing a normal pass across the first entry of the
5502 parallel, and only processing the SET_DESTs of the subsequent
5503 entries. Ug. */
5505 if (code == PARALLEL
5506 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5507 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5509 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5511 /* If this substitution failed, this whole thing fails. */
5512 if (GET_CODE (new_rtx) == CLOBBER
5513 && XEXP (new_rtx, 0) == const0_rtx)
5514 return new_rtx;
5516 SUBST (XVECEXP (x, 0, 0), new_rtx);
5518 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5520 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5522 if (!REG_P (dest)
5523 && GET_CODE (dest) != CC0
5524 && GET_CODE (dest) != PC)
5526 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5528 /* If this substitution failed, this whole thing fails. */
5529 if (GET_CODE (new_rtx) == CLOBBER
5530 && XEXP (new_rtx, 0) == const0_rtx)
5531 return new_rtx;
5533 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5537 else
5539 len = GET_RTX_LENGTH (code);
5540 fmt = GET_RTX_FORMAT (code);
5542 /* We don't need to process a SET_DEST that is a register, CC0,
5543 or PC, so set up to skip this common case. All other cases
5544 where we want to suppress replacing something inside a
5545 SET_SRC are handled via the IN_DEST operand. */
5546 if (code == SET
5547 && (REG_P (SET_DEST (x))
5548 || GET_CODE (SET_DEST (x)) == CC0
5549 || GET_CODE (SET_DEST (x)) == PC))
5550 fmt = "ie";
5552 /* Trying to simplify the operands of a widening MULT is not likely
5553 to create RTL matching a machine insn. */
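/* E.g. (mult:DI (sign_extend:DI (reg:SI a))
   (sign_extend:DI (reg:SI b))) is the shape a mulsidi3-style
   pattern matches; folding either extension away would only
   unmatch it. */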
5554 if (code == MULT
5555 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5556 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5557 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5558 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5559 && REG_P (XEXP (XEXP (x, 0), 0))
5560 && REG_P (XEXP (XEXP (x, 1), 0))
5561 && from == to)
5562 return x;
5565 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5566 constant. */
5567 if (fmt[0] == 'e')
5568 op0_mode = GET_MODE (XEXP (x, 0));
5570 for (i = 0; i < len; i++)
5572 if (fmt[i] == 'E')
5574 int j;
5575 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5577 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5579 new_rtx = (unique_copy && n_occurrences
5580 ? copy_rtx (to) : to);
5581 n_occurrences++;
5583 else
5585 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5586 unique_copy);
5588 /* If this substitution failed, this whole thing
5589 fails. */
5590 if (GET_CODE (new_rtx) == CLOBBER
5591 && XEXP (new_rtx, 0) == const0_rtx)
5592 return new_rtx;
5595 SUBST (XVECEXP (x, i, j), new_rtx);
5598 else if (fmt[i] == 'e')
5600 /* If this is a register being set, ignore it. */
5601 new_rtx = XEXP (x, i);
5602 if (in_dest
5603 && i == 0
5604 && (((code == SUBREG || code == ZERO_EXTRACT)
5605 && REG_P (new_rtx))
5606 || code == STRICT_LOW_PART))
5609 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5611 /* In general, don't install a subreg involving two
5612 modes not tieable. It can worsen register
5613 allocation, and can even make invalid reload
5614 insns, since the reg inside may need to be copied
5615 from in the outside mode, and that may be invalid
5616 if it is an fp reg copied in integer mode.
5618 We allow two exceptions to this: It is valid if
5619 it is inside another SUBREG and the mode of that
5620 SUBREG and the mode of the inside of TO is
5621 tieable and it is valid if X is a SET that copies
5622 FROM to CC0. */
5624 if (GET_CODE (to) == SUBREG
5625 && !targetm.modes_tieable_p (GET_MODE (to),
5626 GET_MODE (SUBREG_REG (to)))
5627 && ! (code == SUBREG
5628 && (targetm.modes_tieable_p
5629 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5630 && (!HAVE_cc0
5631 || (! (code == SET
5632 && i == 1
5633 && XEXP (x, 0) == cc0_rtx))))
5634 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5636 if (code == SUBREG
5637 && REG_P (to)
5638 && REGNO (to) < FIRST_PSEUDO_REGISTER
5639 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5640 SUBREG_BYTE (x),
5641 GET_MODE (x)) < 0)
5642 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5644 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5645 n_occurrences++;
5647 else
5648 /* If we are in a SET_DEST, suppress most cases unless we
5649 have gone inside a MEM, in which case we want to
5650 simplify the address. We assume here that things that
5651 are actually part of the destination have their inner
5652 parts in the first expression. This is true for SUBREG,
5653 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5654 things aside from REG and MEM that should appear in a
5655 SET_DEST. */
5656 new_rtx = subst (XEXP (x, i), from, to,
5657 (((in_dest
5658 && (code == SUBREG || code == STRICT_LOW_PART
5659 || code == ZERO_EXTRACT))
5660 || code == SET)
5661 && i == 0),
5662 code == IF_THEN_ELSE && i == 0,
5663 unique_copy);
5665 /* If we found that we will have to reject this combination,
5666 indicate that by returning the CLOBBER ourselves, rather than
5667 an expression containing it. This will speed things up as
5668 well as prevent accidents where two CLOBBERs are considered
5669 to be equal, thus producing an incorrect simplification. */
5671 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5672 return new_rtx;
5674 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5676 machine_mode mode = GET_MODE (x);
5678 x = simplify_subreg (GET_MODE (x), new_rtx,
5679 GET_MODE (SUBREG_REG (x)),
5680 SUBREG_BYTE (x));
5681 if (! x)
5682 x = gen_rtx_CLOBBER (mode, const0_rtx);
5684 else if (CONST_SCALAR_INT_P (new_rtx)
5685 && (GET_CODE (x) == ZERO_EXTEND
5686 || GET_CODE (x) == SIGN_EXTEND
5687 || GET_CODE (x) == FLOAT
5688 || GET_CODE (x) == UNSIGNED_FLOAT))
5690 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5691 new_rtx,
5692 GET_MODE (XEXP (x, 0)));
5693 if (!x)
5694 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5696 else
5697 SUBST (XEXP (x, i), new_rtx);
5702 /* Check if we are loading something from the constant pool via float
5703 extension; in this case we would undo compress_float_constant
5704 optimization and degenerate constant load to an immediate value. */
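/* E.g. a DFmode constant that compress_float_constant stored in
   SFmode and reloads via (float_extend:DF (mem/u:SF ...)); folding
   that back into a wide constant would recreate the larger
   constant-pool entry the optimization removed. */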
5705 if (GET_CODE (x) == FLOAT_EXTEND
5706 && MEM_P (XEXP (x, 0))
5707 && MEM_READONLY_P (XEXP (x, 0)))
5709 rtx tmp = avoid_constant_pool_reference (x);
5710 if (x != tmp)
5711 return x;
5714 /* Try to simplify X. If the simplification changed the code, it is likely
5715 that further simplification will help, so loop, but limit the number
5716 of repetitions that will be performed. */
5718 for (i = 0; i < 4; i++)
5720 /* If X is sufficiently simple, don't bother trying to do anything
5721 with it. */
5722 if (code != CONST_INT && code != REG && code != CLOBBER)
5723 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5725 if (GET_CODE (x) == code)
5726 break;
5728 code = GET_CODE (x);
5730 /* We no longer know the original mode of operand 0 since we
5731 have changed the form of X. */
5732 op0_mode = VOIDmode;
5735 return x;
5738 /* If X is a commutative operation whose operands are not in the canonical
5739 order, use substitutions to swap them. */
5741 static void
5742 maybe_swap_commutative_operands (rtx x)
5744 if (COMMUTATIVE_ARITH_P (x)
5745 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5747 rtx temp = XEXP (x, 0);
5748 SUBST (XEXP (x, 0), XEXP (x, 1));
5749 SUBST (XEXP (x, 1), temp);
5753 /* Simplify X, a piece of RTL. We just operate on the expression at the
5754 outer level; call `subst' to simplify recursively. Return the new
5755 expression.
5757 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5758 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5759 of a condition. */
5761 static rtx
5762 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5763 int in_cond)
5765 enum rtx_code code = GET_CODE (x);
5766 machine_mode mode = GET_MODE (x);
5767 scalar_int_mode int_mode;
5768 rtx temp;
5769 int i;
5771 /* If this is a commutative operation, put a constant last and a complex
5772 expression first. We don't need to do this for comparisons here. */
5773 maybe_swap_commutative_operands (x);
5775 /* Try to fold this expression in case we have constants that weren't
5776 present before. */
5777 temp = 0;
5778 switch (GET_RTX_CLASS (code))
5780 case RTX_UNARY:
5781 if (op0_mode == VOIDmode)
5782 op0_mode = GET_MODE (XEXP (x, 0));
5783 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5784 break;
5785 case RTX_COMPARE:
5786 case RTX_COMM_COMPARE:
5788 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5789 if (cmp_mode == VOIDmode)
5791 cmp_mode = GET_MODE (XEXP (x, 1));
5792 if (cmp_mode == VOIDmode)
5793 cmp_mode = op0_mode;
5795 temp = simplify_relational_operation (code, mode, cmp_mode,
5796 XEXP (x, 0), XEXP (x, 1));
5798 break;
5799 case RTX_COMM_ARITH:
5800 case RTX_BIN_ARITH:
5801 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5802 break;
5803 case RTX_BITFIELD_OPS:
5804 case RTX_TERNARY:
5805 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5806 XEXP (x, 1), XEXP (x, 2));
5807 break;
5808 default:
5809 break;
5812 if (temp)
5814 x = temp;
5815 code = GET_CODE (temp);
5816 op0_mode = VOIDmode;
5817 mode = GET_MODE (temp);
5820 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5821 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5822 things. Check for cases where both arms are testing the same
5823 condition.
5825 Don't do anything if all operands are very simple. */
5827 if ((BINARY_P (x)
5828 && ((!OBJECT_P (XEXP (x, 0))
5829 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5830 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5831 || (!OBJECT_P (XEXP (x, 1))
5832 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5833 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5834 || (UNARY_P (x)
5835 && (!OBJECT_P (XEXP (x, 0))
5836 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5837 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5839 rtx cond, true_rtx, false_rtx;
5841 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5842 if (cond != 0
5843 /* If everything is a comparison, what we have is highly unlikely
5844 to be simpler, so don't use it. */
5845 && ! (COMPARISON_P (x)
5846 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5847 /* Similarly, if we end up with one of the expressions the same
5848 as the original, it is certainly not simpler. */
5849 && ! rtx_equal_p (x, true_rtx)
5850 && ! rtx_equal_p (x, false_rtx))
5852 rtx cop1 = const0_rtx;
5853 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5855 if (cond_code == NE && COMPARISON_P (cond))
5856 return x;
5858 /* Simplify the alternative arms; this may collapse the true and
5859 false arms to store-flag values. Be careful to use copy_rtx
5860 here since true_rtx or false_rtx might share RTL with x as a
5861 result of the if_then_else_cond call above. */
5862 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5863 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5865 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5866 is unlikely to be simpler. */
5867 if (general_operand (true_rtx, VOIDmode)
5868 && general_operand (false_rtx, VOIDmode))
5870 enum rtx_code reversed;
5872 /* Restarting if we generate a store-flag expression will cause
5873 us to loop. Just drop through in this case. */
5875 /* If the result values are STORE_FLAG_VALUE and zero, we can
5876 just make the comparison operation. */
5877 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5878 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5879 cond, cop1);
5880 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5881 && ((reversed = reversed_comparison_code_parts
5882 (cond_code, cond, cop1, NULL))
5883 != UNKNOWN))
5884 x = simplify_gen_relational (reversed, mode, VOIDmode,
5885 cond, cop1);
5887 /* Likewise, we can make the negate of a comparison operation
5888 if the result values are - STORE_FLAG_VALUE and zero. */
5889 else if (CONST_INT_P (true_rtx)
5890 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5891 && false_rtx == const0_rtx)
5892 x = simplify_gen_unary (NEG, mode,
5893 simplify_gen_relational (cond_code,
5894 mode, VOIDmode,
5895 cond, cop1),
5896 mode);
5897 else if (CONST_INT_P (false_rtx)
5898 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5899 && true_rtx == const0_rtx
5900 && ((reversed = reversed_comparison_code_parts
5901 (cond_code, cond, cop1, NULL))
5902 != UNKNOWN))
5903 x = simplify_gen_unary (NEG, mode,
5904 simplify_gen_relational (reversed,
5905 mode, VOIDmode,
5906 cond, cop1),
5907 mode);
5909 code = GET_CODE (x);
5910 op0_mode = VOIDmode;
5915 /* First see if we can apply the inverse distributive law. */
5916 if (code == PLUS || code == MINUS
5917 || code == AND || code == IOR || code == XOR)
5919 x = apply_distributive_law (x);
5920 code = GET_CODE (x);
5921 op0_mode = VOIDmode;
5924 /* If CODE is an associative operation not otherwise handled, see if we
5925 can associate some operands. This can win if they are constants or
5926 if they are logically related (i.e. (a & b) & a). */
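/* E.g. (plus (plus X (const_int 3)) (const_int 9)) reassociates to
   (plus X (const_int 12)), and (and (and A B) A) collapses to
   A & B. */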
5927 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5928 || code == AND || code == IOR || code == XOR
5929 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5930 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5931 || (flag_associative_math && FLOAT_MODE_P (mode))))
5933 if (GET_CODE (XEXP (x, 0)) == code)
5935 rtx other = XEXP (XEXP (x, 0), 0);
5936 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5937 rtx inner_op1 = XEXP (x, 1);
5938 rtx inner;
5940 /* Make sure we pass the constant operand if any as the second
5941 one if this is a commutative operation. */
5942 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5943 std::swap (inner_op0, inner_op1);
5944 inner = simplify_binary_operation (code == MINUS ? PLUS
5945 : code == DIV ? MULT
5946 : code,
5947 mode, inner_op0, inner_op1);
5949 /* For commutative operations, try the other pair if that one
5950 didn't simplify. */
5951 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5953 other = XEXP (XEXP (x, 0), 1);
5954 inner = simplify_binary_operation (code, mode,
5955 XEXP (XEXP (x, 0), 0),
5956 XEXP (x, 1));
5959 if (inner)
5960 return simplify_gen_binary (code, mode, other, inner);
5964 /* A little bit of algebraic simplification here. */
5965 switch (code)
5967 case MEM:
5968 /* Ensure that our address has any ASHIFTs converted to MULT in case
5969 address-recognizing predicates are called later. */
5970 temp = make_compound_operation (XEXP (x, 0), MEM);
5971 SUBST (XEXP (x, 0), temp);
5972 break;
5974 case SUBREG:
5975 if (op0_mode == VOIDmode)
5976 op0_mode = GET_MODE (SUBREG_REG (x));
5978 /* See if this can be moved to simplify_subreg. */
5979 if (CONSTANT_P (SUBREG_REG (x))
5980 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5981 /* Don't call gen_lowpart if the inner mode
5982 is VOIDmode and we cannot simplify it, as SUBREG without
5983 inner mode is invalid. */
5984 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5985 || gen_lowpart_common (mode, SUBREG_REG (x))))
5986 return gen_lowpart (mode, SUBREG_REG (x));
5988 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5989 break;
5991 rtx temp;
5992 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5993 SUBREG_BYTE (x));
5994 if (temp)
5995 return temp;
5997 /* If op is known to have all lower bits zero, the result is zero. */
5998 scalar_int_mode int_mode, int_op0_mode;
5999 if (!in_dest
6000 && is_a <scalar_int_mode> (mode, &int_mode)
6001 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
6002 && (GET_MODE_PRECISION (int_mode)
6003 < GET_MODE_PRECISION (int_op0_mode))
6004 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
6005 SUBREG_BYTE (x))
6006 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
6007 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
6008 & GET_MODE_MASK (int_mode)) == 0)
6009 && !side_effects_p (SUBREG_REG (x)))
6010 return CONST0_RTX (int_mode);
6013 /* Don't change the mode of the MEM if that would change the meaning
6014 of the address. */
6015 if (MEM_P (SUBREG_REG (x))
6016 && (MEM_VOLATILE_P (SUBREG_REG (x))
6017 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
6018 MEM_ADDR_SPACE (SUBREG_REG (x)))))
6019 return gen_rtx_CLOBBER (mode, const0_rtx);
6021 /* Note that we cannot do any narrowing for non-constants since
6022 we might have been counting on using the fact that some bits were
6023 zero. We now do this in the SET. */
6025 break;
6027 case NEG:
6028 temp = expand_compound_operation (XEXP (x, 0));
6030 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
6031 replaced by (lshiftrt X C). This will convert
6032 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
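/* E.g. in QImode, (neg (ashiftrt X 7)) is 0 or 1 exactly as
   (lshiftrt X 7) is, so the NEG disappears. */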
6034 if (GET_CODE (temp) == ASHIFTRT
6035 && CONST_INT_P (XEXP (temp, 1))
6036 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
6037 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
6038 INTVAL (XEXP (temp, 1)));
6040 /* If X has only a single bit that might be nonzero, say, bit I, convert
6041 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
6042 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
6043 (sign_extract X 1 Y). But only do this if TEMP isn't a register
6044 or a SUBREG of one since we'd be making the expression more
6045 complex if it was just a register. */
6047 if (!REG_P (temp)
6048 && ! (GET_CODE (temp) == SUBREG
6049 && REG_P (SUBREG_REG (temp)))
6050 && is_a <scalar_int_mode> (mode, &int_mode)
6051 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
6053 rtx temp1 = simplify_shift_const
6054 (NULL_RTX, ASHIFTRT, int_mode,
6055 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
6056 GET_MODE_PRECISION (int_mode) - 1 - i),
6057 GET_MODE_PRECISION (int_mode) - 1 - i);
6059 /* If all we did was surround TEMP with the two shifts, we
6060 haven't improved anything, so don't use it. Otherwise,
6061 we are better off with TEMP1. */
6062 if (GET_CODE (temp1) != ASHIFTRT
6063 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
6064 || XEXP (XEXP (temp1, 0), 0) != temp)
6065 return temp1;
6067 break;
6069 case TRUNCATE:
6070 /* We can't handle truncation to a partial integer mode here
6071 because we don't know the real bitsize of the partial
6072 integer mode. */
6073 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
6074 break;
6076 if (HWI_COMPUTABLE_MODE_P (mode))
6077 SUBST (XEXP (x, 0),
6078 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6079 GET_MODE_MASK (mode), 0));
6081 /* We can truncate a constant value and return it. */
6083 poly_int64 c;
6084 if (poly_int_rtx_p (XEXP (x, 0), &c))
6085 return gen_int_mode (c, mode);
6088 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
6089 whose value is a comparison can be replaced with a subreg if
6090 STORE_FLAG_VALUE permits. */
6091 if (HWI_COMPUTABLE_MODE_P (mode)
6092 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6093 && (temp = get_last_value (XEXP (x, 0)))
6094 && COMPARISON_P (temp))
6095 return gen_lowpart (mode, XEXP (x, 0));
6096 break;
6098 case CONST:
6099 /* (const (const X)) can become (const X). Do it this way rather than
6100 returning the inner CONST since CONST can be shared with a
6101 REG_EQUAL note. */
6102 if (GET_CODE (XEXP (x, 0)) == CONST)
6103 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6104 break;
6106 case LO_SUM:
6107 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
6108 can add in an offset. find_split_point will split this address up
6109 again if it doesn't match. */
6110 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6111 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6112 return XEXP (x, 1);
6113 break;
6115 case PLUS:
6116 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6117 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6118 bit-field and can be replaced by either a sign_extend or a
6119 sign_extract. The `and' may be a zero_extend and the two
6120 <c>, -<c> constants may be reversed. */
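/* The classic instance: (plus (xor (and X (const_int 255))
   (const_int 128)) (const_int -128)) sign-extends the low byte
   of X. */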
6121 if (GET_CODE (XEXP (x, 0)) == XOR
6122 && is_a <scalar_int_mode> (mode, &int_mode)
6123 && CONST_INT_P (XEXP (x, 1))
6124 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6125 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6126 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6127 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6128 && HWI_COMPUTABLE_MODE_P (int_mode)
6129 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6130 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6131 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6132 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6133 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6134 && known_eq ((GET_MODE_PRECISION
6135 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6136 (unsigned int) i + 1))))
6137 return simplify_shift_const
6138 (NULL_RTX, ASHIFTRT, int_mode,
6139 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6140 XEXP (XEXP (XEXP (x, 0), 0), 0),
6141 GET_MODE_PRECISION (int_mode) - (i + 1)),
6142 GET_MODE_PRECISION (int_mode) - (i + 1));
6144 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6145 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6146 the bitsize of the mode - 1. This allows simplification of
6147 "a = (b & 8) == 0;" */
6148 if (XEXP (x, 1) == constm1_rtx
6149 && !REG_P (XEXP (x, 0))
6150 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6151 && REG_P (SUBREG_REG (XEXP (x, 0))))
6152 && is_a <scalar_int_mode> (mode, &int_mode)
6153 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6154 return simplify_shift_const
6155 (NULL_RTX, ASHIFTRT, int_mode,
6156 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6157 gen_rtx_XOR (int_mode, XEXP (x, 0),
6158 const1_rtx),
6159 GET_MODE_PRECISION (int_mode) - 1),
6160 GET_MODE_PRECISION (int_mode) - 1);
6162 /* If we are adding two things that have no bits in common, convert
6163 the addition into an IOR. This will often be further simplified,
6164 for example in cases like ((a & 1) + (a & 2)), which can
6165 become a & 3. */
6167 if (HWI_COMPUTABLE_MODE_P (mode)
6168 && (nonzero_bits (XEXP (x, 0), mode)
6169 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6171 /* Try to simplify the expression further. */
6172 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6173 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6175 /* If we could, great. If not, do not go ahead with the IOR
6176 replacement, since PLUS appears in many special purpose
6177 address arithmetic instructions. */
6178 if (GET_CODE (temp) != CLOBBER
6179 && (GET_CODE (temp) != IOR
6180 || ((XEXP (temp, 0) != XEXP (x, 0)
6181 || XEXP (temp, 1) != XEXP (x, 1))
6182 && (XEXP (temp, 0) != XEXP (x, 1)
6183 || XEXP (temp, 1) != XEXP (x, 0)))))
6184 return temp;
6187 /* Canonicalize x + x into x << 1. */
6188 if (GET_MODE_CLASS (mode) == MODE_INT
6189 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6190 && !side_effects_p (XEXP (x, 0)))
6191 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6193 break;
6195 case MINUS:
6196 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6197 (and <foo> (const_int pow2-1)) */
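/* E.g. FOO - (FOO & -8) == FOO & 7, the remainder left after
   rounding FOO down to a multiple of 8. */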
6198 if (is_a <scalar_int_mode> (mode, &int_mode)
6199 && GET_CODE (XEXP (x, 1)) == AND
6200 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6201 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6202 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6203 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6204 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6205 break;
6207 case MULT:
6208 /* If we have (mult (plus A B) C), apply the distributive law and then
6209 the inverse distributive law to see if things simplify. This
6210 occurs mostly in addresses, often when unrolling loops. */
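/* E.g. (mult (plus X (const_int 4)) (const_int 8)) distributes to
   (plus (mult X (const_int 8)) (const_int 32)), the shape address
   recognizers expect. */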
6212 if (GET_CODE (XEXP (x, 0)) == PLUS)
6214 rtx result = distribute_and_simplify_rtx (x, 0);
6215 if (result)
6216 return result;
6219 /* Try to simplify a*(b/c) as (a*b)/c. */
6220 if (FLOAT_MODE_P (mode) && flag_associative_math
6221 && GET_CODE (XEXP (x, 0)) == DIV)
6223 rtx tem = simplify_binary_operation (MULT, mode,
6224 XEXP (XEXP (x, 0), 0),
6225 XEXP (x, 1));
6226 if (tem)
6227 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6229 break;
6231 case UDIV:
6232 /* If this is a divide by a power of two, treat it as a shift if
6233 its first operand is a shift. */
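/* E.g. (udiv (ashift X 2) (const_int 4)) becomes
   (lshiftrt (ashift X 2) 2), which the shift simplifier can then
   merge. */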
6234 if (is_a <scalar_int_mode> (mode, &int_mode)
6235 && CONST_INT_P (XEXP (x, 1))
6236 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6237 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6238 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6239 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6240 || GET_CODE (XEXP (x, 0)) == ROTATE
6241 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6242 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6243 XEXP (x, 0), i);
6244 break;
6246 case EQ: case NE:
6247 case GT: case GTU: case GE: case GEU:
6248 case LT: case LTU: case LE: case LEU:
6249 case UNEQ: case LTGT:
6250 case UNGT: case UNGE:
6251 case UNLT: case UNLE:
6252 case UNORDERED: case ORDERED:
6253 /* If the first operand is a condition code, we can't do anything
6254 with it. */
6255 if (GET_CODE (XEXP (x, 0)) == COMPARE
6256 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6257 && ! CC0_P (XEXP (x, 0))))
6259 rtx op0 = XEXP (x, 0);
6260 rtx op1 = XEXP (x, 1);
6261 enum rtx_code new_code;
6263 if (GET_CODE (op0) == COMPARE)
6264 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6266 /* Simplify our comparison, if possible. */
6267 new_code = simplify_comparison (code, &op0, &op1);
6269 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6270 if only the low-order bit is possibly nonzero in X (such as when
6271 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6272 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6273 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6274 (plus X 1).
6276 Remove any ZERO_EXTRACT we made when thinking this was a
6277 comparison. It may now be simpler to use, e.g., an AND. If a
6278 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6279 the call to make_compound_operation in the SET case.
6281 Don't apply these optimizations if the caller would
6282 prefer a comparison rather than a value.
6283 E.g., for the condition in an IF_THEN_ELSE most targets need
6284 an explicit comparison. */
6286 if (in_cond)
6289 else if (STORE_FLAG_VALUE == 1
6290 && new_code == NE
6291 && is_int_mode (mode, &int_mode)
6292 && op1 == const0_rtx
6293 && int_mode == GET_MODE (op0)
6294 && nonzero_bits (op0, int_mode) == 1)
6295 return gen_lowpart (int_mode,
6296 expand_compound_operation (op0));
6298 else if (STORE_FLAG_VALUE == 1
6299 && new_code == NE
6300 && is_int_mode (mode, &int_mode)
6301 && op1 == const0_rtx
6302 && int_mode == GET_MODE (op0)
6303 && (num_sign_bit_copies (op0, int_mode)
6304 == GET_MODE_PRECISION (int_mode)))
6306 op0 = expand_compound_operation (op0);
6307 return simplify_gen_unary (NEG, int_mode,
6308 gen_lowpart (int_mode, op0),
6309 int_mode);
6312 else if (STORE_FLAG_VALUE == 1
6313 && new_code == EQ
6314 && is_int_mode (mode, &int_mode)
6315 && op1 == const0_rtx
6316 && int_mode == GET_MODE (op0)
6317 && nonzero_bits (op0, int_mode) == 1)
6319 op0 = expand_compound_operation (op0);
6320 return simplify_gen_binary (XOR, int_mode,
6321 gen_lowpart (int_mode, op0),
6322 const1_rtx);
6325 else if (STORE_FLAG_VALUE == 1
6326 && new_code == EQ
6327 && is_int_mode (mode, &int_mode)
6328 && op1 == const0_rtx
6329 && int_mode == GET_MODE (op0)
6330 && (num_sign_bit_copies (op0, int_mode)
6331 == GET_MODE_PRECISION (int_mode)))
6333 op0 = expand_compound_operation (op0);
6334 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6337 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6338 those above. */
6339 if (in_cond)
6342 else if (STORE_FLAG_VALUE == -1
6343 && new_code == NE
6344 && is_int_mode (mode, &int_mode)
6345 && op1 == const0_rtx
6346 && int_mode == GET_MODE (op0)
6347 && (num_sign_bit_copies (op0, int_mode)
6348 == GET_MODE_PRECISION (int_mode)))
6349 return gen_lowpart (int_mode, expand_compound_operation (op0));
6351 else if (STORE_FLAG_VALUE == -1
6352 && new_code == NE
6353 && is_int_mode (mode, &int_mode)
6354 && op1 == const0_rtx
6355 && int_mode == GET_MODE (op0)
6356 && nonzero_bits (op0, int_mode) == 1)
6358 op0 = expand_compound_operation (op0);
6359 return simplify_gen_unary (NEG, int_mode,
6360 gen_lowpart (int_mode, op0),
6361 int_mode);
6364 else if (STORE_FLAG_VALUE == -1
6365 && new_code == EQ
6366 && is_int_mode (mode, &int_mode)
6367 && op1 == const0_rtx
6368 && int_mode == GET_MODE (op0)
6369 && (num_sign_bit_copies (op0, int_mode)
6370 == GET_MODE_PRECISION (int_mode)))
6372 op0 = expand_compound_operation (op0);
6373 return simplify_gen_unary (NOT, int_mode,
6374 gen_lowpart (int_mode, op0),
6375 int_mode);
6378 /* If X is 0/1, (eq X 0) is X-1. */
6379 else if (STORE_FLAG_VALUE == -1
6380 && new_code == EQ
6381 && is_int_mode (mode, &int_mode)
6382 && op1 == const0_rtx
6383 && int_mode == GET_MODE (op0)
6384 && nonzero_bits (op0, int_mode) == 1)
6386 op0 = expand_compound_operation (op0);
6387 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6390 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6391 one bit that might be nonzero, we can convert (ne x 0) to
6392 (ashift x c) where C puts the bit in the sign bit. Remove any
6393 AND with STORE_FLAG_VALUE when we are done, since we are only
6394 going to test the sign bit. */
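/* E.g. if STORE_FLAG_VALUE is the SImode sign bit and only bit 3
   of X can be set, (ne X 0) becomes (ashift X 28). */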
6395 if (new_code == NE
6396 && is_int_mode (mode, &int_mode)
6397 && HWI_COMPUTABLE_MODE_P (int_mode)
6398 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6399 && op1 == const0_rtx
6400 && int_mode == GET_MODE (op0)
6401 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6403 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6404 expand_compound_operation (op0),
6405 GET_MODE_PRECISION (int_mode) - 1 - i);
6406 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6407 return XEXP (x, 0);
6408 else
6409 return x;
6412 /* If the code changed, return a whole new comparison.
6413 We also need to avoid using SUBST in cases where
6414 simplify_comparison has widened a comparison with a CONST_INT,
6415 since in that case the wider CONST_INT may fail the sanity
6416 checks in do_SUBST. */
6417 if (new_code != code
6418 || (CONST_INT_P (op1)
6419 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6420 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6421 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6423 /* Otherwise, keep this operation, but maybe change its operands.
6424 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6425 SUBST (XEXP (x, 0), op0);
6426 SUBST (XEXP (x, 1), op1);
6428 break;
6430 case IF_THEN_ELSE:
6431 return simplify_if_then_else (x);
6433 case ZERO_EXTRACT:
6434 case SIGN_EXTRACT:
6435 case ZERO_EXTEND:
6436 case SIGN_EXTEND:
6437 /* If we are processing SET_DEST, we are done. */
6438 if (in_dest)
6439 return x;
6441 return expand_compound_operation (x);
6443 case SET:
6444 return simplify_set (x);
6446 case AND:
6447 case IOR:
6448 return simplify_logical (x);
6450 case ASHIFT:
6451 case LSHIFTRT:
6452 case ASHIFTRT:
6453 case ROTATE:
6454 case ROTATERT:
6455 /* If this is a shift by a constant amount, simplify it. */
6456 if (CONST_INT_P (XEXP (x, 1)))
6457 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6458 INTVAL (XEXP (x, 1)));
6460 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6461 SUBST (XEXP (x, 1),
6462 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6463 (HOST_WIDE_INT_1U
6464 << exact_log2 (GET_MODE_UNIT_BITSIZE
6465 (GET_MODE (x))))
6466 - 1,
6467 0));
6468 break;
6470 default:
6471 break;
6474 return x;
6477 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6479 static rtx
6480 simplify_if_then_else (rtx x)
6482 machine_mode mode = GET_MODE (x);
6483 rtx cond = XEXP (x, 0);
6484 rtx true_rtx = XEXP (x, 1);
6485 rtx false_rtx = XEXP (x, 2);
6486 enum rtx_code true_code = GET_CODE (cond);
6487 int comparison_p = COMPARISON_P (cond);
6488 rtx temp;
6489 int i;
6490 enum rtx_code false_code;
6491 rtx reversed;
6492 scalar_int_mode int_mode, inner_mode;
6494 /* Simplify storing of the truth value. */
6495 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6496 return simplify_gen_relational (true_code, mode, VOIDmode,
6497 XEXP (cond, 0), XEXP (cond, 1));
6499 /* Also when the truth value has to be reversed. */
6500 if (comparison_p
6501 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6502 && (reversed = reversed_comparison (cond, mode)))
6503 return reversed;
6505 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6506 in it is being compared against certain values. Get the true and false
6507 comparisons and see if that says anything about the value of each arm. */
6509 if (comparison_p
6510 && ((false_code = reversed_comparison_code (cond, NULL))
6511 != UNKNOWN)
6512 && REG_P (XEXP (cond, 0)))
6514 HOST_WIDE_INT nzb;
6515 rtx from = XEXP (cond, 0);
6516 rtx true_val = XEXP (cond, 1);
6517 rtx false_val = true_val;
6518 int swapped = 0;
6520 /* If FALSE_CODE is EQ, swap the codes and arms. */
6522 if (false_code == EQ)
6524 swapped = 1, true_code = EQ, false_code = NE;
6525 std::swap (true_rtx, false_rtx);
6528 scalar_int_mode from_mode;
6529 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6531 /* If we are comparing against zero and the expression being
6532 tested has only a single bit that might be nonzero, that is
6533 its value when it is not equal to zero. Similarly if it is
6534 known to be -1 or 0. */
6535 if (true_code == EQ
6536 && true_val == const0_rtx
6537 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6539 false_code = EQ;
6540 false_val = gen_int_mode (nzb, from_mode);
6542 else if (true_code == EQ
6543 && true_val == const0_rtx
6544 && (num_sign_bit_copies (from, from_mode)
6545 == GET_MODE_PRECISION (from_mode)))
6547 false_code = EQ;
6548 false_val = constm1_rtx;
6552 /* Now simplify an arm if we know the value of the register in the
6553 branch and it is used in the arm. Be careful due to the potential
6554 of locally-shared RTL. */
6556 if (reg_mentioned_p (from, true_rtx))
6557 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6558 from, true_val),
6559 pc_rtx, pc_rtx, 0, 0, 0);
6560 if (reg_mentioned_p (from, false_rtx))
6561 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6562 from, false_val),
6563 pc_rtx, pc_rtx, 0, 0, 0);
6565 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6566 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6568 true_rtx = XEXP (x, 1);
6569 false_rtx = XEXP (x, 2);
6570 true_code = GET_CODE (cond);
6573 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6574 reversed, do so to avoid needing two sets of patterns for
6575 subtract-and-branch insns. Similarly if we have a constant in the true
6576 arm, the false arm is the same as the first operand of the comparison, or
6577 the false arm is more complicated than the true arm. */
6579 if (comparison_p
6580 && reversed_comparison_code (cond, NULL) != UNKNOWN
6581 && (true_rtx == pc_rtx
6582 || (CONSTANT_P (true_rtx)
6583 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6584 || true_rtx == const0_rtx
6585 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6586 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6587 && !OBJECT_P (false_rtx))
6588 || reg_mentioned_p (true_rtx, false_rtx)
6589 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6591 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6592 SUBST (XEXP (x, 1), false_rtx);
6593 SUBST (XEXP (x, 2), true_rtx);
6595 std::swap (true_rtx, false_rtx);
6596 cond = XEXP (x, 0);
6598 /* It is possible that the conditional has been simplified out. */
6599 true_code = GET_CODE (cond);
6600 comparison_p = COMPARISON_P (cond);
6603 /* If the two arms are identical, we don't need the comparison. */
6605 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6606 return true_rtx;
6608 /* Convert a == b ? b : a to "a". */
6609 if (true_code == EQ && ! side_effects_p (cond)
6610 && !HONOR_NANS (mode)
6611 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6612 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6613 return false_rtx;
6614 else if (true_code == NE && ! side_effects_p (cond)
6615 && !HONOR_NANS (mode)
6616 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6617 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6618 return true_rtx;
6620 /* Look for cases where we have (abs x) or (neg (abs X)). */
6622 if (GET_MODE_CLASS (mode) == MODE_INT
6623 && comparison_p
6624 && XEXP (cond, 1) == const0_rtx
6625 && GET_CODE (false_rtx) == NEG
6626 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6627 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6628 && ! side_effects_p (true_rtx))
6629 switch (true_code)
6631 case GT:
6632 case GE:
6633 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6634 case LT:
6635 case LE:
6636 return
6637 simplify_gen_unary (NEG, mode,
6638 simplify_gen_unary (ABS, mode, true_rtx, mode),
6639 mode);
6640 default:
6641 break;
6644 /* Look for MIN or MAX. */
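/* E.g. (if_then_else (gt A B) A B) is (smax A B); the unsigned
   comparisons give umax and umin instead. */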
6646 if ((! FLOAT_MODE_P (mode)
6647 || (flag_unsafe_math_optimizations
6648 && !HONOR_NANS (mode)
6649 && !HONOR_SIGNED_ZEROS (mode)))
6650 && comparison_p
6651 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6652 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6653 && ! side_effects_p (cond))
6654 switch (true_code)
6656 case GE:
6657 case GT:
6658 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6659 case LE:
6660 case LT:
6661 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6662 case GEU:
6663 case GTU:
6664 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6665 case LEU:
6666 case LTU:
6667 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6668 default:
6669 break;
6672 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6673 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6674 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6675 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6676 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6677 neither 1 nor -1, but it isn't worth checking for. */
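/* E.g. with STORE_FLAG_VALUE == 1,
   (if_then_else COND (plus Z (const_int 4)) Z) can become
   (plus Z (mult COND (const_int 4))). */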
6679 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6680 && comparison_p
6681 && is_int_mode (mode, &int_mode)
6682 && ! side_effects_p (x))
6684 rtx t = make_compound_operation (true_rtx, SET);
6685 rtx f = make_compound_operation (false_rtx, SET);
6686 rtx cond_op0 = XEXP (cond, 0);
6687 rtx cond_op1 = XEXP (cond, 1);
6688 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6689 scalar_int_mode m = int_mode;
6690 rtx z = 0, c1 = NULL_RTX;
6692 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6693 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6694 || GET_CODE (t) == ASHIFT
6695 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6696 && rtx_equal_p (XEXP (t, 0), f))
6697 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6699 /* If an identity-zero op is commutative, check whether there
6700 would be a match if we swapped the operands. */
6701 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6702 || GET_CODE (t) == XOR)
6703 && rtx_equal_p (XEXP (t, 1), f))
6704 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6705 else if (GET_CODE (t) == SIGN_EXTEND
6706 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6707 && (GET_CODE (XEXP (t, 0)) == PLUS
6708 || GET_CODE (XEXP (t, 0)) == MINUS
6709 || GET_CODE (XEXP (t, 0)) == IOR
6710 || GET_CODE (XEXP (t, 0)) == XOR
6711 || GET_CODE (XEXP (t, 0)) == ASHIFT
6712 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6713 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6714 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6715 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6716 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6717 && (num_sign_bit_copies (f, GET_MODE (f))
6718 > (unsigned int)
6719 (GET_MODE_PRECISION (int_mode)
6720 - GET_MODE_PRECISION (inner_mode))))
6722 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6723 extend_op = SIGN_EXTEND;
6724 m = inner_mode;
6726 else if (GET_CODE (t) == SIGN_EXTEND
6727 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6728 && (GET_CODE (XEXP (t, 0)) == PLUS
6729 || GET_CODE (XEXP (t, 0)) == IOR
6730 || GET_CODE (XEXP (t, 0)) == XOR)
6731 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6732 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6733 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6734 && (num_sign_bit_copies (f, GET_MODE (f))
6735 > (unsigned int)
6736 (GET_MODE_PRECISION (int_mode)
6737 - GET_MODE_PRECISION (inner_mode))))
6739 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6740 extend_op = SIGN_EXTEND;
6741 m = inner_mode;
6743 else if (GET_CODE (t) == ZERO_EXTEND
6744 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6745 && (GET_CODE (XEXP (t, 0)) == PLUS
6746 || GET_CODE (XEXP (t, 0)) == MINUS
6747 || GET_CODE (XEXP (t, 0)) == IOR
6748 || GET_CODE (XEXP (t, 0)) == XOR
6749 || GET_CODE (XEXP (t, 0)) == ASHIFT
6750 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6751 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6752 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6753 && HWI_COMPUTABLE_MODE_P (int_mode)
6754 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6755 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6756 && ((nonzero_bits (f, GET_MODE (f))
6757 & ~GET_MODE_MASK (inner_mode))
6758 == 0))
6760 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6761 extend_op = ZERO_EXTEND;
6762 m = inner_mode;
6764 else if (GET_CODE (t) == ZERO_EXTEND
6765 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6766 && (GET_CODE (XEXP (t, 0)) == PLUS
6767 || GET_CODE (XEXP (t, 0)) == IOR
6768 || GET_CODE (XEXP (t, 0)) == XOR)
6769 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6770 && HWI_COMPUTABLE_MODE_P (int_mode)
6771 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6772 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6773 && ((nonzero_bits (f, GET_MODE (f))
6774 & ~GET_MODE_MASK (inner_mode))
6775 == 0))
6777 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6778 extend_op = ZERO_EXTEND;
6779 m = inner_mode;
6782 if (z)
6784 machine_mode cm = m;
6785 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6786 && GET_MODE (c1) != VOIDmode)
6787 cm = GET_MODE (c1);
6788 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6789 cond_op0, cond_op1),
6790 pc_rtx, pc_rtx, 0, 0, 0);
6791 temp = simplify_gen_binary (MULT, cm, temp,
6792 simplify_gen_binary (MULT, cm, c1,
6793 const_true_rtx));
6794 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6795 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6797 if (extend_op != UNKNOWN)
6798 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6800 return temp;
6804 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6805 1 and C1 is a single bit, or A is known to be 0 or -1 and C1 is the
6806 negation of a single bit, we can convert this operation to a shift. We
6807 can actually do this more generally, but it doesn't seem worth it. */
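/* E.g. if A is known to be 0 or 1,
   (if_then_else (ne A 0) (const_int 8) (const_int 0)) is just
   (ashift A 3). */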
6809 if (true_code == NE
6810 && is_a <scalar_int_mode> (mode, &int_mode)
6811 && XEXP (cond, 1) == const0_rtx
6812 && false_rtx == const0_rtx
6813 && CONST_INT_P (true_rtx)
6814 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6815 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6816 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6817 == GET_MODE_PRECISION (int_mode))
6818 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6819 return
6820 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6821 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6823 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6824 possibly nonzero bit in A is the bit set in C1. */
6825 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6826 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6827 && is_a <scalar_int_mode> (mode, &int_mode)
6828 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6829 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6830 == nonzero_bits (XEXP (cond, 0), inner_mode)
6831 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6833 rtx val = XEXP (cond, 0);
6834 if (inner_mode == int_mode)
6835 return val;
6836 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6837 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6840 return x;
6843 /* Simplify X, a SET expression. Return the new expression. */
6845 static rtx
6846 simplify_set (rtx x)
6848 rtx src = SET_SRC (x);
6849 rtx dest = SET_DEST (x);
6850 machine_mode mode
6851 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6852 rtx_insn *other_insn;
6853 rtx *cc_use;
6854 scalar_int_mode int_mode;
6856 /* (set (pc) (return)) gets written as (return). */
6857 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6858 return src;
6860 /* Now that we know for sure which bits of SRC we are using, see if we can
6861 simplify the expression for the object knowing that we only need the
6862 low-order bits. */
6864 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6866 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6867 SUBST (SET_SRC (x), src);
6870 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6871 the comparison result and try to simplify it unless we already have used
6872 undobuf.other_insn. */
6873 if ((GET_MODE_CLASS (mode) == MODE_CC
6874 || GET_CODE (src) == COMPARE
6875 || CC0_P (dest))
6876 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6877 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6878 && COMPARISON_P (*cc_use)
6879 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6881 enum rtx_code old_code = GET_CODE (*cc_use);
6882 enum rtx_code new_code;
6883 rtx op0, op1, tmp;
6884 int other_changed = 0;
6885 rtx inner_compare = NULL_RTX;
6886 machine_mode compare_mode = GET_MODE (dest);
6888 if (GET_CODE (src) == COMPARE)
6890 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6891 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6893 inner_compare = op0;
6894 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6897 else
6898 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6900 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6901 op0, op1);
6902 if (!tmp)
6903 new_code = old_code;
6904 else if (!CONSTANT_P (tmp))
6906 new_code = GET_CODE (tmp);
6907 op0 = XEXP (tmp, 0);
6908 op1 = XEXP (tmp, 1);
6910 else
6912 rtx pat = PATTERN (other_insn);
6913 undobuf.other_insn = other_insn;
6914 SUBST (*cc_use, tmp);
6916 /* Attempt to simplify CC user. */
6917 if (GET_CODE (pat) == SET)
6919 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6920 if (new_rtx != NULL_RTX)
6921 SUBST (SET_SRC (pat), new_rtx);
6924 /* Convert X into a no-op move. */
6925 SUBST (SET_DEST (x), pc_rtx);
6926 SUBST (SET_SRC (x), pc_rtx);
6927 return x;
6930 /* Simplify our comparison, if possible. */
6931 new_code = simplify_comparison (new_code, &op0, &op1);
6933 #ifdef SELECT_CC_MODE
6934 /* If this machine has CC modes other than CCmode, check to see if we
6935 need to use a different CC mode here. */
6936 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6937 compare_mode = GET_MODE (op0);
6938 else if (inner_compare
6939 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6940 && new_code == old_code
6941 && op0 == XEXP (inner_compare, 0)
6942 && op1 == XEXP (inner_compare, 1))
6943 compare_mode = GET_MODE (inner_compare);
6944 else
6945 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6947 /* If the mode changed, we have to change SET_DEST, the mode in the
6948 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6949 a hard register, just build new versions with the proper mode. If it
6950 is a pseudo, we lose unless it is the only time we set the pseudo, in
6951 which case we can safely change its mode. */
6952 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6954 if (can_change_dest_mode (dest, 0, compare_mode))
6956 unsigned int regno = REGNO (dest);
6957 rtx new_dest;
6959 if (regno < FIRST_PSEUDO_REGISTER)
6960 new_dest = gen_rtx_REG (compare_mode, regno);
6961 else
6963 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6964 new_dest = regno_reg_rtx[regno];
6967 SUBST (SET_DEST (x), new_dest);
6968 SUBST (XEXP (*cc_use, 0), new_dest);
6969 other_changed = 1;
6971 dest = new_dest;
6974 #endif /* SELECT_CC_MODE */
6976 /* If the code changed, we have to build a new comparison in
6977 undobuf.other_insn. */
6978 if (new_code != old_code)
6980 int other_changed_previously = other_changed;
6981 unsigned HOST_WIDE_INT mask;
6982 rtx old_cc_use = *cc_use;
6984 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6985 dest, const0_rtx));
6986 other_changed = 1;
6988 /* If the only change we made was to change an EQ into an NE or
6989 vice versa, OP0 has only one bit that might be nonzero, and OP1
6990 is zero, check if changing the user of the condition code will
6991 produce a valid insn. If it won't, we can keep the original code
6992 in that insn by surrounding our operation with an XOR. */
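/* Sketch of the XOR fallback, with M standing for the single possible
   nonzero bit of OP0: since OP0 is either 0 or M,

       (ne OP0 (const_int 0))  is equivalent to  (eq (xor OP0 M) (const_int 0))

   so the user insn can keep its original EQ/NE code when the flipped
   code fails to match.  */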
6994 if (((old_code == NE && new_code == EQ)
6995 || (old_code == EQ && new_code == NE))
6996 && ! other_changed_previously && op1 == const0_rtx
6997 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6998 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
7000 rtx pat = PATTERN (other_insn), note = 0;
7002 if ((recog_for_combine (&pat, other_insn, &note) < 0
7003 && ! check_asm_operands (pat)))
7005 *cc_use = old_cc_use;
7006 other_changed = 0;
7008 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
7009 gen_int_mode (mask,
7010 GET_MODE (op0)));
7015 if (other_changed)
7016 undobuf.other_insn = other_insn;
7018 /* Don't generate a compare of a CC with 0, just use that CC. */
7019 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
7021 SUBST (SET_SRC (x), op0);
7022 src = SET_SRC (x);
7024 /* Otherwise, if we didn't previously have the same COMPARE we
7025 want, create it from scratch. */
7026 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
7027 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
7029 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
7030 src = SET_SRC (x);
7033 else
7035 /* Get SET_SRC in a form where we have placed back any
7036 compound expressions. Then do the checks below. */
7037 src = make_compound_operation (src, SET);
7038 SUBST (SET_SRC (x), src);
7041 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
7042 and X being a REG or (subreg (reg)), we may be able to convert this to
7043 (set (subreg:m2 x) (op)).
7045 We can always do this if M1 is narrower than M2 because that means that
7046 we only care about the low bits of the result.
7048 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
7049 perform a narrower operation than requested since the high-order bits will
7050 be undefined. On machines where it is defined, this transformation is safe
7051 as long as M1 and M2 have the same number of words. */
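/* A sketch of the transformation, with placeholder registers:

       (set (reg:SI r) (subreg:SI (plus:DI a b) 0))

   may become

       (set (subreg:DI (reg:SI r) 0) (plus:DI a b))

   since only the low SImode bits of the DImode sum are live.  */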
7053 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
7054 && !OBJECT_P (SUBREG_REG (src))
7055 && (known_equal_after_align_up
7056 (GET_MODE_SIZE (GET_MODE (src)),
7057 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
7058 UNITS_PER_WORD))
7059 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
7060 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
7061 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
7062 GET_MODE (SUBREG_REG (src)),
7063 GET_MODE (src)))
7064 && (REG_P (dest)
7065 || (GET_CODE (dest) == SUBREG
7066 && REG_P (SUBREG_REG (dest)))))
7068 SUBST (SET_DEST (x),
7069 gen_lowpart (GET_MODE (SUBREG_REG (src)),
7070 dest));
7071 SUBST (SET_SRC (x), SUBREG_REG (src));
7073 src = SET_SRC (x), dest = SET_DEST (x);
7076 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
7077 in SRC. */
7078 if (dest == cc0_rtx
7079 && partial_subreg_p (src)
7080 && subreg_lowpart_p (src))
7082 rtx inner = SUBREG_REG (src);
7083 machine_mode inner_mode = GET_MODE (inner);
7085 /* Here we make sure that we don't have a sign bit on. */
7086 if (val_signbit_known_clear_p (GET_MODE (src),
7087 nonzero_bits (inner, inner_mode)))
7089 SUBST (SET_SRC (x), inner);
7090 src = SET_SRC (x);
7094 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7095 would require a paradoxical subreg. Replace the subreg with a
7096 zero_extend to avoid the reload that would otherwise be required.
7097 Don't do this unless we have a scalar integer mode, otherwise the
7098 transformation is incorrect. */
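/* For example, on a target whose load_extend_op for QImode is
   ZERO_EXTEND (an assumption; this varies per target),

       (subreg:SI (mem:QI addr) 0)

   is rewritten as (zero_extend:SI (mem:QI addr)).  */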
7100 enum rtx_code extend_op;
7101 if (paradoxical_subreg_p (src)
7102 && MEM_P (SUBREG_REG (src))
7103 && SCALAR_INT_MODE_P (GET_MODE (src))
7104 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7106 SUBST (SET_SRC (x),
7107 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7109 src = SET_SRC (x);
7112 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7113 are comparing an item known to be 0 or -1 against 0, use a logical
7114 operation instead. Check for one of the arms being an IOR of the other
7115 arm with some value. We compute three terms to be IOR'ed together. In
7116 practice, at most two will be nonzero. Then we do the IOR's. */
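/* In the simplest case, with A known to be 0 or -1 and neither arm an
   IOR of the other, (if_then_else (ne A (const_int 0)) B C) comes out as

       (ior (and A B) (and (not A) C))

   where the AND with the all-ones or all-zeros mask A selects the
   proper arm.  */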
7118 if (GET_CODE (dest) != PC
7119 && GET_CODE (src) == IF_THEN_ELSE
7120 && is_int_mode (GET_MODE (src), &int_mode)
7121 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7122 && XEXP (XEXP (src, 0), 1) == const0_rtx
7123 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7124 && (!HAVE_conditional_move
7125 || ! can_conditionally_move_p (int_mode))
7126 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7127 == GET_MODE_PRECISION (int_mode))
7128 && ! side_effects_p (src))
7130 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7131 ? XEXP (src, 1) : XEXP (src, 2));
7132 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7133 ? XEXP (src, 2) : XEXP (src, 1));
7134 rtx term1 = const0_rtx, term2, term3;
7136 if (GET_CODE (true_rtx) == IOR
7137 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7138 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7139 else if (GET_CODE (true_rtx) == IOR
7140 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7141 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7142 else if (GET_CODE (false_rtx) == IOR
7143 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7144 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7145 else if (GET_CODE (false_rtx) == IOR
7146 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7147 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7149 term2 = simplify_gen_binary (AND, int_mode,
7150 XEXP (XEXP (src, 0), 0), true_rtx);
7151 term3 = simplify_gen_binary (AND, int_mode,
7152 simplify_gen_unary (NOT, int_mode,
7153 XEXP (XEXP (src, 0), 0),
7154 int_mode),
7155 false_rtx);
7157 SUBST (SET_SRC (x),
7158 simplify_gen_binary (IOR, int_mode,
7159 simplify_gen_binary (IOR, int_mode,
7160 term1, term2),
7161 term3));
7163 src = SET_SRC (x);
7166 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7167 whole thing fail. */
7168 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7169 return src;
7170 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7171 return dest;
7172 else
7173 /* Convert this into a field assignment operation, if possible. */
7174 return make_field_assignment (x);
7177 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7178 result. */
7180 static rtx
7181 simplify_logical (rtx x)
7183 rtx op0 = XEXP (x, 0);
7184 rtx op1 = XEXP (x, 1);
7185 scalar_int_mode mode;
7187 switch (GET_CODE (x))
7189 case AND:
7190 /* We can call simplify_and_const_int only if we don't lose
7191 any (sign) bits when converting INTVAL (op1) to
7192 "unsigned HOST_WIDE_INT". */
7193 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7194 && CONST_INT_P (op1)
7195 && (HWI_COMPUTABLE_MODE_P (mode)
7196 || INTVAL (op1) > 0))
7198 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7199 if (GET_CODE (x) != AND)
7200 return x;
7202 op0 = XEXP (x, 0);
7203 op1 = XEXP (x, 1);
7206 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7207 apply the distributive law and then the inverse distributive
7208 law to see if things simplify. */
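/* E.g. (and (ior A B) C) is first distributed to
   (ior (and A C) (and B C)); if A happens to be (not C), the first term
   folds to zero and the whole expression collapses to (and B C).
   distribute_and_simplify_rtx keeps the result only when the rewritten
   form turns out cheaper.  */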
7209 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7211 rtx result = distribute_and_simplify_rtx (x, 0);
7212 if (result)
7213 return result;
7215 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7217 rtx result = distribute_and_simplify_rtx (x, 1);
7218 if (result)
7219 return result;
7221 break;
7223 case IOR:
7224 /* If we have (ior (and A B) C), apply the distributive law and then
7225 the inverse distributive law to see if things simplify. */
7227 if (GET_CODE (op0) == AND)
7229 rtx result = distribute_and_simplify_rtx (x, 0);
7230 if (result)
7231 return result;
7234 if (GET_CODE (op1) == AND)
7236 rtx result = distribute_and_simplify_rtx (x, 1);
7237 if (result)
7238 return result;
7240 break;
7242 default:
7243 gcc_unreachable ();
7246 return x;
7249 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7250 operations" because they can be replaced with two more basic operations.
7251 ZERO_EXTEND is also considered "compound" because it can be replaced with
7252 an AND operation, which is simpler, though only one operation.
7254 The function expand_compound_operation is called with an rtx expression
7255 and will convert it to the appropriate shifts and AND operations,
7256 simplifying at each stage.
7258 The function make_compound_operation is called to convert an expression
7259 consisting of shifts and ANDs into the equivalent compound expression.
7260 It is the inverse of this function, loosely speaking. */
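/* Loose sketch of the two directions (assuming 32-bit SImode; X is a
   placeholder register):

       (zero_extend:SI (subreg:QI (reg:SI X) 0))
	 <->  (and:SI (reg:SI X) (const_int 255))

       (sign_extract:SI X (const_int 8) (const_int 0))
	 <->  (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))

   expand_compound_operation goes left to right and
   make_compound_operation right to left.  */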
7262 static rtx
7263 expand_compound_operation (rtx x)
7265 unsigned HOST_WIDE_INT pos = 0, len;
7266 int unsignedp = 0;
7267 unsigned int modewidth;
7268 rtx tem;
7269 scalar_int_mode inner_mode;
7271 switch (GET_CODE (x))
7273 case ZERO_EXTEND:
7274 unsignedp = 1;
7275 /* FALLTHRU */
7276 case SIGN_EXTEND:
7277 /* We can't necessarily use a const_int for a multiword mode;
7278 it depends on implicitly extending the value.
7279 Since we don't know the right way to extend it,
7280 we can't tell whether the implicit way is right.
7282 Even for a mode that is no wider than a const_int,
7283 we can't win, because we need to sign extend one of its bits through
7284 the rest of it, and we don't know which bit. */
7285 if (CONST_INT_P (XEXP (x, 0)))
7286 return x;
7288 /* Reject modes that aren't scalar integers because turning vector
7289 or complex modes into shifts causes problems. */
7290 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7291 return x;
7293 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7294 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7295 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7296 reloaded. If not for that, MEM's would very rarely be safe.
7298 Reject modes bigger than a word, because we might not be able
7299 to reference a two-register group starting with an arbitrary register
7300 (and currently gen_lowpart might crash for a SUBREG). */
7302 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7303 return x;
7305 len = GET_MODE_PRECISION (inner_mode);
7306 /* If the inner object has VOIDmode (the only way this can happen
7307 is if it is an ASM_OPERANDS), we can't do anything since we don't
7308 know how much masking to do. */
7309 if (len == 0)
7310 return x;
7312 break;
7314 case ZERO_EXTRACT:
7315 unsignedp = 1;
7317 /* fall through */
7319 case SIGN_EXTRACT:
7320 /* If the operand is a CLOBBER, just return it. */
7321 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7322 return XEXP (x, 0);
7324 if (!CONST_INT_P (XEXP (x, 1))
7325 || !CONST_INT_P (XEXP (x, 2)))
7326 return x;
7328 /* Reject modes that aren't scalar integers because turning vector
7329 or complex modes into shifts causes problems. */
7330 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7331 return x;
7333 len = INTVAL (XEXP (x, 1));
7334 pos = INTVAL (XEXP (x, 2));
7336 /* This should stay within the object being extracted, fail otherwise. */
7337 if (len + pos > GET_MODE_PRECISION (inner_mode))
7338 return x;
7340 if (BITS_BIG_ENDIAN)
7341 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7343 break;
7345 default:
7346 return x;
7349 /* We've rejected non-scalar operations by now. */
7350 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7352 /* Convert sign extension to zero extension if we know that the high
7353 bit is not set, as this is easier to optimize. It will be converted
7354 back to a cheaper alternative in make_extraction. */
7355 if (GET_CODE (x) == SIGN_EXTEND
7356 && HWI_COMPUTABLE_MODE_P (mode)
7357 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7358 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7359 == 0))
7361 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7362 rtx temp2 = expand_compound_operation (temp);
7364 /* Make sure this is a profitable operation. */
7365 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7366 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7367 return temp2;
7368 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7369 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7370 return temp;
7371 else
7372 return x;
7375 /* We can optimize some special cases of ZERO_EXTEND. */
7376 if (GET_CODE (x) == ZERO_EXTEND)
7378 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7379 know that the last value didn't have any inappropriate bits
7380 set. */
7381 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7382 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7383 && HWI_COMPUTABLE_MODE_P (mode)
7384 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7385 & ~GET_MODE_MASK (inner_mode)) == 0)
7386 return XEXP (XEXP (x, 0), 0);
7388 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7389 if (GET_CODE (XEXP (x, 0)) == SUBREG
7390 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7391 && subreg_lowpart_p (XEXP (x, 0))
7392 && HWI_COMPUTABLE_MODE_P (mode)
7393 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7394 & ~GET_MODE_MASK (inner_mode)) == 0)
7395 return SUBREG_REG (XEXP (x, 0));
7397 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7398 is a comparison and STORE_FLAG_VALUE permits. This is like
7399 the first case, but it works even when MODE is larger
7400 than HOST_WIDE_INT. */
7401 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7402 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7403 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7404 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7405 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7406 return XEXP (XEXP (x, 0), 0);
7408 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7409 if (GET_CODE (XEXP (x, 0)) == SUBREG
7410 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7411 && subreg_lowpart_p (XEXP (x, 0))
7412 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7413 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7414 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7415 return SUBREG_REG (XEXP (x, 0));
7419 /* If we reach here, we want to return a pair of shifts. The inner
7420 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7421 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7422 logical depending on the value of UNSIGNEDP.
7424 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7425 converted into an AND of a shift.
7427 We must check for the case where the left shift would have a negative
7428 count. This can happen in a case like (x >> 31) & 255 on machines
7429 that can't shift by a constant. On those machines, we would first
7430 combine the shift with the AND to produce a variable-position
7431 extraction. Then the constant of 31 would be substituted in
7432 to produce such a position. */
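/* Concretely, for 32-bit SImode with POS == 8 and LEN == 16 (placeholder
   values), the signed case

       (sign_extract:SI X (const_int 16) (const_int 8))

   becomes (ashiftrt:SI (ashift:SI X (const_int 8)) (const_int 16)): the
   left shift by 32 - 8 - 16 = 8 moves the field to the top, and the
   arithmetic right shift by 32 - 16 = 16 sign-extends it back down.  */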
7434 modewidth = GET_MODE_PRECISION (mode);
7435 if (modewidth >= pos + len)
7437 tem = gen_lowpart (mode, XEXP (x, 0));
7438 if (!tem || GET_CODE (tem) == CLOBBER)
7439 return x;
7440 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7441 tem, modewidth - pos - len);
7442 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7443 mode, tem, modewidth - len);
7445 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7446 tem = simplify_and_const_int (NULL_RTX, mode,
7447 simplify_shift_const (NULL_RTX, LSHIFTRT,
7448 mode, XEXP (x, 0),
7449 pos),
7450 (HOST_WIDE_INT_1U << len) - 1);
7451 else
7452 /* We can't handle any other cases. */
7453 return x;
7455 /* If we couldn't do this for some reason, return the original
7456 expression. */
7457 if (GET_CODE (tem) == CLOBBER)
7458 return x;
7460 return tem;
7463 /* X is a SET which contains an assignment of one object into
7464 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7465 or certain SUBREGS). If possible, convert it into a series of
7466 logical operations.
7468 We half-heartedly support variable positions, but do not at all
7469 support variable lengths. */
7471 static const_rtx
7472 expand_field_assignment (const_rtx x)
7474 rtx inner;
7475 rtx pos; /* Always counts from low bit. */
7476 int len, inner_len;
7477 rtx mask, cleared, masked;
7478 scalar_int_mode compute_mode;
7480 /* Loop until we find something we can't simplify. */
7481 while (1)
7483 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7484 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7486 rtx x0 = XEXP (SET_DEST (x), 0);
7487 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7488 break;
7489 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7490 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7491 MAX_MODE_INT);
7493 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7494 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7496 inner = XEXP (SET_DEST (x), 0);
7497 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7498 break;
7500 len = INTVAL (XEXP (SET_DEST (x), 1));
7501 pos = XEXP (SET_DEST (x), 2);
7503 /* A constant position should stay within the width of INNER. */
7504 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7505 break;
7507 if (BITS_BIG_ENDIAN)
7509 if (CONST_INT_P (pos))
7510 pos = GEN_INT (inner_len - len - INTVAL (pos));
7511 else if (GET_CODE (pos) == MINUS
7512 && CONST_INT_P (XEXP (pos, 1))
7513 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7514 /* If position is ADJUST - X, new position is X. */
7515 pos = XEXP (pos, 0);
7516 else
7517 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7518 gen_int_mode (inner_len - len,
7519 GET_MODE (pos)),
7520 pos);
7524 /* If the destination is a subreg that overwrites the whole of the inner
7525 register, we can move the subreg to the source. */
7526 else if (GET_CODE (SET_DEST (x)) == SUBREG
7527 /* We need SUBREGs to compute nonzero_bits properly. */
7528 && nonzero_sign_valid
7529 && !read_modify_subreg_p (SET_DEST (x)))
7531 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7532 gen_lowpart
7533 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7534 SET_SRC (x)));
7535 continue;
7537 else
7538 break;
7540 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7541 inner = SUBREG_REG (inner);
7543 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7544 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7546 /* Don't do anything for vector or complex integral types. */
7547 if (! FLOAT_MODE_P (GET_MODE (inner)))
7548 break;
7550 /* Try to find an integral mode to pun with. */
7551 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7552 .exists (&compute_mode))
7553 break;
7555 inner = gen_lowpart (compute_mode, inner);
7558 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7559 if (len >= HOST_BITS_PER_WIDE_INT)
7560 break;
7562 /* Don't try to compute in too wide unsupported modes. */
7563 if (!targetm.scalar_mode_supported_p (compute_mode))
7564 break;
7566 /* Now compute the equivalent expression. Make a copy of INNER
7567 for the SET_DEST in case it is a MEM into which we will substitute;
7568 we don't want shared RTL in that case. */
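/* For a sketch with placeholder operands,
   (set (zero_extract:SI X (const_int 8) (const_int 4)) Y) expands to
   roughly

       X = (X & ~(255 << 4)) | ((Y & 255) << 4)

   i.e. CLEARED knocks the old field out of INNER and MASKED shifts the
   new value into place before the final IOR.  */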
7569 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7570 compute_mode);
7571 cleared = simplify_gen_binary (AND, compute_mode,
7572 simplify_gen_unary (NOT, compute_mode,
7573 simplify_gen_binary (ASHIFT,
7574 compute_mode,
7575 mask, pos),
7576 compute_mode),
7577 inner);
7578 masked = simplify_gen_binary (ASHIFT, compute_mode,
7579 simplify_gen_binary (
7580 AND, compute_mode,
7581 gen_lowpart (compute_mode, SET_SRC (x)),
7582 mask),
7583 pos);
7585 x = gen_rtx_SET (copy_rtx (inner),
7586 simplify_gen_binary (IOR, compute_mode,
7587 cleared, masked));
7590 return x;
7593 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7594 it is an RTX that represents the (variable) starting position; otherwise,
7595 POS is the (constant) starting bit position. Both are counted from the LSB.
7597 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7599 IN_DEST is nonzero if this is a reference in the destination of a SET.
7600 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7601 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7602 be used.
7604 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7605 ZERO_EXTRACT should be built even for bits starting at bit 0.
7607 MODE is the desired mode of the result (if IN_DEST == 0).
7609 The result is an RTX for the extraction or NULL_RTX if the target
7610 can't handle it. */
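/* A typical call, with placeholder arguments: extracting 7 bits starting
   at bit 3 of register R as an unsigned SImode value,

       make_extraction (SImode, R, 3, NULL_RTX, 7, 1, 0, 0)

   usually yields (zero_extract:SI R (const_int 7) (const_int 3)), though
   the exact shape depends on the target's extraction patterns.  */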
7612 static rtx
7613 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7614 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7615 int in_dest, int in_compare)
7617 /* This mode describes the size of the storage area
7618 to fetch the overall value from. Within that, we
7619 ignore the POS lowest bits, etc. */
7620 machine_mode is_mode = GET_MODE (inner);
7621 machine_mode inner_mode;
7622 scalar_int_mode wanted_inner_mode;
7623 scalar_int_mode wanted_inner_reg_mode = word_mode;
7624 scalar_int_mode pos_mode = word_mode;
7625 machine_mode extraction_mode = word_mode;
7626 rtx new_rtx = 0;
7627 rtx orig_pos_rtx = pos_rtx;
7628 HOST_WIDE_INT orig_pos;
7630 if (pos_rtx && CONST_INT_P (pos_rtx))
7631 pos = INTVAL (pos_rtx), pos_rtx = 0;
7633 if (GET_CODE (inner) == SUBREG
7634 && subreg_lowpart_p (inner)
7635 && (paradoxical_subreg_p (inner)
7636 /* If trying or potentially trying to extract
7637 bits outside of is_mode, don't look through
7638 non-paradoxical SUBREGs. See PR82192. */
7639 || (pos_rtx == NULL_RTX
7640 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7642 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7643 consider just the QI as the memory to extract from.
7644 The subreg adds or removes high bits; its mode is
7645 irrelevant to the meaning of this extraction,
7646 since POS and LEN count from the lsb. */
7647 if (MEM_P (SUBREG_REG (inner)))
7648 is_mode = GET_MODE (SUBREG_REG (inner));
7649 inner = SUBREG_REG (inner);
7651 else if (GET_CODE (inner) == ASHIFT
7652 && CONST_INT_P (XEXP (inner, 1))
7653 && pos_rtx == 0 && pos == 0
7654 && len > UINTVAL (XEXP (inner, 1)))
7656 /* We're extracting the least significant bits of an rtx
7657 (ashift X (const_int C)), where LEN > C. Extract the
7658 least significant (LEN - C) bits of X, giving an rtx
7659 whose mode is MODE, then shift it left C times. */
7660 new_rtx = make_extraction (mode, XEXP (inner, 0),
7661 0, 0, len - INTVAL (XEXP (inner, 1)),
7662 unsignedp, in_dest, in_compare);
7663 if (new_rtx != 0)
7664 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7666 else if (GET_CODE (inner) == TRUNCATE
7667 /* If trying or potentially trying to extract
7668 bits outside of is_mode, don't look through
7669 TRUNCATE. See PR82192. */
7670 && pos_rtx == NULL_RTX
7671 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7672 inner = XEXP (inner, 0);
7674 inner_mode = GET_MODE (inner);
7676 /* See if this can be done without an extraction. We never can if the
7677 width of the field is not the same as that of some integer mode. For
7678 registers, we can only avoid the extraction if the position is at the
7679 low-order bit and this is either not in the destination or we have the
7680 appropriate STRICT_LOW_PART operation available.
7682 For MEM, we can avoid an extract if the field starts on an appropriate
7683 boundary and we can change the mode of the memory reference. */
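/* E.g. extracting 8 bits at bit 0 of (mem:SI addr), i.e. byte 0 on a
   little-endian target (an assumption here), needs no ZERO_EXTRACT at
   all: the reference can simply be narrowed to (mem:QI addr) and then
   zero- or sign-extended to the wanted mode.  */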
7685 scalar_int_mode tmode;
7686 if (int_mode_for_size (len, 1).exists (&tmode)
7687 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7688 && !MEM_P (inner)
7689 && (pos == 0 || REG_P (inner))
7690 && (inner_mode == tmode
7691 || !REG_P (inner)
7692 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7693 || reg_truncated_to_mode (tmode, inner))
7694 && (! in_dest
7695 || (REG_P (inner)
7696 && have_insn_for (STRICT_LOW_PART, tmode))))
7697 || (MEM_P (inner) && pos_rtx == 0
7698 && (pos
7699 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7700 : BITS_PER_UNIT)) == 0
7701 /* We can't do this if we are widening INNER_MODE (it
7702 may not be aligned, for one thing). */
7703 && !paradoxical_subreg_p (tmode, inner_mode)
7704 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7705 && (inner_mode == tmode
7706 || (! mode_dependent_address_p (XEXP (inner, 0),
7707 MEM_ADDR_SPACE (inner))
7708 && ! MEM_VOLATILE_P (inner))))))
7710 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7711 field. If the original and current mode are the same, we need not
7712 adjust the offset. Otherwise, we do if bytes big endian.
7714 If INNER is not a MEM, get a piece consisting of just the field
7715 of interest (in this case POS % BITS_PER_WORD must be 0). */
7717 if (MEM_P (inner))
7719 poly_int64 offset;
7721 /* POS counts from lsb, but make OFFSET count in memory order. */
7722 if (BYTES_BIG_ENDIAN)
7723 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7724 - len - pos);
7725 else
7726 offset = pos / BITS_PER_UNIT;
7728 new_rtx = adjust_address_nv (inner, tmode, offset);
7730 else if (REG_P (inner))
7732 if (tmode != inner_mode)
7734 /* We can't call gen_lowpart in a DEST since we
7735 always want a SUBREG (see below) and it would sometimes
7736 return a new hard register. */
7737 if (pos || in_dest)
7739 poly_uint64 offset
7740 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7742 /* Avoid creating invalid subregs, for example when
7743 simplifying (x>>32)&255. */
7744 if (!validate_subreg (tmode, inner_mode, inner, offset))
7745 return NULL_RTX;
7747 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7749 else
7750 new_rtx = gen_lowpart (tmode, inner);
7752 else
7753 new_rtx = inner;
7755 else
7756 new_rtx = force_to_mode (inner, tmode,
7757 len >= HOST_BITS_PER_WIDE_INT
7758 ? HOST_WIDE_INT_M1U
7759 : (HOST_WIDE_INT_1U << len) - 1, 0);
7761 /* If this extraction is going into the destination of a SET,
7762 make a STRICT_LOW_PART unless we made a MEM. */
7764 if (in_dest)
7765 return (MEM_P (new_rtx) ? new_rtx
7766 : (GET_CODE (new_rtx) != SUBREG
7767 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7768 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7770 if (mode == tmode)
7771 return new_rtx;
7773 if (CONST_SCALAR_INT_P (new_rtx))
7774 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7775 mode, new_rtx, tmode);
7777 /* If we know that no extraneous bits are set, and that the high
7778 bit is not set, convert the extraction to the cheaper of
7779 sign and zero extension, which are equivalent in these cases. */
7780 if (flag_expensive_optimizations
7781 && (HWI_COMPUTABLE_MODE_P (tmode)
7782 && ((nonzero_bits (new_rtx, tmode)
7783 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7784 == 0)))
7786 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7787 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7789 /* Prefer ZERO_EXTENSION, since it gives more information to
7790 backends. */
7791 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7792 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7793 return temp;
7794 return temp1;
7797 /* Otherwise, sign- or zero-extend unless we already are in the
7798 proper mode. */
7800 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7801 mode, new_rtx));
7804 /* Unless this is a COMPARE or we have a funny memory reference,
7805 don't do anything with zero-extending field extracts starting at
7806 the low-order bit since they are simple AND operations. */
7807 if (pos_rtx == 0 && pos == 0 && ! in_dest
7808 && ! in_compare && unsignedp)
7809 return 0;
7811 /* If INNER is a MEM, reject this if we would be spanning bytes or
7812 if the position is not a constant and the length is not 1. In all
7813 other cases, we would only be going outside our object in cases when
7814 an original shift would have been undefined. */
7815 if (MEM_P (inner)
7816 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7817 || (pos_rtx != 0 && len != 1)))
7818 return 0;
7820 enum extraction_pattern pattern = (in_dest ? EP_insv
7821 : unsignedp ? EP_extzv : EP_extv);
7823 /* If INNER is not from memory, we want it to have the mode of a register
7824 extraction pattern's structure operand, or word_mode if there is no
7825 such pattern. The same applies to extraction_mode and pos_mode
7826 and their respective operands.
7828 For memory, assume that the desired extraction_mode and pos_mode
7829 are the same as for a register operation, since at present we don't
7830 have named patterns for aligned memory structures. */
7831 class extraction_insn insn;
7832 unsigned int inner_size;
7833 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7834 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7836 wanted_inner_reg_mode = insn.struct_mode.require ();
7837 pos_mode = insn.pos_mode;
7838 extraction_mode = insn.field_mode;
7841 /* Never narrow an object, since that might not be safe. */
7843 if (mode != VOIDmode
7844 && partial_subreg_p (extraction_mode, mode))
7845 extraction_mode = mode;
7847 /* Punt if len is too large for extraction_mode. */
7848 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7849 return NULL_RTX;
7851 if (!MEM_P (inner))
7852 wanted_inner_mode = wanted_inner_reg_mode;
7853 else
7855 /* Be careful not to go beyond the extracted object and maintain the
7856 natural alignment of the memory. */
7857 wanted_inner_mode = smallest_int_mode_for_size (len);
7858 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7859 > GET_MODE_BITSIZE (wanted_inner_mode))
7860 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7863 orig_pos = pos;
7865 if (BITS_BIG_ENDIAN)
7867 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7868 BITS_BIG_ENDIAN style. If position is constant, compute new
7869 position. Otherwise, build subtraction.
7870 Note that POS is relative to the mode of the original argument.
7871 If it's a MEM we need to recompute POS relative to that.
7872 However, if we're extracting from (or inserting into) a register,
7873 we want to recompute POS relative to wanted_inner_mode. */
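/* E.g. with WIDTH == 32, LEN == 8 and a constant POS == 4, the
   BITS_BIG_ENDIAN position is 32 - 8 - 4 == 20.  */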
7874 int width;
7875 if (!MEM_P (inner))
7876 width = GET_MODE_BITSIZE (wanted_inner_mode);
7877 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7878 return NULL_RTX;
7880 if (pos_rtx == 0)
7881 pos = width - len - pos;
7882 else
7883 pos_rtx
7884 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7885 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7886 pos_rtx);
7887 /* POS may be less than 0 now, but we check for that below.
7888 Note that it can only be less than 0 if !MEM_P (inner). */
7891 /* If INNER has a wider mode, and this is a constant extraction, try to
7892 make it smaller and adjust the byte to point to the byte containing
7893 the value. */
7894 if (wanted_inner_mode != VOIDmode
7895 && inner_mode != wanted_inner_mode
7896 && ! pos_rtx
7897 && partial_subreg_p (wanted_inner_mode, is_mode)
7898 && MEM_P (inner)
7899 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7900 && ! MEM_VOLATILE_P (inner))
7902 poly_int64 offset = 0;
7904 /* The computations below will be correct if the machine is big
7905 endian in both bits and bytes or little endian in bits and bytes.
7906 If it is mixed, we must adjust. */
7908 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7909 adjust OFFSET to compensate. */
7910 if (BYTES_BIG_ENDIAN
7911 && paradoxical_subreg_p (is_mode, inner_mode))
7912 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7914 /* We can now move to the desired byte. */
7915 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7916 * GET_MODE_SIZE (wanted_inner_mode);
7917 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7919 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7920 && is_mode != wanted_inner_mode)
7921 offset = (GET_MODE_SIZE (is_mode)
7922 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7924 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7927 /* If INNER is not memory, get it into the proper mode. If we are changing
7928 its mode, POS must be a constant and smaller than the size of the new
7929 mode. */
7930 else if (!MEM_P (inner))
7932 /* On the LHS, don't create paradoxical subregs implicitly truncating
7933 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7934 if (in_dest
7935 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7936 wanted_inner_mode))
7937 return NULL_RTX;
7939 if (GET_MODE (inner) != wanted_inner_mode
7940 && (pos_rtx != 0
7941 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7942 return NULL_RTX;
7944 if (orig_pos < 0)
7945 return NULL_RTX;
7947 inner = force_to_mode (inner, wanted_inner_mode,
7948 pos_rtx
7949 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7950 ? HOST_WIDE_INT_M1U
7951 : (((HOST_WIDE_INT_1U << len) - 1)
7952 << orig_pos), 0);
7956 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7957 have to zero extend. Otherwise, we can just use a SUBREG.
7959 We dealt with constant rtxes earlier, so pos_rtx cannot
7960 have VOIDmode at this point. */
7961 if (pos_rtx != 0
7962 && (GET_MODE_SIZE (pos_mode)
7963 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7965 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7966 GET_MODE (pos_rtx));
7968 /* If we know that no extraneous bits are set, and that the high
7969 bit is not set, convert the extraction to the cheaper one, either
7970 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7971 cases. */
7972 if (flag_expensive_optimizations
7973 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7974 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7975 & ~(((unsigned HOST_WIDE_INT)
7976 GET_MODE_MASK (GET_MODE (pos_rtx)))
7977 >> 1))
7978 == 0)))
7980 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7981 GET_MODE (pos_rtx));
7983 /* Prefer ZERO_EXTENSION, since it gives more information to
7984 backends. */
7985 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7986 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7987 temp = temp1;
7989 pos_rtx = temp;
7992 /* Make POS_RTX unless we already have it and it is correct. If we don't
7993 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7994 be a CONST_INT. */
7995 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7996 pos_rtx = orig_pos_rtx;
7998 else if (pos_rtx == 0)
7999 pos_rtx = GEN_INT (pos);
8001 /* Make the required operation. See if we can use an existing rtx. */
8002 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
8003 extraction_mode, inner, GEN_INT (len), pos_rtx);
8004 if (! in_dest)
8005 new_rtx = gen_lowpart (mode, new_rtx);
8007 return new_rtx;
8010 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
8011 can be commuted with any other operations in X. Return X without
8012 that shift if so. */
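/* For instance, with COUNT == 3 and a placeholder register R,
   (plus:SI (ashift:SI R (const_int 3)) (const_int 16)) commutes to
   (plus:SI R (const_int 2)), because 16 has its low three bits clear;
   shifting the result left by 3 recreates the original value.  */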
8014 static rtx
8015 extract_left_shift (scalar_int_mode mode, rtx x, int count)
8017 enum rtx_code code = GET_CODE (x);
8018 rtx tem;
8020 switch (code)
8022 case ASHIFT:
8023 /* This is the shift itself. If it is wide enough, we will return
8024 either the value being shifted if the shift count is equal to
8025 COUNT or a shift for the difference. */
8026 if (CONST_INT_P (XEXP (x, 1))
8027 && INTVAL (XEXP (x, 1)) >= count)
8028 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
8029 INTVAL (XEXP (x, 1)) - count);
8030 break;
8032 case NEG: case NOT:
8033 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8034 return simplify_gen_unary (code, mode, tem, mode);
8036 break;
8038 case PLUS: case IOR: case XOR: case AND:
8039 /* If we can safely shift this constant and we find the inner shift,
8040 make a new operation. */
8041 if (CONST_INT_P (XEXP (x, 1))
8042 && (UINTVAL (XEXP (x, 1))
8043 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
8044 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8046 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
8047 return simplify_gen_binary (code, mode, tem,
8048 gen_int_mode (val, mode));
8050 break;
8052 default:
8053 break;
8056 return 0;
8059 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
8060 level of the expression and MODE is its mode. IN_CODE is as for
8061 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
8062 that should be used when recursing on operands of *X_PTR.
8064 There are two possible actions:
8066 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
8067 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
8069 - Return a new rtx, which the caller returns directly. */
8071 static rtx
8072 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
8073 enum rtx_code in_code,
8074 enum rtx_code *next_code_ptr)
8076 rtx x = *x_ptr;
8077 enum rtx_code next_code = *next_code_ptr;
8078 enum rtx_code code = GET_CODE (x);
8079 int mode_width = GET_MODE_PRECISION (mode);
8080 rtx rhs, lhs;
8081 rtx new_rtx = 0;
8082 int i;
8083 rtx tem;
8084 scalar_int_mode inner_mode;
8085 bool equality_comparison = false;
8087 if (in_code == EQ)
8089 equality_comparison = true;
8090 in_code = COMPARE;
8093 /* Process depending on the code of this operation. If NEW_RTX is set
8094 nonzero, it will be returned. */
8096 switch (code)
8098 case ASHIFT:
8099 /* Convert shifts by constants into multiplications if inside
8100 an address. */
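/* E.g. inside (mem:SI (plus:SI R1 (ashift:SI R2 (const_int 2)))) the
   shift is rewritten as (mult:SI R2 (const_int 4)), the canonical form
   for scaled index addressing.  */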
8101 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8102 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8103 && INTVAL (XEXP (x, 1)) >= 0)
8105 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8106 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8108 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8109 if (GET_CODE (new_rtx) == NEG)
8111 new_rtx = XEXP (new_rtx, 0);
8112 multval = -multval;
8114 multval = trunc_int_for_mode (multval, mode);
8115 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8117 break;
8119 case PLUS:
8120 lhs = XEXP (x, 0);
8121 rhs = XEXP (x, 1);
8122 lhs = make_compound_operation (lhs, next_code);
8123 rhs = make_compound_operation (rhs, next_code);
8124 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8126 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8127 XEXP (lhs, 1));
8128 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8130 else if (GET_CODE (lhs) == MULT
8131 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8133 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8134 simplify_gen_unary (NEG, mode,
8135 XEXP (lhs, 1),
8136 mode));
8137 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8139 else
8141 SUBST (XEXP (x, 0), lhs);
8142 SUBST (XEXP (x, 1), rhs);
8144 maybe_swap_commutative_operands (x);
8145 return x;
8147 case MINUS:
8148 lhs = XEXP (x, 0);
8149 rhs = XEXP (x, 1);
8150 lhs = make_compound_operation (lhs, next_code);
8151 rhs = make_compound_operation (rhs, next_code);
8152 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8154 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8155 XEXP (rhs, 1));
8156 return simplify_gen_binary (PLUS, mode, tem, lhs);
8158 else if (GET_CODE (rhs) == MULT
8159 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8161 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8162 simplify_gen_unary (NEG, mode,
8163 XEXP (rhs, 1),
8164 mode));
8165 return simplify_gen_binary (PLUS, mode, tem, lhs);
8167 else
8169 SUBST (XEXP (x, 0), lhs);
8170 SUBST (XEXP (x, 1), rhs);
8171 return x;
8174 case AND:
8175 /* If the second operand is not a constant, we can't do anything
8176 with it. */
8177 if (!CONST_INT_P (XEXP (x, 1)))
8178 break;
8180 /* If the constant is a power of two minus one and the first operand
8181 is a logical right shift, make an extraction. */
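/* E.g. (and:SI (lshiftrt:SI X (const_int 3)) (const_int 127)) selects
   bits 3 through 9 of X, so it is rebuilt as
   (zero_extract:SI X (const_int 7) (const_int 3)).  */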
8182 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8183 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8185 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8186 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8187 i, 1, 0, in_code == COMPARE);
8190 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8191 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8192 && subreg_lowpart_p (XEXP (x, 0))
8193 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8194 &inner_mode)
8195 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8196 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8198 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8199 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8200 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8201 XEXP (inner_x0, 1),
8202 i, 1, 0, in_code == COMPARE);
8204 /* If we narrowed the mode when dropping the subreg, then we lose. */
8205 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8206 new_rtx = NULL;
8208 /* If that didn't give anything, see if the AND simplifies on
8209 its own. */
8210 if (!new_rtx && i >= 0)
8212 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8213 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8214 0, in_code == COMPARE);
8217 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8218 else if ((GET_CODE (XEXP (x, 0)) == XOR
8219 || GET_CODE (XEXP (x, 0)) == IOR)
8220 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8221 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8222 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8224 /* Apply the distributive law, and then try to make extractions. */
8225 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8226 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8227 XEXP (x, 1)),
8228 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8229 XEXP (x, 1)));
8230 new_rtx = make_compound_operation (new_rtx, in_code);
8233 /* If we have (and (rotate X C) M) and C is larger than the number
8234 of bits in M, this is an extraction. */
8236 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8237 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8238 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8239 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8241 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8242 new_rtx = make_extraction (mode, new_rtx,
8243 (GET_MODE_PRECISION (mode)
8244 - INTVAL (XEXP (XEXP (x, 0), 1))),
8245 NULL_RTX, i, 1, 0, in_code == COMPARE);
8248 /* On machines without logical shifts, if the operand of the AND is
8249 a logical shift and our mask turns off all the propagated sign
8250 bits, we can replace the logical shift with an arithmetic shift. */
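/* E.g. (and:SI (lshiftrt:SI X (const_int 28)) (const_int 15)): the mask
   15 keeps only the four bits where logical and arithmetic right shifts
   agree, so (and:SI (ashiftrt:SI X (const_int 28)) (const_int 15))
   computes the same value.  */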
8251 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8252 && !have_insn_for (LSHIFTRT, mode)
8253 && have_insn_for (ASHIFTRT, mode)
8254 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8255 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8256 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8257 && mode_width <= HOST_BITS_PER_WIDE_INT)
8259 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8261 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8262 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8263 SUBST (XEXP (x, 0),
8264 gen_rtx_ASHIFTRT (mode,
8265 make_compound_operation (XEXP (XEXP (x, 0), 0),
8268 next_code),
8269 XEXP (XEXP (x, 0), 1)));
8272 /* If the constant is one less than a power of two, this might be
8273 representable by an extraction even if no shift is present.
8274 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8275 we are in a COMPARE. */
8276 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8277 new_rtx = make_extraction (mode,
8278 make_compound_operation (XEXP (x, 0),
8279 next_code),
8280 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8282 /* If we are in a comparison and this is an AND with a power of two,
8283 convert this into the appropriate bit extract. */
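/* E.g. comparing (and:SI X (const_int 8)) against zero tests a single
   bit, so it becomes (zero_extract:SI X (const_int 1) (const_int 3)).  */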
8284 else if (in_code == COMPARE
8285 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8286 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8287 new_rtx = make_extraction (mode,
8288 make_compound_operation (XEXP (x, 0),
8289 next_code),
8290 i, NULL_RTX, 1, 1, 0, 1);
8292 /* If one operand is a paradoxical subreg of a register or memory and
8293 the constant (limited to the smaller mode) has only zero bits where
8294 the sub expression has known zero bits, this can be expressed as
8295 a zero_extend. */
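/* E.g. (and:DI (subreg:DI (reg:SI R) 0) (const_int 0xffffffff)) keeps
   every bit the SImode value can carry, so it is equivalent to
   (zero_extend:DI (reg:SI R)).  */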
8296 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8298 rtx sub;
8300 sub = XEXP (XEXP (x, 0), 0);
8301 machine_mode sub_mode = GET_MODE (sub);
8302 int sub_width;
8303 if ((REG_P (sub) || MEM_P (sub))
8304 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8305 && sub_width < mode_width)
8307 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8308 unsigned HOST_WIDE_INT mask;
8310 /* The original AND constant with all the known zero bits set. */
8311 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8312 if ((mask & mode_mask) == mode_mask)
8314 new_rtx = make_compound_operation (sub, next_code);
8315 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8316 1, 0, in_code == COMPARE);
8321 break;
8323 case LSHIFTRT:
8324 /* If the sign bit is known to be zero, replace this with an
8325 arithmetic shift. */
8326 if (have_insn_for (ASHIFTRT, mode)
8327 && ! have_insn_for (LSHIFTRT, mode)
8328 && mode_width <= HOST_BITS_PER_WIDE_INT
8329 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8331 new_rtx = gen_rtx_ASHIFTRT (mode,
8332 make_compound_operation (XEXP (x, 0),
8333 next_code),
8334 XEXP (x, 1));
8335 break;
8338 /* fall through */
8340 case ASHIFTRT:
8341 lhs = XEXP (x, 0);
8342 rhs = XEXP (x, 1);
8344 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8345 this is a SIGN_EXTRACT. */
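/* E.g. with 32-bit SImode, (ashiftrt:SI (ashift:SI X (const_int 24))
   (const_int 26)) sign-extends bits 2 through 7 of X, i.e.
   (sign_extract:SI X (const_int 6) (const_int 2)).  */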
8346 if (CONST_INT_P (rhs)
8347 && GET_CODE (lhs) == ASHIFT
8348 && CONST_INT_P (XEXP (lhs, 1))
8349 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8350 && INTVAL (XEXP (lhs, 1)) >= 0
8351 && INTVAL (rhs) < mode_width)
8353 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8354 new_rtx = make_extraction (mode, new_rtx,
8355 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8356 NULL_RTX, mode_width - INTVAL (rhs),
8357 code == LSHIFTRT, 0, in_code == COMPARE);
8358 break;
8361 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8362 If so, try to merge the shifts into a SIGN_EXTEND. We could
8363 also do this for some cases of SIGN_EXTRACT, but it doesn't
8364 seem worth the effort; the case checked for occurs on Alpha. */
8366 if (!OBJECT_P (lhs)
8367 && ! (GET_CODE (lhs) == SUBREG
8368 && (OBJECT_P (SUBREG_REG (lhs))))
8369 && CONST_INT_P (rhs)
8370 && INTVAL (rhs) >= 0
8371 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8372 && INTVAL (rhs) < mode_width
8373 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8374 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8375 next_code),
8376 0, NULL_RTX, mode_width - INTVAL (rhs),
8377 code == LSHIFTRT, 0, in_code == COMPARE);
8379 break;
8381 case SUBREG:
8382 /* Call ourselves recursively on the inner expression. If we are
8383 narrowing the object and it has a different RTL code from
8384 what it originally did, do this SUBREG as a force_to_mode. */
8386 rtx inner = SUBREG_REG (x), simplified;
8387 enum rtx_code subreg_code = in_code;
8389 /* If the SUBREG is masking of a logical right shift,
8390 make an extraction. */
8391 if (GET_CODE (inner) == LSHIFTRT
8392 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8393 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8394 && CONST_INT_P (XEXP (inner, 1))
8395 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8396 && subreg_lowpart_p (x))
8398 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8399 int width = GET_MODE_PRECISION (inner_mode)
8400 - INTVAL (XEXP (inner, 1));
8401 if (width > mode_width)
8402 width = mode_width;
8403 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8404 width, 1, 0, in_code == COMPARE);
8405 break;
8408 /* If in_code is COMPARE, it isn't always safe to pass it through
8409 to the recursive make_compound_operation call. */
8410 if (subreg_code == COMPARE
8411 && (!subreg_lowpart_p (x)
8412 || GET_CODE (inner) == SUBREG
8413 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8414 is (const_int 0), rather than
8415 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8416 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8417 for non-equality comparisons against 0 is not equivalent
8418 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8419 || (GET_CODE (inner) == AND
8420 && CONST_INT_P (XEXP (inner, 1))
8421 && partial_subreg_p (x)
8422 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8423 >= GET_MODE_BITSIZE (mode) - 1)))
8424 subreg_code = SET;
8426 tem = make_compound_operation (inner, subreg_code);
8428 simplified
8429 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8430 if (simplified)
8431 tem = simplified;
8433 if (GET_CODE (tem) != GET_CODE (inner)
8434 && partial_subreg_p (x)
8435 && subreg_lowpart_p (x))
8437 rtx newer
8438 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8440 /* If we have something other than a SUBREG, we might have
8441 done an expansion, so rerun ourselves. */
8442 if (GET_CODE (newer) != SUBREG)
8443 newer = make_compound_operation (newer, in_code);
8445 /* force_to_mode can expand compounds. If it just re-expanded
8446 the compound, use gen_lowpart to convert to the desired
8447 mode. */
8448 if (rtx_equal_p (newer, x)
8449 /* Likewise if it re-expanded the compound only partially.
8450 This happens for SUBREG of ZERO_EXTRACT if they extract
8451 the same number of bits. */
8452 || (GET_CODE (newer) == SUBREG
8453 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8454 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8455 && GET_CODE (inner) == AND
8456 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8457 return gen_lowpart (GET_MODE (x), tem);
8459 return newer;
8462 if (simplified)
8463 return tem;
8465 break;
8467 default:
8468 break;
8471 if (new_rtx)
8472 *x_ptr = gen_lowpart (mode, new_rtx);
8473 *next_code_ptr = next_code;
8474 return NULL_RTX;
8477 /* Look at the expression rooted at X. Look for expressions
8478 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8479 Form these expressions.
8481 Return the new rtx, usually just X.
8483 Also, for machines like the VAX that don't have logical shift insns,
8484 try to convert logical to arithmetic shift operations in cases where
8485 they are equivalent. This undoes the canonicalizations to logical
8486 shifts done elsewhere.
8488 We try, as much as possible, to re-use rtl expressions to save memory.
8490 IN_CODE says what kind of expression we are processing. Normally, it is
8491 SET. In a memory address it is MEM. When processing the arguments of
8492 a comparison or a COMPARE against zero, it is COMPARE, or EQ if, more
8493 precisely, it is an equality comparison against zero. */
8496 make_compound_operation (rtx x, enum rtx_code in_code)
8498 enum rtx_code code = GET_CODE (x);
8499 const char *fmt;
8500 int i, j;
8501 enum rtx_code next_code;
8502 rtx new_rtx, tem;
8504 /* Select the code to be used in recursive calls. Once we are inside an
8505 address, we stay there. If we have a comparison, set to COMPARE,
8506 but once inside, go back to our default of SET. */
8508 next_code = (code == MEM ? MEM
8509 : ((code == COMPARE || COMPARISON_P (x))
8510 && XEXP (x, 1) == const0_rtx) ? COMPARE
8511 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8513 scalar_int_mode mode;
8514 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8516 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8517 &next_code);
8518 if (new_rtx)
8519 return new_rtx;
8520 code = GET_CODE (x);
8523 /* Now recursively process each operand of this operation. We need to
8524 handle ZERO_EXTEND specially so that we don't lose track of the
8525 inner mode. */
8526 if (code == ZERO_EXTEND)
8528 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8529 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8530 new_rtx, GET_MODE (XEXP (x, 0)));
8531 if (tem)
8532 return tem;
8533 SUBST (XEXP (x, 0), new_rtx);
8534 return x;
8537 fmt = GET_RTX_FORMAT (code);
8538 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8539 if (fmt[i] == 'e')
8541 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8542 SUBST (XEXP (x, i), new_rtx);
8544 else if (fmt[i] == 'E')
8545 for (j = 0; j < XVECLEN (x, i); j++)
8547 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8548 SUBST (XVECEXP (x, i, j), new_rtx);
8551 maybe_swap_commutative_operands (x);
8552 return x;
8555 /* Given M see if it is a value that would select a field of bits
8556 within an item, but not the entire word. Return -1 if not.
8557 Otherwise, return the starting position of the field, where 0 is the
8558 low-order bit.
8560 *PLEN is set to the length of the field. */
8562 static int
8563 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8565 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8566 int pos = m ? ctz_hwi (m) : -1;
8567 int len = 0;
8569 if (pos >= 0)
8570 /* Now shift off the low-order zero bits and see if we have a
8571 power of two minus 1. */
8572 len = exact_log2 ((m >> pos) + 1);
8574 if (len <= 0)
8575 pos = -1;
8577 *plen = len;
8578 return pos;
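/* Worked example (editorial): for m == 0x0f0, ctz_hwi gives 4, and
   (m >> 4) + 1 == 0x10 with exact_log2 (0x10) == 4, so the mask selects
   a 4-bit field starting at bit 4.  For m == 0x0f1, (m >> 0) + 1 == 0xf2
   is not a power of two, so the set bits are not contiguous.  */
#if 0
  unsigned HOST_WIDE_INT len;
  int pos = get_pos_from_mask (0x0f0, &len);	/* pos == 4, len == 4 */
  pos = get_pos_from_mask (0x0f1, &len);	/* pos == -1, not a field */
#endif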
8581 /* If X refers to a register that equals REG in value, replace these
8582 references with REG. */
8583 static rtx
8584 canon_reg_for_combine (rtx x, rtx reg)
8586 rtx op0, op1, op2;
8587 const char *fmt;
8588 int i;
8589 bool copied;
8591 enum rtx_code code = GET_CODE (x);
8592 switch (GET_RTX_CLASS (code))
8594 case RTX_UNARY:
8595 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8596 if (op0 != XEXP (x, 0))
8597 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8598 GET_MODE (reg));
8599 break;
8601 case RTX_BIN_ARITH:
8602 case RTX_COMM_ARITH:
8603 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8604 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8605 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8606 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8607 break;
8609 case RTX_COMPARE:
8610 case RTX_COMM_COMPARE:
8611 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8612 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8613 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8614 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8615 GET_MODE (op0), op0, op1);
8616 break;
8618 case RTX_TERNARY:
8619 case RTX_BITFIELD_OPS:
8620 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8621 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8622 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8623 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8624 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8625 GET_MODE (op0), op0, op1, op2);
8626 /* FALLTHRU */
8628 case RTX_OBJ:
8629 if (REG_P (x))
8631 if (rtx_equal_p (get_last_value (reg), x)
8632 || rtx_equal_p (reg, get_last_value (x)))
8633 return reg;
8634 else
8635 break;
8638 /* fall through */
8640 default:
8641 fmt = GET_RTX_FORMAT (code);
8642 copied = false;
8643 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8644 if (fmt[i] == 'e')
8646 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8647 if (op != XEXP (x, i))
8649 if (!copied)
8651 copied = true;
8652 x = copy_rtx (x);
8654 XEXP (x, i) = op;
8657 else if (fmt[i] == 'E')
8659 int j;
8660 for (j = 0; j < XVECLEN (x, i); j++)
8662 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8663 if (op != XVECEXP (x, i, j))
8665 if (!copied)
8667 copied = true;
8668 x = copy_rtx (x);
8670 XVECEXP (x, i, j) = op;
8675 break;
8678 return x;
8681 /* Return X converted to MODE. If the value is already truncated to
8682 MODE we can just return a subreg even though in the general case we
8683 would need an explicit truncation. */
8685 static rtx
8686 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8688 if (!CONST_INT_P (x)
8689 && partial_subreg_p (mode, GET_MODE (x))
8690 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8691 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8693 /* Bit-cast X into an integer mode. */
8694 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8695 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8696 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8697 x, GET_MODE (x));
8700 return gen_lowpart (mode, x);
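/* Editorial sketch (hypothetical register number): if truncation from
   SImode to QImode is a no-op on the target, or the register is known
   to be already truncated, the narrowing is just a lowpart SUBREG;
   otherwise an explicit TRUNCATE is generated.  */
#if 0
  /* Yields (subreg:QI (reg:SI 100) 0) on a little-endian target in the
     no-op case, and (truncate:QI (reg:SI 100)) otherwise.  */
  rtx narrowed = gen_lowpart_or_truncate (QImode, gen_rtx_REG (SImode, 100));
#endif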
8703 /* See if X can be simplified knowing that we will only refer to it in
8704 MODE and will only refer to those bits that are nonzero in MASK.
8705 If other bits are being computed or if masking operations are done
8706 that select a superset of the bits in MASK, they can sometimes be
8707 ignored.
8709 Return a possibly simplified expression, but always convert X to
8710 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8712 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8713 are all off in X. This is used when X will be complemented, by either
8714 NOT, NEG, or XOR. */
8716 static rtx
8717 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8718 int just_select)
8720 enum rtx_code code = GET_CODE (x);
8721 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8722 machine_mode op_mode;
8723 unsigned HOST_WIDE_INT nonzero;
8725 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8726 code below will do the wrong thing since the mode of such an
8727 expression is VOIDmode.
8729 Also do nothing if X is a CLOBBER; this can happen if X was
8730 the return value from a call to gen_lowpart. */
8731 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8732 return x;
8734 /* We want to perform the operation in its present mode unless we know
8735 that the operation is valid in MODE, in which case we do the operation
8736 in MODE. */
8737 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8738 && have_insn_for (code, mode))
8739 ? mode : GET_MODE (x));
8741 /* It is not valid to do a right-shift in a narrower mode
8742 than the one it came in with. */
8743 if ((code == LSHIFTRT || code == ASHIFTRT)
8744 && partial_subreg_p (mode, GET_MODE (x)))
8745 op_mode = GET_MODE (x);
8747 /* Truncate MASK to fit OP_MODE. */
8748 if (op_mode)
8749 mask &= GET_MODE_MASK (op_mode);
8751 /* Determine what bits of X are guaranteed to be (non)zero. */
8752 nonzero = nonzero_bits (x, mode);
8754 /* If none of the bits in X are needed, return a zero. */
8755 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8756 x = const0_rtx;
8758 /* If X is a CONST_INT, return a new one. Do this here since the
8759 test below will fail. */
8760 if (CONST_INT_P (x))
8762 if (SCALAR_INT_MODE_P (mode))
8763 return gen_int_mode (INTVAL (x) & mask, mode);
8764 else
8766 x = GEN_INT (INTVAL (x) & mask);
8767 return gen_lowpart_common (mode, x);
8771 /* If X is narrower than MODE and we want all the bits in X's mode, just
8772 get X in the proper mode. */
8773 if (paradoxical_subreg_p (mode, GET_MODE (x))
8774 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8775 return gen_lowpart (mode, x);
8777 /* We can ignore the effect of a SUBREG if it narrows the mode or
8778 if the constant masks to zero all the bits the mode doesn't have. */
8779 if (GET_CODE (x) == SUBREG
8780 && subreg_lowpart_p (x)
8781 && (partial_subreg_p (x)
8782 || (mask
8783 & GET_MODE_MASK (GET_MODE (x))
8784 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8785 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8787 scalar_int_mode int_mode, xmode;
8788 if (is_a <scalar_int_mode> (mode, &int_mode)
8789 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8790 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8791 integer too. */
8792 return force_int_to_mode (x, int_mode, xmode,
8793 as_a <scalar_int_mode> (op_mode),
8794 mask, just_select);
8796 return gen_lowpart_or_truncate (mode, x);
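/* Worked example (editorial): asking only for the low four bits of an
   AND lets the AND be dropped.  With MASK == 0x0f,

     force_to_mode ((and:SI (reg:SI R) (const_int 0xff)), SImode, 0x0f, 0)

   first narrows the AND constant to 0xff & 0x0f == 0x0f; since that
   equals MASK, the AND is redundant and (reg:SI R) is returned -- the
   caller has promised never to look at bits outside MASK.  */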
8799 /* Subroutine of force_to_mode that handles cases in which both X and
8800 the result are scalar integers. MODE is the mode of the result,
8801 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8802 is preferred for simplified versions of X. The other arguments
8803 are as for force_to_mode. */
8805 static rtx
8806 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8807 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8808 int just_select)
8810 enum rtx_code code = GET_CODE (x);
8811 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8812 unsigned HOST_WIDE_INT fuller_mask;
8813 rtx op0, op1, temp;
8814 poly_int64 const_op0;
8816 /* When we have an arithmetic operation, or a shift whose count we
8817 do not know, we need to assume that all bits up to the highest-order
8818 bit in MASK will be needed. This is how we form such a mask. */
8819 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8820 fuller_mask = HOST_WIDE_INT_M1U;
8821 else
8822 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8823 - 1);
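/* Worked example (editorial): for MASK == 0x14 (bits 2 and 4 set),
   floor_log2 (0x14) == 4, so FULLER_MASK == (1 << 5) - 1 == 0x1f:
   every bit up to and including MASK's highest bit, since a carry can
   propagate into bit 4 from any lower bit.  */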
8825 switch (code)
8827 case CLOBBER:
8828 /* If X is a (clobber (const_int)), return it since we know we are
8829 generating something that won't match. */
8830 return x;
8832 case SIGN_EXTEND:
8833 case ZERO_EXTEND:
8834 case ZERO_EXTRACT:
8835 case SIGN_EXTRACT:
8836 x = expand_compound_operation (x);
8837 if (GET_CODE (x) != code)
8838 return force_to_mode (x, mode, mask, next_select);
8839 break;
8841 case TRUNCATE:
8842 /* Similarly for a truncate. */
8843 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8845 case AND:
8846 /* If this is an AND with a constant, convert it into an AND
8847 whose constant is the AND of that constant with MASK. If it
8848 remains an AND of MASK, delete it since it is redundant. */
8850 if (CONST_INT_P (XEXP (x, 1)))
8852 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8853 mask & INTVAL (XEXP (x, 1)));
8854 xmode = op_mode;
8856 /* If X is still an AND, see if it is an AND with a mask that
8857 is just some low-order bits. If so, and it is MASK, we don't
8858 need it. */
8860 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8861 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8862 x = XEXP (x, 0);
8864 /* If it remains an AND, try making another AND with the bits
8865 in the mode mask that aren't in MASK turned on. If the
8866 constant in the AND is wide enough, this might make a
8867 cheaper constant. */
8869 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8870 && GET_MODE_MASK (xmode) != mask
8871 && HWI_COMPUTABLE_MODE_P (xmode))
8873 unsigned HOST_WIDE_INT cval
8874 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8875 rtx y;
8877 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8878 gen_int_mode (cval, xmode));
8879 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8880 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8881 x = y;
8884 break;
8887 goto binop;
8889 case PLUS:
8890 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8891 low-order bits (as in an alignment operation) and FOO is already
8892 aligned to that boundary, mask C1 to that boundary as well.
8893 This may eliminate that PLUS and, later, the AND. */
8896 unsigned int width = GET_MODE_PRECISION (mode);
8897 unsigned HOST_WIDE_INT smask = mask;
8899 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8900 number, sign extend it. */
8902 if (width < HOST_BITS_PER_WIDE_INT
8903 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8904 smask |= HOST_WIDE_INT_M1U << width;
8906 if (CONST_INT_P (XEXP (x, 1))
8907 && pow2p_hwi (- smask)
8908 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8909 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8910 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8911 (INTVAL (XEXP (x, 1)) & smask)),
8912 mode, smask, next_select);
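/* Worked example (editorial): with SMASK == ~7 (an alignment mask),
   FOO known to have its low three bits clear, and C1 == 9,
   (and (plus FOO 9) ~7) computes the same value as (plus FOO 8),
   because 9 & ~7 == 8 and FOO + 8 is still 8-byte aligned; the PLUS
   is re-forced with the masked constant so the AND can later die.  */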
8915 /* fall through */
8917 case MULT:
8918 /* Substituting into the operands of a widening MULT is not likely to
8919 create RTL matching a machine insn. */
8920 if (code == MULT
8921 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8922 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8923 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8924 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8925 && REG_P (XEXP (XEXP (x, 0), 0))
8926 && REG_P (XEXP (XEXP (x, 1), 0)))
8927 return gen_lowpart_or_truncate (mode, x);
8929 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8930 most significant bit in MASK since carries from those bits will
8931 affect the bits we are interested in. */
8932 mask = fuller_mask;
8933 goto binop;
8935 case MINUS:
8936 /* If X is (minus C Y) where C's least set bit is larger than any bit
8937 in the mask, then we may replace with (neg Y). */
8938 if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8939 && known_alignment (poly_uint64 (const_op0)) > mask)
8941 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8942 return force_to_mode (x, mode, mask, next_select);
8945 /* Similarly, if C contains every bit in the fuller_mask, then we may
8946 replace with (not Y). */
8947 if (CONST_INT_P (XEXP (x, 0))
8948 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8950 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8951 return force_to_mode (x, mode, mask, next_select);
8954 mask = fuller_mask;
8955 goto binop;
8957 case IOR:
8958 case XOR:
8959 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8960 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8961 operation which may be a bitfield extraction. Ensure that the
8962 constant we form is not wider than the mode of X. */
8964 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8965 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8966 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8967 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8968 && CONST_INT_P (XEXP (x, 1))
8969 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8970 + floor_log2 (INTVAL (XEXP (x, 1))))
8971 < GET_MODE_PRECISION (xmode))
8972 && (UINTVAL (XEXP (x, 1))
8973 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8975 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8976 << INTVAL (XEXP (XEXP (x, 0), 1)),
8977 xmode);
8978 temp = simplify_gen_binary (GET_CODE (x), xmode,
8979 XEXP (XEXP (x, 0), 0), temp);
8980 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8981 XEXP (XEXP (x, 0), 1));
8982 return force_to_mode (x, mode, mask, next_select);
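/* Worked example (editorial): (ior (lshiftrt FOO 8) (const_int 0xf0))
   becomes (lshiftrt (ior FOO (const_int 0xf000)) 8): the constant is
   pre-shifted left by the same count, which is safe because the test
   above guarantees the widened constant still fits in the mode.  */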
8985 binop:
8986 /* For most binary operations, just propagate into the operation and
8987 change the mode if we have an operation of that mode. */
8989 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8990 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8992 /* If we ended up truncating both operands, truncate the result of the
8993 operation instead. */
8994 if (GET_CODE (op0) == TRUNCATE
8995 && GET_CODE (op1) == TRUNCATE)
8997 op0 = XEXP (op0, 0);
8998 op1 = XEXP (op1, 0);
9001 op0 = gen_lowpart_or_truncate (op_mode, op0);
9002 op1 = gen_lowpart_or_truncate (op_mode, op1);
9004 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
9006 x = simplify_gen_binary (code, op_mode, op0, op1);
9007 xmode = op_mode;
9009 break;
9011 case ASHIFT:
9012 /* For left shifts, do the same, but just for the first operand.
9013 However, we cannot do anything with shifts where we cannot
9014 guarantee that the counts are smaller than the size of the mode
9015 because such a count will have a different meaning in a
9016 wider mode. */
9018 if (! (CONST_INT_P (XEXP (x, 1))
9019 && INTVAL (XEXP (x, 1)) >= 0
9020 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
9021 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
9022 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
9023 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
9024 break;
9026 /* If the shift count is a constant and we can do arithmetic in
9027 the mode of the shift, refine which bits we need. Otherwise, use the
9028 conservative form of the mask. */
9029 if (CONST_INT_P (XEXP (x, 1))
9030 && INTVAL (XEXP (x, 1)) >= 0
9031 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
9032 && HWI_COMPUTABLE_MODE_P (op_mode))
9033 mask >>= INTVAL (XEXP (x, 1));
9034 else
9035 mask = fuller_mask;
9037 op0 = gen_lowpart_or_truncate (op_mode,
9038 force_to_mode (XEXP (x, 0), mode,
9039 mask, next_select));
9041 if (op_mode != xmode || op0 != XEXP (x, 0))
9043 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
9044 xmode = op_mode;
9046 break;
9048 case LSHIFTRT:
9049 /* Here we can only do something if the shift count is a constant,
9050 this shift constant is valid for the host, and we can do arithmetic
9051 in OP_MODE. */
9053 if (CONST_INT_P (XEXP (x, 1))
9054 && INTVAL (XEXP (x, 1)) >= 0
9055 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
9056 && HWI_COMPUTABLE_MODE_P (op_mode))
9058 rtx inner = XEXP (x, 0);
9059 unsigned HOST_WIDE_INT inner_mask;
9061 /* Select the mask of the bits we need for the shift operand. */
9062 inner_mask = mask << INTVAL (XEXP (x, 1));
9064 /* We can only change the mode of the shift if we can do arithmetic
9065 in the mode of the shift and INNER_MASK is no wider than the
9066 width of X's mode. */
9067 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
9068 op_mode = xmode;
9070 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9072 if (xmode != op_mode || inner != XEXP (x, 0))
9074 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9075 xmode = op_mode;
9079 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
9080 shift and AND produces only copies of the sign bit (C2 is one less
9081 than a power of two), we can do this with just a shift. */
9083 if (GET_CODE (x) == LSHIFTRT
9084 && CONST_INT_P (XEXP (x, 1))
9085 /* The shift puts one of the sign bit copies in the least significant
9086 bit. */
9087 && ((INTVAL (XEXP (x, 1))
9088 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9089 >= GET_MODE_PRECISION (xmode))
9090 && pow2p_hwi (mask + 1)
9091 /* Number of bits left after the shift must be more than the mask
9092 needs. */
9093 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9094 <= GET_MODE_PRECISION (xmode))
9095 /* Must be more sign bit copies than the mask needs. */
9096 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9097 >= exact_log2 (mask + 1)))
9099 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9100 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9101 gen_int_shift_amount (xmode, nbits));
9103 goto shiftrt;
9105 case ASHIFTRT:
9106 /* If we are just looking for the sign bit, we don't need this shift at
9107 all, even if it has a variable count. */
9108 if (val_signbit_p (xmode, mask))
9109 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9111 /* If this is a shift by a constant, get a mask that contains those bits
9112 that are not copies of the sign bit. We then have two cases: If
9113 MASK only includes those bits, this can be a logical shift, which may
9114 allow simplifications. If MASK is a single-bit field not within
9115 those bits, we are requesting a copy of the sign bit and hence can
9116 shift the sign bit to the appropriate location. */
9118 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9119 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9121 unsigned HOST_WIDE_INT nonzero;
9122 int i;
9124 /* If the considered data is wider than HOST_WIDE_INT, we can't
9125 represent a mask for all its bits in a single scalar.
9126 But we only care about the lower bits, so calculate these. */
9128 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9130 nonzero = HOST_WIDE_INT_M1U;
9132 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9133 is the number of bits a full-width mask would have set.
9134 We need only shift if these are fewer than nonzero can
9135 hold. If not, we must keep all bits set in nonzero. */
9137 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9138 < HOST_BITS_PER_WIDE_INT)
9139 nonzero >>= INTVAL (XEXP (x, 1))
9140 + HOST_BITS_PER_WIDE_INT
9141 - GET_MODE_PRECISION (xmode);
9143 else
9145 nonzero = GET_MODE_MASK (xmode);
9146 nonzero >>= INTVAL (XEXP (x, 1));
9149 if ((mask & ~nonzero) == 0)
9151 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9152 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9153 if (GET_CODE (x) != ASHIFTRT)
9154 return force_to_mode (x, mode, mask, next_select);
9157 else if ((i = exact_log2 (mask)) >= 0)
9159 x = simplify_shift_const
9160 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9161 GET_MODE_PRECISION (xmode) - 1 - i);
9163 if (GET_CODE (x) != ASHIFTRT)
9164 return force_to_mode (x, mode, mask, next_select);
9168 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9169 even if the shift count isn't a constant. */
9170 if (mask == 1)
9171 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9173 shiftrt:
9175 /* If this is a zero- or sign-extension operation that just affects bits
9176 we don't care about, remove it. Be sure the call above returned
9177 something that is still a shift. */
9179 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9180 && CONST_INT_P (XEXP (x, 1))
9181 && INTVAL (XEXP (x, 1)) >= 0
9182 && (INTVAL (XEXP (x, 1))
9183 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9184 && GET_CODE (XEXP (x, 0)) == ASHIFT
9185 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9186 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9187 next_select);
9189 break;
9191 case ROTATE:
9192 case ROTATERT:
9193 /* If the shift count is constant and we can do computations
9194 in the mode of X, compute where the bits we care about are.
9195 Otherwise, we can't do anything. Don't change the mode of
9196 the shift or propagate MODE into the shift, though. */
9197 if (CONST_INT_P (XEXP (x, 1))
9198 && INTVAL (XEXP (x, 1)) >= 0)
9200 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9201 xmode, gen_int_mode (mask, xmode),
9202 XEXP (x, 1));
9203 if (temp && CONST_INT_P (temp))
9204 x = simplify_gen_binary (code, xmode,
9205 force_to_mode (XEXP (x, 0), xmode,
9206 INTVAL (temp), next_select),
9207 XEXP (x, 1));
9209 break;
9211 case NEG:
9212 /* If we just want the low-order bit, the NEG isn't needed since it
9213 won't change the low-order bit. */
9214 if (mask == 1)
9215 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9217 /* We need any bits less significant than the most significant bit in
9218 MASK since carries from those bits will affect the bits we are
9219 interested in. */
9220 mask = fuller_mask;
9221 goto unop;
9223 case NOT:
9224 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9225 same as the XOR case above. Ensure that the constant we form is not
9226 wider than the mode of X. */
9228 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9229 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9230 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9231 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9232 < GET_MODE_PRECISION (xmode))
9233 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9235 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9236 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9237 x = simplify_gen_binary (LSHIFTRT, xmode,
9238 temp, XEXP (XEXP (x, 0), 1));
9240 return force_to_mode (x, mode, mask, next_select);
9243 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9244 use the full mask inside the NOT. */
9245 mask = fuller_mask;
9247 unop:
9248 op0 = gen_lowpart_or_truncate (op_mode,
9249 force_to_mode (XEXP (x, 0), mode, mask,
9250 next_select));
9251 if (op_mode != xmode || op0 != XEXP (x, 0))
9253 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9254 xmode = op_mode;
9256 break;
9258 case NE:
9259 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9260 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9261 which is equal to STORE_FLAG_VALUE. */
9262 if ((mask & ~STORE_FLAG_VALUE) == 0
9263 && XEXP (x, 1) == const0_rtx
9264 && GET_MODE (XEXP (x, 0)) == mode
9265 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9266 && (nonzero_bits (XEXP (x, 0), mode)
9267 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9268 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9270 break;
9272 case IF_THEN_ELSE:
9273 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9274 written in a narrower mode. We play it safe and do not do so. */
9276 op0 = gen_lowpart_or_truncate (xmode,
9277 force_to_mode (XEXP (x, 1), mode,
9278 mask, next_select));
9279 op1 = gen_lowpart_or_truncate (xmode,
9280 force_to_mode (XEXP (x, 2), mode,
9281 mask, next_select));
9282 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9283 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9284 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9285 op0, op1);
9286 break;
9288 default:
9289 break;
9292 /* Ensure we return a value of the proper mode. */
9293 return gen_lowpart_or_truncate (mode, x);
9296 /* Return nonzero if X is an expression that has one of two values depending on
9297 whether some other value is zero or nonzero. In that case, we return the
9298 value that is being tested, *PTRUE is set to the value if the rtx being
9299 returned has a nonzero value, and *PFALSE is set to the other alternative.
9301 If we return zero, we set *PTRUE and *PFALSE to X. */
9303 static rtx
9304 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9306 machine_mode mode = GET_MODE (x);
9307 enum rtx_code code = GET_CODE (x);
9308 rtx cond0, cond1, true0, true1, false0, false1;
9309 unsigned HOST_WIDE_INT nz;
9310 scalar_int_mode int_mode;
9312 /* If we are comparing a value against zero, we are done. */
9313 if ((code == NE || code == EQ)
9314 && XEXP (x, 1) == const0_rtx)
9316 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9317 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9318 return XEXP (x, 0);
9321 /* If this is a unary operation whose operand has one of two values, apply
9322 our opcode to compute those values. */
9323 else if (UNARY_P (x)
9324 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9326 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9327 *pfalse = simplify_gen_unary (code, mode, false0,
9328 GET_MODE (XEXP (x, 0)));
9329 return cond0;
9332 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9333 make can't possibly match and would suppress other optimizations. */
9334 else if (code == COMPARE)
9337 /* If this is a binary operation, see if either side has only one of two
9338 values. If either one does or if both do and they are conditional on
9339 the same value, compute the new true and false values. */
9340 else if (BINARY_P (x))
9342 rtx op0 = XEXP (x, 0);
9343 rtx op1 = XEXP (x, 1);
9344 cond0 = if_then_else_cond (op0, &true0, &false0);
9345 cond1 = if_then_else_cond (op1, &true1, &false1);
9347 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9348 && (REG_P (op0) || REG_P (op1)))
9350 /* Try to enable a simplification by undoing work done by
9351 if_then_else_cond if it converted a REG into something more
9352 complex. */
9353 if (REG_P (op0))
9355 cond0 = 0;
9356 true0 = false0 = op0;
9358 else
9360 cond1 = 0;
9361 true1 = false1 = op1;
9365 if ((cond0 != 0 || cond1 != 0)
9366 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9368 /* If if_then_else_cond returned zero, then true/false are the
9369 same rtl. We must copy one of them to prevent invalid rtl
9370 sharing. */
9371 if (cond0 == 0)
9372 true0 = copy_rtx (true0);
9373 else if (cond1 == 0)
9374 true1 = copy_rtx (true1);
9376 if (COMPARISON_P (x))
9378 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9379 true0, true1);
9380 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9381 false0, false1);
9383 else
9385 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9386 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9389 return cond0 ? cond0 : cond1;
9392 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9393 operands is zero when the other is nonzero, and vice-versa,
9394 and STORE_FLAG_VALUE is 1 or -1. */
9396 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9397 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9398 || code == UMAX)
9399 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9401 rtx op0 = XEXP (XEXP (x, 0), 1);
9402 rtx op1 = XEXP (XEXP (x, 1), 1);
9404 cond0 = XEXP (XEXP (x, 0), 0);
9405 cond1 = XEXP (XEXP (x, 1), 0);
9407 if (COMPARISON_P (cond0)
9408 && COMPARISON_P (cond1)
9409 && SCALAR_INT_MODE_P (mode)
9410 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9411 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9412 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9413 || ((swap_condition (GET_CODE (cond0))
9414 == reversed_comparison_code (cond1, NULL))
9415 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9416 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9417 && ! side_effects_p (x))
9419 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9420 *pfalse = simplify_gen_binary (MULT, mode,
9421 (code == MINUS
9422 ? simplify_gen_unary (NEG, mode,
9423 op1, mode)
9424 : op1),
9425 const_true_rtx);
9426 return cond0;
9430 /* Similarly for MULT, AND and UMIN, except that for these the result
9431 is always zero. */
9432 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9433 && (code == MULT || code == AND || code == UMIN)
9434 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9436 cond0 = XEXP (XEXP (x, 0), 0);
9437 cond1 = XEXP (XEXP (x, 1), 0);
9439 if (COMPARISON_P (cond0)
9440 && COMPARISON_P (cond1)
9441 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9442 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9443 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9444 || ((swap_condition (GET_CODE (cond0))
9445 == reversed_comparison_code (cond1, NULL))
9446 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9447 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9448 && ! side_effects_p (x))
9450 *ptrue = *pfalse = const0_rtx;
9451 return cond0;
9456 else if (code == IF_THEN_ELSE)
9458 /* If we have IF_THEN_ELSE already, extract the condition and
9459 canonicalize it if it is NE or EQ. */
9460 cond0 = XEXP (x, 0);
9461 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9462 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9463 return XEXP (cond0, 0);
9464 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9466 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9467 return XEXP (cond0, 0);
9469 else
9470 return cond0;
9473 /* If X is a SUBREG, we can narrow both the true and false values
9474 of the inner expression, if there is a condition. */
9475 else if (code == SUBREG
9476 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9477 &false0)) != 0)
9479 true0 = simplify_gen_subreg (mode, true0,
9480 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9481 false0 = simplify_gen_subreg (mode, false0,
9482 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9483 if (true0 && false0)
9485 *ptrue = true0;
9486 *pfalse = false0;
9487 return cond0;
9491 /* If X is a constant, this isn't special and will cause confusion
9492 if we treat it as such. Likewise if it is equivalent to a constant. */
9493 else if (CONSTANT_P (x)
9494 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9497 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9498 will be least confusing to the rest of the compiler. */
9499 else if (mode == BImode)
9501 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9502 return x;
9505 /* If X is known to be either 0 or -1, those are the true and
9506 false values when testing X. */
9507 else if (x == constm1_rtx || x == const0_rtx
9508 || (is_a <scalar_int_mode> (mode, &int_mode)
9509 && (num_sign_bit_copies (x, int_mode)
9510 == GET_MODE_PRECISION (int_mode))))
9512 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9513 return x;
9516 /* Likewise for 0 or a single bit. */
9517 else if (HWI_COMPUTABLE_MODE_P (mode)
9518 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9520 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9521 return x;
9524 /* Otherwise fail; show no condition with true and false values the same. */
9525 *ptrue = *pfalse = x;
9526 return 0;
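/* Editorial usage sketch (X and register numbers hypothetical).  */
#if 0
  rtx ptrue, pfalse;
  /* For x == (ne (reg:SI 100) (const_int 0)), this returns (reg:SI 100)
     with ptrue == const_true_rtx and pfalse == const0_rtx.  */
  rtx cond = if_then_else_cond (x, &ptrue, &pfalse);
  /* For a binary rtx such as (plus (if_then_else C A B) (const_int 1)),
     the condition C is returned and the two values become the
     simplified forms of (plus A 1) and (plus B 1).  */
#endif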
9529 /* Return the value of expression X given the fact that condition COND
9530 is known to be true when applied to REG as its first operand and VAL
9531 as its second. X is known to not be shared and so can be modified in
9532 place.
9534 We only handle the simplest cases, and specifically those cases that
9535 arise with IF_THEN_ELSE expressions. */
9537 static rtx
9538 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9540 enum rtx_code code = GET_CODE (x);
9541 const char *fmt;
9542 int i, j;
9544 if (side_effects_p (x))
9545 return x;
9547 /* If either operand of the condition is a floating point value,
9548 then we have to avoid collapsing an EQ comparison. */
9549 if (cond == EQ
9550 && rtx_equal_p (x, reg)
9551 && ! FLOAT_MODE_P (GET_MODE (x))
9552 && ! FLOAT_MODE_P (GET_MODE (val)))
9553 return val;
9555 if (cond == UNEQ && rtx_equal_p (x, reg))
9556 return val;
9558 /* If X is (abs REG) and we know something about REG's relationship
9559 with zero, we may be able to simplify this. */
9561 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9562 switch (cond)
9564 case GE: case GT: case EQ:
9565 return XEXP (x, 0);
9566 case LT: case LE:
9567 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9568 XEXP (x, 0),
9569 GET_MODE (XEXP (x, 0)));
9570 default:
9571 break;
9574 /* The only other cases we handle are MIN, MAX, and comparisons if the
9575 operands are the same as REG and VAL. */
9577 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9579 if (rtx_equal_p (XEXP (x, 0), val))
9581 std::swap (val, reg);
9582 cond = swap_condition (cond);
9585 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9587 if (COMPARISON_P (x))
9589 if (comparison_dominates_p (cond, code))
9590 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9592 code = reversed_comparison_code (x, NULL);
9593 if (code != UNKNOWN
9594 && comparison_dominates_p (cond, code))
9595 return CONST0_RTX (GET_MODE (x));
9596 else
9597 return x;
9599 else if (code == SMAX || code == SMIN
9600 || code == UMIN || code == UMAX)
9602 int unsignedp = (code == UMIN || code == UMAX);
9604 /* Do not reverse the condition when it is NE or EQ.
9605 This is because we cannot conclude anything about
9606 the value of 'SMAX (x, y)' when x is not equal to y,
9607 but we can when x equals y. */
9608 if ((code == SMAX || code == UMAX)
9609 && ! (cond == EQ || cond == NE))
9610 cond = reverse_condition (cond);
9612 switch (cond)
9614 case GE: case GT:
9615 return unsignedp ? x : XEXP (x, 1);
9616 case LE: case LT:
9617 return unsignedp ? x : XEXP (x, 0);
9618 case GEU: case GTU:
9619 return unsignedp ? XEXP (x, 1) : x;
9620 case LEU: case LTU:
9621 return unsignedp ? XEXP (x, 0) : x;
9622 default:
9623 break;
9628 else if (code == SUBREG)
9630 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9631 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9633 if (SUBREG_REG (x) != r)
9635 /* We must simplify subreg here, before we lose track of the
9636 original inner_mode. */
9637 new_rtx = simplify_subreg (GET_MODE (x), r,
9638 inner_mode, SUBREG_BYTE (x));
9639 if (new_rtx)
9640 return new_rtx;
9641 else
9642 SUBST (SUBREG_REG (x), r);
9645 return x;
9647 /* We don't have to handle SIGN_EXTEND here, because even in the
9648 case of replacing something with a modeless CONST_INT, a
9649 CONST_INT is already (supposed to be) a valid sign extension for
9650 its narrower mode, which implies it's already properly
9651 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9652 story is different. */
9653 else if (code == ZERO_EXTEND)
9655 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9656 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9658 if (XEXP (x, 0) != r)
9660 /* We must simplify the zero_extend here, before we lose
9661 track of the original inner_mode. */
9662 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9663 r, inner_mode);
9664 if (new_rtx)
9665 return new_rtx;
9666 else
9667 SUBST (XEXP (x, 0), r);
9670 return x;
9673 fmt = GET_RTX_FORMAT (code);
9674 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9676 if (fmt[i] == 'e')
9677 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9678 else if (fmt[i] == 'E')
9679 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9680 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9681 cond, reg, val));
9684 return x;
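/* Worked example (editorial): if (ge (reg R) (const_int 0)) is known
   true, then known_cond ((abs (reg R)), GE, (reg R), const0_rtx)
   returns (reg R) itself, since the absolute value of a known
   nonnegative quantity is the quantity; under LT it would return
   (neg (reg R)).  */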
9687 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9688 assignment as a field assignment. */
9690 static int
9691 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9693 if (widen_x && GET_MODE (x) != GET_MODE (y))
9695 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9696 return 0;
9697 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9698 return 0;
9699 x = adjust_address_nv (x, GET_MODE (y),
9700 byte_lowpart_offset (GET_MODE (y),
9701 GET_MODE (x)));
9704 if (x == y || rtx_equal_p (x, y))
9705 return 1;
9707 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9708 return 0;
9710 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9711 Note that all SUBREGs of MEM are paradoxical; otherwise they
9712 would have been rewritten. */
9713 if (MEM_P (x) && GET_CODE (y) == SUBREG
9714 && MEM_P (SUBREG_REG (y))
9715 && rtx_equal_p (SUBREG_REG (y),
9716 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9717 return 1;
9719 if (MEM_P (y) && GET_CODE (x) == SUBREG
9720 && MEM_P (SUBREG_REG (x))
9721 && rtx_equal_p (SUBREG_REG (x),
9722 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9723 return 1;
9725 /* We used to see if get_last_value of X and Y were the same but that's
9726 not correct. In one direction, we'll cause the assignment to have
9727 the wrong destination and in the other case, we'll import a register into this
9728 insn that might already have been dead. So fail if none of the
9729 above cases are true. */
9730 return 0;
9733 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9734 Return that assignment if so.
9736 We only handle the most common cases. */
9738 static rtx
9739 make_field_assignment (rtx x)
9741 rtx dest = SET_DEST (x);
9742 rtx src = SET_SRC (x);
9743 rtx assign;
9744 rtx rhs, lhs;
9745 HOST_WIDE_INT c1;
9746 HOST_WIDE_INT pos;
9747 unsigned HOST_WIDE_INT len;
9748 rtx other;
9750 /* All the rules in this function are specific to scalar integers. */
9751 scalar_int_mode mode;
9752 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9753 return x;
9755 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9756 a clear of a one-bit field. We will have changed it to
9757 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9758 for a SUBREG. */
9760 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9761 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9762 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9763 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9765 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9766 1, 1, 1, 0);
9767 if (assign != 0)
9768 return gen_rtx_SET (assign, const0_rtx);
9769 return x;
9772 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9773 && subreg_lowpart_p (XEXP (src, 0))
9774 && partial_subreg_p (XEXP (src, 0))
9775 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9776 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9777 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9778 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9780 assign = make_extraction (VOIDmode, dest, 0,
9781 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9782 1, 1, 1, 0);
9783 if (assign != 0)
9784 return gen_rtx_SET (assign, const0_rtx);
9785 return x;
9788 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9789 one-bit field. */
9790 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9791 && XEXP (XEXP (src, 0), 0) == const1_rtx
9792 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9794 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9795 1, 1, 1, 0);
9796 if (assign != 0)
9797 return gen_rtx_SET (assign, const1_rtx);
9798 return x;
9801 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9802 SRC is an AND with all bits of that field set, then we can discard
9803 the AND. */
9804 if (GET_CODE (dest) == ZERO_EXTRACT
9805 && CONST_INT_P (XEXP (dest, 1))
9806 && GET_CODE (src) == AND
9807 && CONST_INT_P (XEXP (src, 1)))
9809 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9810 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9811 unsigned HOST_WIDE_INT ze_mask;
9813 if (width >= HOST_BITS_PER_WIDE_INT)
9814 ze_mask = -1;
9815 else
9816 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9818 /* Complete overlap. We can remove the source AND. */
9819 if ((and_mask & ze_mask) == ze_mask)
9820 return gen_rtx_SET (dest, XEXP (src, 0));
9822 /* Partial overlap. We can reduce the source AND. */
9823 if ((and_mask & ze_mask) != and_mask)
9825 src = gen_rtx_AND (mode, XEXP (src, 0),
9826 gen_int_mode (and_mask & ze_mask, mode));
9827 return gen_rtx_SET (dest, src);
9831 /* The other case we handle is assignments into a constant-position
9832 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9833 a mask that has all one bits except for a group of zero bits and
9834 OTHER is known to have zeros where C1 has ones, this is such an
9835 assignment. Compute the position and length from C1. Shift OTHER
9836 to the appropriate position, force it to the required mode, and
9837 make the extraction. Check for the AND in both operands. */
9839 /* One or more SUBREGs might obscure the constant-position field
9840 assignment. The first one we are likely to encounter is an outer
9841 narrowing SUBREG, which we can just strip for the purposes of
9842 identifying the constant-field assignment. */
9843 scalar_int_mode src_mode = mode;
9844 if (GET_CODE (src) == SUBREG
9845 && subreg_lowpart_p (src)
9846 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9847 src = SUBREG_REG (src);
9849 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9850 return x;
9852 rhs = expand_compound_operation (XEXP (src, 0));
9853 lhs = expand_compound_operation (XEXP (src, 1));
9855 if (GET_CODE (rhs) == AND
9856 && CONST_INT_P (XEXP (rhs, 1))
9857 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9858 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9859 /* The second SUBREG that might get in the way is a paradoxical
9860 SUBREG around the first operand of the AND. We want to
9861 pretend the operand is as wide as the destination here. We
9862 do this by adjusting the MEM to wider mode for the sole
9863 purpose of the call to rtx_equal_for_field_assignment_p. Also
9864 note this trick only works for MEMs. */
9865 else if (GET_CODE (rhs) == AND
9866 && paradoxical_subreg_p (XEXP (rhs, 0))
9867 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9868 && CONST_INT_P (XEXP (rhs, 1))
9869 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9870 dest, true))
9871 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9872 else if (GET_CODE (lhs) == AND
9873 && CONST_INT_P (XEXP (lhs, 1))
9874 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9875 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9876 /* The second SUBREG that might get in the way is a paradoxical
9877 SUBREG around the first operand of the AND. We want to
9878 pretend the operand is as wide as the destination here. We
9879 do this by adjusting the MEM to wider mode for the sole
9880 purpose of the call to rtx_equal_for_field_assignment_p. Also
9881 note this trick only works for MEMs. */
9882 else if (GET_CODE (lhs) == AND
9883 && paradoxical_subreg_p (XEXP (lhs, 0))
9884 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9885 && CONST_INT_P (XEXP (lhs, 1))
9886 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9887 dest, true))
9888 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9889 else
9890 return x;
9892 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9893 if (pos < 0
9894 || pos + len > GET_MODE_PRECISION (mode)
9895 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9896 || (c1 & nonzero_bits (other, mode)) != 0)
9897 return x;
9899 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9900 if (assign == 0)
9901 return x;
9903 /* The mode to use for the source is the mode of the assignment, or of
9904 what is inside a possible STRICT_LOW_PART. */
9905 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9906 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9908 /* Shift OTHER right POS places and make it the source, restricting it
9909 to the proper length and mode. */
9911 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9912 src_mode, other, pos),
9913 dest);
9914 src = force_to_mode (src, new_mode,
9915 len >= HOST_BITS_PER_WIDE_INT
9916 ? HOST_WIDE_INT_M1U
9917 : (HOST_WIDE_INT_1U << len) - 1,
9920 /* If SRC is masked by an AND that does not make a difference in
9921 the value being stored, strip it. */
9922 if (GET_CODE (assign) == ZERO_EXTRACT
9923 && CONST_INT_P (XEXP (assign, 1))
9924 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9925 && GET_CODE (src) == AND
9926 && CONST_INT_P (XEXP (src, 1))
9927 && UINTVAL (XEXP (src, 1))
9928 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9929 src = XEXP (src, 0);
9931 return gen_rtx_SET (assign, src);
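/* Worked example (editorial): setting a single bit of DEST, as in

     (set (reg:SI R) (ior:SI (ashift:SI (const_int 1) (reg:SI P))
                             (reg:SI R)))

   is rewritten to a bit-field store,

     (set (zero_extract (reg:SI R) (const_int 1) (reg:SI P))
          (const_int 1)),

   provided make_extraction succeeds for the target.  */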
9934 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9935 if so. */
9937 static rtx
9938 apply_distributive_law (rtx x)
9940 enum rtx_code code = GET_CODE (x);
9941 enum rtx_code inner_code;
9942 rtx lhs, rhs, other;
9943 rtx tem;
9945 /* Distributivity is not true for floating point as it can change the
9946 value. So we don't do it unless -funsafe-math-optimizations. */
9947 if (FLOAT_MODE_P (GET_MODE (x))
9948 && ! flag_unsafe_math_optimizations)
9949 return x;
9951 /* The outer operation can only be one of the following: */
9952 if (code != IOR && code != AND && code != XOR
9953 && code != PLUS && code != MINUS)
9954 return x;
9956 lhs = XEXP (x, 0);
9957 rhs = XEXP (x, 1);
9959 /* If either operand is a primitive we can't do anything, so get out
9960 fast. */
9961 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9962 return x;
9964 lhs = expand_compound_operation (lhs);
9965 rhs = expand_compound_operation (rhs);
9966 inner_code = GET_CODE (lhs);
9967 if (inner_code != GET_CODE (rhs))
9968 return x;
9970 /* See if the inner and outer operations distribute. */
9971 switch (inner_code)
9973 case LSHIFTRT:
9974 case ASHIFTRT:
9975 case AND:
9976 case IOR:
9977 /* These all distribute except over PLUS. */
9978 if (code == PLUS || code == MINUS)
9979 return x;
9980 break;
9982 case MULT:
9983 if (code != PLUS && code != MINUS)
9984 return x;
9985 break;
9987 case ASHIFT:
9988 /* This is also a multiply, so it distributes over everything. */
9989 break;
9991 /* This used to handle SUBREG, but this turned out to be counter-
9992 productive, since (subreg (op ...)) usually is not handled by
9993 insn patterns, and this "optimization" therefore transformed
9994 recognizable patterns into unrecognizable ones. Therefore the
9995 SUBREG case was removed from here.
9997 It is possible that distributing SUBREG over arithmetic operations
9998 leads to an intermediate result that can then be optimized further,
9999 e.g. by moving the outer SUBREG to the other side of a SET as done
10000 in simplify_set. This seems to have been the original intent of
10001 handling SUBREGs here.
10003 However, with current GCC this does not appear to actually happen,
10004 at least on major platforms. If some case is found where removing
10005 the SUBREG case here prevents follow-on optimizations, distributing
10006 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
10008 default:
10009 return x;
10012 /* Set LHS and RHS to the inner operands (A and B in the example
10013 above) and set OTHER to the common operand (C in the example).
10014 There is only one way to do this unless the inner operation is
10015 commutative. */
10016 if (COMMUTATIVE_ARITH_P (lhs)
10017 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
10018 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
10019 else if (COMMUTATIVE_ARITH_P (lhs)
10020 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
10021 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
10022 else if (COMMUTATIVE_ARITH_P (lhs)
10023 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
10024 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
10025 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
10026 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
10027 else
10028 return x;
10030 /* Form the new inner operation, seeing if it simplifies first. */
10031 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
10033 /* There is one exception to the general way of distributing:
10034 (a | c) ^ (b | c) -> (a ^ b) & ~c */
10035 if (code == XOR && inner_code == IOR)
10037 inner_code = AND;
10038 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
10041 /* We may be able to continue distributing the result, so call
10042 ourselves recursively on the inner operation before forming the
10043 outer operation, which we return. */
10044 return simplify_gen_binary (inner_code, GET_MODE (x),
10045 apply_distributive_law (tem), other);
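/* Worked example (editorial): (ior (and A C) (and B C)) distributes to
   (and (ior A B) C).  Per the exception above,
   (xor (ior A C) (ior B C)) instead becomes (and (xor A B) (not C)):
   the bits of C are equal in both operands, so they cancel under XOR.  */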
10048 /* See if X is of the form (* (+ A B) C), and if so convert to
10049 (+ (* A C) (* B C)) and try to simplify.
10051 Most of the time, this results in no change. However, if some of
10052 the operands are the same or inverses of each other, simplifications
10053 will result.
10055 For example, (and (ior A B) (not B)) can occur as the result of
10056 expanding a bit field assignment. When we apply the distributive
10057 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
10058 which then simplifies to (and (A (not B))).
10060 Note that no checks happen on the validity of applying the inverse
10061 distributive law. This is pointless since we can do it in the
10062 few places where this routine is called.
10064 N is the index of the term that is decomposed (the arithmetic operation,
10065 i.e. (+ A B) in the first example above). !N is the index of the term that
10066 is distributed, i.e. of C in the first example above. */
10067 static rtx
10068 distribute_and_simplify_rtx (rtx x, int n)
10070 machine_mode mode;
10071 enum rtx_code outer_code, inner_code;
10072 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
10074 /* Distributivity is not true for floating point as it can change the
10075 value. So we don't do it unless -funsafe-math-optimizations. */
10076 if (FLOAT_MODE_P (GET_MODE (x))
10077 && ! flag_unsafe_math_optimizations)
10078 return NULL_RTX;
10080 decomposed = XEXP (x, n);
10081 if (!ARITHMETIC_P (decomposed))
10082 return NULL_RTX;
10084 mode = GET_MODE (x);
10085 outer_code = GET_CODE (x);
10086 distributed = XEXP (x, !n);
10088 inner_code = GET_CODE (decomposed);
10089 inner_op0 = XEXP (decomposed, 0);
10090 inner_op1 = XEXP (decomposed, 1);
10092 /* Special case (and (xor B C) (not A)), which is equivalent to
10093 (xor (ior A B) (ior A C)) */
10094 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10096 distributed = XEXP (distributed, 0);
10097 outer_code = IOR;
10100 if (n == 0)
10102 /* Distribute the second term. */
10103 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10104 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10106 else
10108 /* Distribute the first term. */
10109 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10110 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10113 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10114 new_op0, new_op1));
10115 if (GET_CODE (tmp) != outer_code
10116 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10117 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10118 return tmp;
10120 return NULL_RTX;
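/* Worked example (editorial): for X == (and (ior A B) (not B)) and
   N == 0, the term (ior A B) is decomposed and (not B) is distributed
   inward, giving (ior (and A (not B)) (and B (not B))); the second
   operand simplifies to zero, leaving (and A (not B)).  The result is
   used only when its outermost code differs from X's and it is
   cheaper.  */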
10123 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10124 in MODE. Return an equivalent form, if different from (and VAROP
10125 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
10127 static rtx
10128 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10129 unsigned HOST_WIDE_INT constop)
10131 unsigned HOST_WIDE_INT nonzero;
10132 unsigned HOST_WIDE_INT orig_constop;
10133 rtx orig_varop;
10134 int i;
10136 orig_varop = varop;
10137 orig_constop = constop;
10138 if (GET_CODE (varop) == CLOBBER)
10139 return NULL_RTX;
10141 /* Simplify VAROP knowing that we will be only looking at some of the
10142 bits in it.
10144 Note by passing in CONSTOP, we guarantee that the bits not set in
10145 CONSTOP are not significant and will never be examined. We must
10146 ensure that is the case by explicitly masking out those bits
10147 before returning. */
10148 varop = force_to_mode (varop, mode, constop, 0);
10150 /* If VAROP is a CLOBBER, we will fail so return it. */
10151 if (GET_CODE (varop) == CLOBBER)
10152 return varop;
10154 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10155 to VAROP and return the new constant. */
10156 if (CONST_INT_P (varop))
10157 return gen_int_mode (INTVAL (varop) & constop, mode);
10159 /* See what bits may be nonzero in VAROP. Unlike the general case of
10160 a call to nonzero_bits, here we don't care about bits outside
10161 MODE. */
10163 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10165 /* Turn off all bits in the constant that are known to already be zero.
10166 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10167 which is tested below. */
10169 constop &= nonzero;
10171 /* If we don't have any bits left, return zero. */
10172 if (constop == 0)
10173 return const0_rtx;
10175 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10176 a power of two, we can replace this with an ASHIFT. */
10177 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10178 && (i = exact_log2 (constop)) >= 0)
10179 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10181 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10182 or XOR, then try to apply the distributive law. This may eliminate
10183 operations if either branch can be simplified because of the AND.
10184 It may also make some cases more complex, but those cases probably
10185 won't match a pattern either with or without this. */
10187 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10189 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10190 return
10191 gen_lowpart
10192 (mode,
10193 apply_distributive_law
10194 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10195 simplify_and_const_int (NULL_RTX, varop_mode,
10196 XEXP (varop, 0),
10197 constop),
10198 simplify_and_const_int (NULL_RTX, varop_mode,
10199 XEXP (varop, 1),
10200 constop))));
10203 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10204 the AND and see if one of the operands simplifies to zero. If so, we
10205 may eliminate it. */
10207 if (GET_CODE (varop) == PLUS
10208 && pow2p_hwi (constop + 1))
10210 rtx o0, o1;
10212 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10213 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10214 if (o0 == const0_rtx)
10215 return o1;
10216 if (o1 == const0_rtx)
10217 return o0;
10220 /* Make a SUBREG if necessary. If we can't make it, fail. */
10221 varop = gen_lowpart (mode, varop);
10222 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10223 return NULL_RTX;
10225 /* If we are only masking insignificant bits, return VAROP. */
10226 if (constop == nonzero)
10227 return varop;
10229 if (varop == orig_varop && constop == orig_constop)
10230 return NULL_RTX;
10232 /* Otherwise, return an AND. */
10233 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
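/* Worked example (editorial): the NEG rule above in action.  If X is
   known to be 0 or 1, then (neg X) is 0 or -1, so

     (and (neg X) (const_int 16))

   is either 0 or 16, i.e. exactly (ashift X (const_int 4)); whenever
   CONSTOP is a power of two the AND is replaced by that shift.  */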
10237 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10238 in MODE.
10240 Return an equivalent form, if different from X. Otherwise, return X. If
10241 X is zero, we are to always construct the equivalent form. */
10243 static rtx
10244 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10245 unsigned HOST_WIDE_INT constop)
10247 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10248 if (tem)
10249 return tem;
10251 if (!x)
10252 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10253 gen_int_mode (constop, mode));
10254 if (GET_MODE (x) != mode)
10255 x = gen_lowpart (mode, x);
10256 return x;
10259 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10260 We don't care about bits outside of those defined in MODE.
10261 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10263 For most X this is simply GET_MODE_MASK (MODE), but if X is
10264 a shift, AND, or zero_extract, we can do better. */
10266 static rtx
10267 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10268 scalar_int_mode mode,
10269 unsigned HOST_WIDE_INT *nonzero)
10271 rtx tem;
10272 reg_stat_type *rsp;
10274 /* If X is a register whose nonzero bits value is current, use it.
10275 Otherwise, if X is a register whose value we can find, use that
10276 value. Otherwise, use the previously-computed global nonzero bits
10277 for this register. */
10279 rsp = &reg_stat[REGNO (x)];
10280 if (rsp->last_set_value != 0
10281 && (rsp->last_set_mode == mode
10282 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10283 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10284 && GET_MODE_CLASS (mode) == MODE_INT))
10285 && ((rsp->last_set_label >= label_tick_ebb_start
10286 && rsp->last_set_label < label_tick)
10287 || (rsp->last_set_label == label_tick
10288 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10289 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10290 && REGNO (x) < reg_n_sets_max
10291 && REG_N_SETS (REGNO (x)) == 1
10292 && !REGNO_REG_SET_P
10293 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10294 REGNO (x)))))
10296 /* Note that, even if the precision of last_set_mode is lower than that
10297 of mode, record_value_for_reg invoked nonzero_bits on the register
10298 with nonzero_bits_mode (because last_set_mode is necessarily integral
10299 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10300 are all valid, hence in mode too since nonzero_bits_mode is defined
10301 to the largest HWI_COMPUTABLE_MODE_P mode. */
10302 *nonzero &= rsp->last_set_nonzero_bits;
10303 return NULL;
10306 tem = get_last_value (x);
10307 if (tem)
10309 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10310 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10312 return tem;
10315 if (nonzero_sign_valid && rsp->nonzero_bits)
10317 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10319 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10320 /* We don't know anything about the upper bits. */
10321 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10323 *nonzero &= mask;
10326 return NULL;
10329 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10330 end of X that are known to be equal to the sign bit. X will be used
10331 in mode MODE; the returned value will always be between 1 and the
10332 number of bits in MODE. */
10334 static rtx
10335 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10336 scalar_int_mode mode,
10337 unsigned int *result)
10339 rtx tem;
10340 reg_stat_type *rsp;
10342 rsp = &reg_stat[REGNO (x)];
10343 if (rsp->last_set_value != 0
10344 && rsp->last_set_mode == mode
10345 && ((rsp->last_set_label >= label_tick_ebb_start
10346 && rsp->last_set_label < label_tick)
10347 || (rsp->last_set_label == label_tick
10348 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10349 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10350 && REGNO (x) < reg_n_sets_max
10351 && REG_N_SETS (REGNO (x)) == 1
10352 && !REGNO_REG_SET_P
10353 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10354 REGNO (x)))))
10356 *result = rsp->last_set_sign_bit_copies;
10357 return NULL;
10360 tem = get_last_value (x);
10361 if (tem != 0)
10362 return tem;
10364 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10365 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10366 *result = rsp->sign_bit_copies;
10368 return NULL;
10371 /* Return the number of "extended" bits there are in X, when interpreted
10372 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10373 unsigned quantities, this is the number of high-order zero bits.
10374 For signed quantities, this is the number of copies of the sign bit
10375 minus 1. In both cases, this function returns the number of "spare"
10376 bits. For example, if two quantities for which this function returns
10377 at least 1 are added, the addition is known not to overflow.
10379 This function will always return 0 unless called during combine, which
10380 implies that it must be called from a define_split. */
10382 unsigned int
10383 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10385 if (nonzero_sign_valid == 0)
10386 return 0;
10388 scalar_int_mode int_mode;
10389 return (unsignedp
10390 ? (is_a <scalar_int_mode> (mode, &int_mode)
10391 && HWI_COMPUTABLE_MODE_P (int_mode)
10392 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10393 - floor_log2 (nonzero_bits (x, int_mode)))
10394 : 0)
10395 : num_sign_bit_copies (x, mode) - 1);
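/* For example, in the unsigned path of extended_count above: if
   nonzero_bits shows an SImode value fits in 0xff, floor_log2 (0xff)
   == 7 and this returns 32 - 1 - 7 == 24 spare high-order zero bits;
   two such values can then be added with no risk of overflow.  */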
10398 /* This function is called from `simplify_shift_const' to merge two
10399 outer operations. Specifically, we have already found that we need
10400 to perform operation *POP0 with constant *PCONST0 at the outermost
10401 position. We would now like to also perform OP1 with constant CONST1
10402 (with *POP0 being done last).
10404 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10405 the resulting operation. *PCOMP_P is set to 1 if we would need to
10406 complement the innermost operand, otherwise it is unchanged.
10408 MODE is the mode in which the operation will be done. No bits outside
10409 the width of this mode matter. It is assumed that the width of this mode
10410 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10412 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10413 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10414 result is simply *PCONST0.
10416 If the resulting operation cannot be expressed as one operation, we
10417 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10419 static int
10420 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
10421                  HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10422 enum rtx_code op0 = *pop0;
10423 HOST_WIDE_INT const0 = *pconst0;
10425 const0 &= GET_MODE_MASK (mode);
10426 const1 &= GET_MODE_MASK (mode);
10428 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10429 if (op0 == AND)
10430 const1 &= const0;
10432 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10433 if OP0 is SET. */
10435 if (op1 == UNKNOWN || op0 == SET)
10436 return 1;
10438 else if (op0 == UNKNOWN)
10439 op0 = op1, const0 = const1;
10441 else if (op0 == op1)
10443 switch (op0)
10445 case AND:
10446 const0 &= const1;
10447 break;
10448 case IOR:
10449 const0 |= const1;
10450 break;
10451 case XOR:
10452 const0 ^= const1;
10453 break;
10454 case PLUS:
10455 const0 += const1;
10456 break;
10457 case NEG:
10458 op0 = UNKNOWN;
10459 break;
10460 default:
10461 break;
10465 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10466 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10467 return 0;
10469 /* If the two constants aren't the same, we can't do anything. The
10470 remaining six cases can all be done. */
10471 else if (const0 != const1)
10472 return 0;
10474 else
10475 switch (op0)
10477 case IOR:
10478 if (op1 == AND)
10479 /* (a & b) | b == b */
10480 op0 = SET;
10481 else /* op1 == XOR */
10482 /* (a ^ b) | b == a | b */
10484 break;
10486 case XOR:
10487 if (op1 == AND)
10488 /* (a & b) ^ b == (~a) & b */
10489 op0 = AND, *pcomp_p = 1;
10490 else /* op1 == IOR */
10491 /* (a | b) ^ b == a & ~b */
10492 op0 = AND, const0 = ~const0;
10493 break;
10495 case AND:
10496 if (op1 == IOR)
10497 /* (a | b) & b == b */
10498 op0 = SET;
10499 else /* op1 == XOR */
10500 /* (a ^ b) & b == (~a) & b */
10501 *pcomp_p = 1;
10502 break;
10503 default:
10504 break;
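/* A worked 4-bit check of the last identity above,
   (a ^ b) & b == (~a) & b: with a == 0b1100 and b == 0b1010, the
   left side is 0b0110 & 0b1010 == 0b0010 and the right side is
   0b0011 & 0b1010 == 0b0010.  */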
10507 /* Check for NO-OP cases. */
10508 const0 &= GET_MODE_MASK (mode);
10509 if (const0 == 0
10510 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10511 op0 = UNKNOWN;
10512 else if (const0 == 0 && op0 == AND)
10513 op0 = SET;
10514 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10515 && op0 == AND)
10516 op0 = UNKNOWN;
10518 *pop0 = op0;
10520 /* ??? Slightly redundant with the above mask, but not entirely.
10521 Moving this above means we'd have to sign-extend the mode mask
10522 for the final test. */
10523 if (op0 != UNKNOWN && op0 != NEG)
10524 *pconst0 = trunc_int_for_mode (const0, mode);
10526 return 1;
10529 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10530 the shift in. The original shift operation CODE is performed on OP in
10531 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10532 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10533 result of the shift is subject to operation OUTER_CODE with operand
10534 OUTER_CONST. */
10536 static scalar_int_mode
10537 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10538 scalar_int_mode orig_mode, scalar_int_mode mode,
10539 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10541 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10543 /* In general we can't perform in wider mode for right shift and rotate. */
10544 switch (code)
10546 case ASHIFTRT:
10547 /* We can still widen if the bits brought in from the left are identical
10548 to the sign bit of ORIG_MODE. */
10549 if (num_sign_bit_copies (op, mode)
10550 > (unsigned) (GET_MODE_PRECISION (mode)
10551 - GET_MODE_PRECISION (orig_mode)))
10552 return mode;
10553 return orig_mode;
10555 case LSHIFTRT:
10556 /* Similarly here but with zero bits. */
10557 if (HWI_COMPUTABLE_MODE_P (mode)
10558 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10559 return mode;
10561 /* We can also widen if the bits brought in will be masked off. This
10562 operation is performed in ORIG_MODE. */
10563 if (outer_code == AND)
10565 int care_bits = low_bitmask_len (orig_mode, outer_const);
10567 if (care_bits >= 0
10568 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10569 return mode;
10571 /* fall through */
10573 case ROTATE:
10574 return orig_mode;
10576 case ROTATERT:
10577 gcc_unreachable ();
10579 default:
10580 return mode;
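/* For example, in the ASHIFTRT case of try_widen_shift_mode above, a
   QImode arithmetic right shift can be widened to SImode when the
   SImode operand has more than 32 - 8 == 24 sign-bit copies: bits 7
   through 31 then all equal the QImode sign bit, so the bits shifted
   in from the left match what the narrow shift would have produced.  */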
10584 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10585 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10586 if we cannot simplify it. Otherwise, return a simplified value.
10588 The shift is normally computed in the widest mode we find in VAROP, as
10589 long as it isn't a different number of words than RESULT_MODE. Exceptions
10590 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10592 static rtx
10593 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10594 rtx varop, int orig_count)
10596 enum rtx_code orig_code = code;
10597 rtx orig_varop = varop;
10598 int count, log2;
10599 machine_mode mode = result_mode;
10600 machine_mode shift_mode;
10601 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10602 /* We form (outer_op (code varop count) (outer_const)). */
10603 enum rtx_code outer_op = UNKNOWN;
10604 HOST_WIDE_INT outer_const = 0;
10605 int complement_p = 0;
10606 rtx new_rtx, x;
10608 /* Make sure to truncate the "natural" shift on the way in. We don't
10609 want to do this inside the loop as it makes it more difficult to
10610 combine shifts. */
10611 if (SHIFT_COUNT_TRUNCATED)
10612 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10614 /* If we were given an invalid count, don't do anything except exactly
10615 what was requested. */
10617 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10618 return NULL_RTX;
10620 count = orig_count;
10622 /* Unless one of the branches of the `if' in this loop does a `continue',
10623 we will `break' the loop after the `if'. */
10625 while (count != 0)
10627 /* If we have an operand of (clobber (const_int 0)), fail. */
10628 if (GET_CODE (varop) == CLOBBER)
10629 return NULL_RTX;
10631 /* Convert ROTATERT to ROTATE. */
10632 if (code == ROTATERT)
10634 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10635 code = ROTATE;
10636 count = bitsize - count;
10639 shift_mode = result_mode;
10640 if (shift_mode != mode)
10642 /* We only change the modes of scalar shifts. */
10643 int_mode = as_a <scalar_int_mode> (mode);
10644 int_result_mode = as_a <scalar_int_mode> (result_mode);
10645 shift_mode = try_widen_shift_mode (code, varop, count,
10646 int_result_mode, int_mode,
10647 outer_op, outer_const);
10650 scalar_int_mode shift_unit_mode
10651 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10653 /* Handle cases where the count is greater than the size of the mode
10654 minus 1. For ASHIFTRT, use the size minus one as the count (this can
10655 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10656 take the count modulo the size. For other shifts, the result is
10657 zero.
10659 Since these shifts are being produced by the compiler by combining
10660 multiple operations, each of which are defined, we know what the
10661 result is supposed to be. */
10663 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10665 if (code == ASHIFTRT)
10666 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10667 else if (code == ROTATE || code == ROTATERT)
10668 count %= GET_MODE_PRECISION (shift_unit_mode);
10669 else
10671 /* We can't simply return zero because there may be an
10672 outer op. */
10673 varop = const0_rtx;
10674 count = 0;
10675 break;
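/* For example, in SImode an ASHIFTRT count of 40 is clamped to 31,
   since arithmetic right shifts by 31 or more all yield 0 or -1; a
   ROTATE count of 40 becomes 40 % 32 == 8; and an LSHIFTRT or ASHIFT
   of 40 bits leaves zero.  */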
10679 /* If we discovered we had to complement VAROP, leave. Making a NOT
10680 here would cause an infinite loop. */
10681 if (complement_p)
10682 break;
10684 if (shift_mode == shift_unit_mode)
10686 /* An arithmetic right shift of a quantity known to be -1 or 0
10687 is a no-op. */
10688 if (code == ASHIFTRT
10689 && (num_sign_bit_copies (varop, shift_unit_mode)
10690 == GET_MODE_PRECISION (shift_unit_mode)))
10692 count = 0;
10693 break;
10696 /* If we are doing an arithmetic right shift and discarding all but
10697 the sign bit copies, this is equivalent to doing a shift by the
10698 bitsize minus one. Convert it into that shift because it will
10699 often allow other simplifications. */
10701 if (code == ASHIFTRT
10702 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10703 >= GET_MODE_PRECISION (shift_unit_mode)))
10704 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10706 /* We simplify the tests below and elsewhere by converting
10707 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10708 `make_compound_operation' will convert it to an ASHIFTRT for
10709 those machines (such as VAX) that don't have an LSHIFTRT. */
10710 if (code == ASHIFTRT
10711 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10712 && val_signbit_known_clear_p (shift_unit_mode,
10713 nonzero_bits (varop,
10714 shift_unit_mode)))
10715 code = LSHIFTRT;
10717 if (((code == LSHIFTRT
10718 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10719 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10720 || (code == ASHIFT
10721 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10722 && !((nonzero_bits (varop, shift_unit_mode) << count)
10723 & GET_MODE_MASK (shift_unit_mode))))
10724 && !side_effects_p (varop))
10725 varop = const0_rtx;
10728 switch (GET_CODE (varop))
10730 case SIGN_EXTEND:
10731 case ZERO_EXTEND:
10732 case SIGN_EXTRACT:
10733 case ZERO_EXTRACT:
10734 new_rtx = expand_compound_operation (varop);
10735 if (new_rtx != varop)
10737 varop = new_rtx;
10738 continue;
10740 break;
10742 case MEM:
10743 /* The following rules apply only to scalars. */
10744 if (shift_mode != shift_unit_mode)
10745 break;
10746 int_mode = as_a <scalar_int_mode> (mode);
10748 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10749 minus the width of a smaller mode, we can do this with a
10750 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10751 if ((code == ASHIFTRT || code == LSHIFTRT)
10752 && ! mode_dependent_address_p (XEXP (varop, 0),
10753 MEM_ADDR_SPACE (varop))
10754 && ! MEM_VOLATILE_P (varop)
10755 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10756 .exists (&tmode)))
10758 new_rtx = adjust_address_nv (varop, tmode,
10759 BYTES_BIG_ENDIAN ? 0
10760 : count / BITS_PER_UNIT);
10762 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10763 : ZERO_EXTEND, int_mode, new_rtx);
10764 count = 0;
10765 continue;
10767 break;
10769 case SUBREG:
10770 /* The following rules apply only to scalars. */
10771 if (shift_mode != shift_unit_mode)
10772 break;
10773 int_mode = as_a <scalar_int_mode> (mode);
10774 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10776 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10777 the same number of words as what we've seen so far. Then store
10778 the widest mode in MODE. */
10779 if (subreg_lowpart_p (varop)
10780 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10781 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10782 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10783 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10784 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10786 varop = SUBREG_REG (varop);
10787 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10788 mode = inner_mode;
10789 continue;
10791 break;
10793 case MULT:
10794 /* Some machines use MULT instead of ASHIFT because MULT
10795 is cheaper. But it is still better on those machines to
10796 merge two shifts into one. */
10797 if (CONST_INT_P (XEXP (varop, 1))
10798 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10800 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10801 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10802 XEXP (varop, 0), log2_rtx);
10803 continue;
10805 break;
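/* For example, (mult X (const_int 8)) becomes (ashift X 3), since
   exact_log2 (8) == 3, and the new ASHIFT can then merge with the
   shift being simplified.  */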
10807 case UDIV:
10808 /* Similar, for when divides are cheaper. */
10809 if (CONST_INT_P (XEXP (varop, 1))
10810 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10812 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10813 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10814 XEXP (varop, 0), log2_rtx);
10815 continue;
10817 break;
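/* For example, (udiv X (const_int 4)) becomes (lshiftrt X 2), since
   unsigned division by 4 is a logical right shift by
   exact_log2 (4) == 2.  */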
10819 case ASHIFTRT:
10820 /* If we are extracting just the sign bit of an arithmetic
10821 right shift, that shift is not needed. However, the sign
10822 bit of a wider mode may be different from what would be
10823 interpreted as the sign bit in a narrower mode, so, if
10824 the result is narrower, don't discard the shift. */
10825 if (code == LSHIFTRT
10826 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10827 && (GET_MODE_UNIT_BITSIZE (result_mode)
10828 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10830 varop = XEXP (varop, 0);
10831 continue;
10834 /* fall through */
10836 case LSHIFTRT:
10837 case ASHIFT:
10838 case ROTATE:
10839 /* The following rules apply only to scalars. */
10840 if (shift_mode != shift_unit_mode)
10841 break;
10842 int_mode = as_a <scalar_int_mode> (mode);
10843 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10844 int_result_mode = as_a <scalar_int_mode> (result_mode);
10846 /* Here we have two nested shifts. The result is usually the
10847 AND of a new shift with a mask. We compute the result below. */
10848 if (CONST_INT_P (XEXP (varop, 1))
10849 && INTVAL (XEXP (varop, 1)) >= 0
10850 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10851 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10852 && HWI_COMPUTABLE_MODE_P (int_mode))
10854 enum rtx_code first_code = GET_CODE (varop);
10855 unsigned int first_count = INTVAL (XEXP (varop, 1));
10856 unsigned HOST_WIDE_INT mask;
10857 rtx mask_rtx;
10859 /* We have one common special case. We can't do any merging if
10860 the inner code is an ASHIFTRT of a smaller mode. However, if
10861 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10862 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10863 we can convert it to
10864 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10865 This simplifies certain SIGN_EXTEND operations. */
10866 if (code == ASHIFT && first_code == ASHIFTRT
10867 && count == (GET_MODE_PRECISION (int_result_mode)
10868 - GET_MODE_PRECISION (int_varop_mode)))
10870 /* C3 has the low-order C1 bits zero. */
10872 mask = GET_MODE_MASK (int_mode)
10873 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10875 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10876 XEXP (varop, 0), mask);
10877 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10878 int_result_mode, varop, count);
10879 count = first_count;
10880 code = ASHIFTRT;
10881 continue;
10884 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10885 than C1 high-order bits equal to the sign bit, we can convert
10886 this to either an ASHIFT or an ASHIFTRT depending on the
10887 two counts.
10889 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10891 if (code == ASHIFTRT && first_code == ASHIFT
10892 && int_varop_mode == shift_unit_mode
10893 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10894 > first_count))
10896 varop = XEXP (varop, 0);
10897 count -= first_count;
10898 if (count < 0)
10900 count = -count;
10901 code = ASHIFT;
10904 continue;
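/* For example, (ashiftrt (ashift FOO 8) 3) where FOO has more than 8
   sign-bit copies: the inner left shift cannot overflow, so the net
   effect is (ashift FOO 5) (count 3 - 8 == -5, negated with the
   opposite shift code).  */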
10907 /* There are some cases we can't do. If CODE is ASHIFTRT,
10908 we can only do this if FIRST_CODE is also ASHIFTRT.
10910 We can't do the case when CODE is ROTATE and FIRST_CODE is
10911 ASHIFTRT.
10913 If the mode of this shift is not the mode of the outer shift,
10914 we can't do this if either shift is a right shift or ROTATE.
10916 Finally, we can't do any of these if the mode is too wide
10917 unless the codes are the same.
10919 Handle the case where the shift codes are the same
10920 first. */
10922 if (code == first_code)
10924 if (int_varop_mode != int_result_mode
10925 && (code == ASHIFTRT || code == LSHIFTRT
10926 || code == ROTATE))
10927 break;
10929 count += first_count;
10930 varop = XEXP (varop, 0);
10931 continue;
10934 if (code == ASHIFTRT
10935 || (code == ROTATE && first_code == ASHIFTRT)
10936 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10937 || (int_varop_mode != int_result_mode
10938 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10939 || first_code == ROTATE
10940 || code == ROTATE)))
10941 break;
10943 /* To compute the mask to apply after the shift, shift the
10944 nonzero bits of the inner shift the same way the
10945 outer shift will. */
10947 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10948 int_result_mode);
10949 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10950 mask_rtx
10951 = simplify_const_binary_operation (code, int_result_mode,
10952 mask_rtx, count_rtx);
10954 /* Give up if we can't compute an outer operation to use. */
10955 if (mask_rtx == 0
10956 || !CONST_INT_P (mask_rtx)
10957 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10958 INTVAL (mask_rtx),
10959 int_result_mode, &complement_p))
10960 break;
10962 /* If the shifts are in the same direction, we add the
10963 counts. Otherwise, we subtract them. */
10964 if ((code == ASHIFTRT || code == LSHIFTRT)
10965 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10966 count += first_count;
10967 else
10968 count -= first_count;
10970 /* If COUNT is positive, the new shift is usually CODE,
10971 except for the two exceptions below, in which case it is
10972 FIRST_CODE. If the count is negative, FIRST_CODE should
10973 always be used.  */
10974 if (count > 0
10975 && ((first_code == ROTATE && code == ASHIFT)
10976 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10977 code = first_code;
10978 else if (count < 0)
10979 code = first_code, count = -count;
10981 varop = XEXP (varop, 0);
10982 continue;
10985 /* If we have (A << B << C) for any shift, we can convert this to
10986 (A << C << B). This wins if A is a constant. Only try this if
10987 B is not a constant. */
10989 else if (GET_CODE (varop) == code
10990 && CONST_INT_P (XEXP (varop, 0))
10991 && !CONST_INT_P (XEXP (varop, 1)))
10993 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10994 sure the result will be masked. See PR70222. */
10995 if (code == LSHIFTRT
10996 && int_mode != int_result_mode
10997 && !merge_outer_ops (&outer_op, &outer_const, AND,
10998 GET_MODE_MASK (int_result_mode)
10999 >> orig_count, int_result_mode,
11000 &complement_p))
11001 break;
11002 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
11003 up outer sign extension (often left and right shift) is
11004 hardly more efficient than the original. See PR70429. */
11005 if (code == ASHIFTRT && int_mode != int_result_mode)
11006 break;
11008 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
11009 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
11010 XEXP (varop, 0),
11011 count_rtx);
11012 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
11013 count = 0;
11014 continue;
11016 break;
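/* For example, (ashift (ashift (const_int 3) B) 2) with B not a
   constant becomes (ashift (const_int 12) B): the total shift count
   is B + 2 either way, and folding 3 << 2 == 12 exposes the
   constant.  */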
11018 case NOT:
11019 /* The following rules apply only to scalars. */
11020 if (shift_mode != shift_unit_mode)
11021 break;
11023 /* Make this fit the case below. */
11024 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
11025 continue;
11027 case IOR:
11028 case AND:
11029 case XOR:
11030 /* The following rules apply only to scalars. */
11031 if (shift_mode != shift_unit_mode)
11032 break;
11033 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11034 int_result_mode = as_a <scalar_int_mode> (result_mode);
11036 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
11037 with C the size of VAROP - 1 and the shift is logical if
11038 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11039 we have an (le X 0) operation. If we have an arithmetic shift
11040 and STORE_FLAG_VALUE is 1 or we have a logical shift with
11041 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
11043 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
11044 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
11045 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11046 && (code == LSHIFTRT || code == ASHIFTRT)
11047 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11048 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11050 count = 0;
11051 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
11052 const0_rtx);
11054 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11055 varop = gen_rtx_NEG (int_varop_mode, varop);
11057 continue;
11060 /* If we have (shift (logical)), move the logical to the outside
11061 to allow it to possibly combine with another logical and the
11062 shift to combine with another shift. This also canonicalizes to
11063 what a ZERO_EXTRACT looks like. Also, some machines have
11064 (and (shift)) insns. */
11066 if (CONST_INT_P (XEXP (varop, 1))
11067 /* We can't do this if we have (ashiftrt (xor)) and the
11068 constant has its sign bit set in shift_unit_mode with
11069 shift_unit_mode wider than result_mode. */
11070 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11071 && int_result_mode != shift_unit_mode
11072 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11073 shift_unit_mode) < 0)
11074 && (new_rtx = simplify_const_binary_operation
11075 (code, int_result_mode,
11076 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11077 gen_int_shift_amount (int_result_mode, count))) != 0
11078 && CONST_INT_P (new_rtx)
11079 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
11080 INTVAL (new_rtx), int_result_mode,
11081 &complement_p))
11083 varop = XEXP (varop, 0);
11084 continue;
11087 /* If we can't do that, try to simplify the shift in each arm of the
11088 logical expression, make a new logical expression, and apply
11089 the inverse distributive law. This also can't be done for
11090 (ashiftrt (xor)) where we've widened the shift and the constant
11091 changes the sign bit. */
11092 if (CONST_INT_P (XEXP (varop, 1))
11093 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11094 && int_result_mode != shift_unit_mode
11095 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11096 shift_unit_mode) < 0))
11098 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11099 XEXP (varop, 0), count);
11100 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11101 XEXP (varop, 1), count);
11103 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11104 lhs, rhs);
11105 varop = apply_distributive_law (varop);
11107 count = 0;
11108 continue;
11110 break;
11112 case EQ:
11113 /* The following rules apply only to scalars. */
11114 if (shift_mode != shift_unit_mode)
11115 break;
11116 int_result_mode = as_a <scalar_int_mode> (result_mode);
11118 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11119 says that the sign bit can be tested, FOO has mode MODE, C is
11120 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11121 that may be nonzero. */
11122 if (code == LSHIFTRT
11123 && XEXP (varop, 1) == const0_rtx
11124 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11125 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11126 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11127 && STORE_FLAG_VALUE == -1
11128 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11129 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11130 int_result_mode, &complement_p))
11132 varop = XEXP (varop, 0);
11133 count = 0;
11134 continue;
11136 break;
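/* For example, in SImode with STORE_FLAG_VALUE == -1, if FOO is known
   to be 0 or 1, then (lshiftrt (eq FOO 0) 31) yields 1 for FOO == 0
   and 0 for FOO == 1, which is exactly (xor FOO 1).  */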
11138 case NEG:
11139 /* The following rules apply only to scalars. */
11140 if (shift_mode != shift_unit_mode)
11141 break;
11142 int_result_mode = as_a <scalar_int_mode> (result_mode);
11144 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11145 than the number of bits in the mode is equivalent to A. */
11146 if (code == LSHIFTRT
11147 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11148 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11150 varop = XEXP (varop, 0);
11151 count = 0;
11152 continue;
11155 /* NEG commutes with ASHIFT since it is multiplication. Move the
11156 NEG outside to allow shifts to combine. */
11157 if (code == ASHIFT
11158 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11159 int_result_mode, &complement_p))
11161 varop = XEXP (varop, 0);
11162 continue;
11164 break;
11166 case PLUS:
11167 /* The following rules apply only to scalars. */
11168 if (shift_mode != shift_unit_mode)
11169 break;
11170 int_result_mode = as_a <scalar_int_mode> (result_mode);
11172 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11173 is one less than the number of bits in the mode is
11174 equivalent to (xor A 1). */
11175 if (code == LSHIFTRT
11176 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11177 && XEXP (varop, 1) == constm1_rtx
11178 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11179 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11180 int_result_mode, &complement_p))
11182 count = 0;
11183 varop = XEXP (varop, 0);
11184 continue;
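/* For example, in SImode with A known to be 0 or 1,
   (lshiftrt (plus A (const_int -1)) 31) gives 1 when A == 0 (the sum
   is -1, whose top bit is set) and 0 when A == 1, i.e. (xor A 1).  */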
11187 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11188 that might be nonzero in BAR are those being shifted out and those
11189 bits are known zero in FOO, we can replace the PLUS with FOO.
11190 Similarly in the other operand order. This code occurs when
11191 we are computing the size of a variable-size array. */
11193 if ((code == ASHIFTRT || code == LSHIFTRT)
11194 && count < HOST_BITS_PER_WIDE_INT
11195 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11196 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11197 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11199 varop = XEXP (varop, 0);
11200 continue;
11202 else if ((code == ASHIFTRT || code == LSHIFTRT)
11203 && count < HOST_BITS_PER_WIDE_INT
11204 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11205 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11206 >> count) == 0
11207 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11208 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11210 varop = XEXP (varop, 1);
11211 continue;
11214 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11215 if (code == ASHIFT
11216 && CONST_INT_P (XEXP (varop, 1))
11217 && (new_rtx = simplify_const_binary_operation
11218 (ASHIFT, int_result_mode,
11219 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11220 gen_int_shift_amount (int_result_mode, count))) != 0
11221 && CONST_INT_P (new_rtx)
11222 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11223 INTVAL (new_rtx), int_result_mode,
11224 &complement_p))
11226 varop = XEXP (varop, 0);
11227 continue;
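/* For example, (ashift (plus X (const_int 5)) 2) is
   (plus (ashift X 2) (const_int 20)): (X + 5) * 4 == 4*X + 20, so the
   shifted constant 5 << 2 moves into the outer PLUS.  */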
11230 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11231 signbit', and attempt to change the PLUS to an XOR and move it to
11232 the outer operation as is done above in the AND/IOR/XOR case
11233 leg for shift(logical). See details in logical handling above
11234 for reasoning in doing so. */
11235 if (code == LSHIFTRT
11236 && CONST_INT_P (XEXP (varop, 1))
11237 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11238 && (new_rtx = simplify_const_binary_operation
11239 (code, int_result_mode,
11240 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11241 gen_int_shift_amount (int_result_mode, count))) != 0
11242 && CONST_INT_P (new_rtx)
11243 && merge_outer_ops (&outer_op, &outer_const, XOR,
11244 INTVAL (new_rtx), int_result_mode,
11245 &complement_p))
11247 varop = XEXP (varop, 0);
11248 continue;
11251 break;
11253 case MINUS:
11254 /* The following rules apply only to scalars. */
11255 if (shift_mode != shift_unit_mode)
11256 break;
11257 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11259 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11260 with C the size of VAROP - 1 and the shift is logical if
11261 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11262 we have a (gt X 0) operation. If the shift is arithmetic with
11263 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11264 we have a (neg (gt X 0)) operation. */
11266 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11267 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11268 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11269 && (code == LSHIFTRT || code == ASHIFTRT)
11270 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11271 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11272 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11274 count = 0;
11275 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11276 const0_rtx);
11278 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11279 varop = gen_rtx_NEG (int_varop_mode, varop);
11281 continue;
11283 break;
11285 case TRUNCATE:
11286 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11287 if the truncate does not affect the value. */
11288 if (code == LSHIFTRT
11289 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11290 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11291 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11292 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11293 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11295 rtx varop_inner = XEXP (varop, 0);
11296 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11297 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11298 new_count);
11299 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11300 XEXP (varop_inner, 0),
11301 new_count_rtx);
11302 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11303 count = 0;
11304 continue;
11306 break;
11308 default:
11309 break;
11312 break;
11315 shift_mode = result_mode;
11316 if (shift_mode != mode)
11318 /* We only change the modes of scalar shifts. */
11319 int_mode = as_a <scalar_int_mode> (mode);
11320 int_result_mode = as_a <scalar_int_mode> (result_mode);
11321 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11322 int_mode, outer_op, outer_const);
11325 /* We have now finished analyzing the shift. The result should be
11326 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11327 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11328 to the result of the shift. OUTER_CONST is the relevant constant,
11329 but we must turn off all bits turned off in the shift. */
11331 if (outer_op == UNKNOWN
11332 && orig_code == code && orig_count == count
11333 && varop == orig_varop
11334 && shift_mode == GET_MODE (varop))
11335 return NULL_RTX;
11337 /* Make a SUBREG if necessary. If we can't make it, fail. */
11338 varop = gen_lowpart (shift_mode, varop);
11339 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11340 return NULL_RTX;
11342 /* If we have an outer operation and we just made a shift, it is
11343 possible that we could have simplified the shift were it not
11344 for the outer operation. So try to do the simplification
11345 recursively. */
11347 if (outer_op != UNKNOWN)
11348 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11349 else
11350 x = NULL_RTX;
11352 if (x == NULL_RTX)
11353 x = simplify_gen_binary (code, shift_mode, varop,
11354 gen_int_shift_amount (shift_mode, count));
11356 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11357 turn off all the bits that the shift would have turned off. */
11358 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11359 /* We only change the modes of scalar shifts. */
11360 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11361 x, GET_MODE_MASK (result_mode) >> orig_count);
11363 /* Do the remainder of the processing in RESULT_MODE. */
11364 x = gen_lowpart_or_truncate (result_mode, x);
11366 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11367 operation. */
11368 if (complement_p)
11369 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11371 if (outer_op != UNKNOWN)
11373 int_result_mode = as_a <scalar_int_mode> (result_mode);
11375 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11376 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11377 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11379 if (outer_op == AND)
11380 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11381 else if (outer_op == SET)
11383 /* This means that we have determined that the result is
11384 equivalent to a constant. This should be rare. */
11385 if (!side_effects_p (x))
11386 x = GEN_INT (outer_const);
11388 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11389 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11390 else
11391 x = simplify_gen_binary (outer_op, int_result_mode, x,
11392 GEN_INT (outer_const));
11395 return x;
11398 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11399 The result of the shift is RESULT_MODE. If we cannot simplify it,
11400 return X or, if it is NULL, synthesize the expression with
11401 simplify_gen_binary. Otherwise, return a simplified value.
11403 The shift is normally computed in the widest mode we find in VAROP, as
11404 long as it isn't a different number of words than RESULT_MODE. Exceptions
11405 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11407 static rtx
11408 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11409 rtx varop, int count)
11411 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11412 if (tem)
11413 return tem;
11415 if (!x)
11416 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11417 gen_int_shift_amount (GET_MODE (varop), count));
11418 if (GET_MODE (x) != result_mode)
11419 x = gen_lowpart (result_mode, x);
11420 return x;
11424 /* A subroutine of recog_for_combine. See there for arguments and
11425 return value. */
11427 static int
11428 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11430 rtx pat = *pnewpat;
11431 rtx pat_without_clobbers;
11432 int insn_code_number;
11433 int num_clobbers_to_add = 0;
11434 int i;
11435 rtx notes = NULL_RTX;
11436 rtx old_notes, old_pat;
11437 int old_icode;
11439 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11440 we use to indicate that something didn't match. If we find such a
11441 thing, force rejection. */
11442 if (GET_CODE (pat) == PARALLEL)
11443 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11444 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11445 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11446 return -1;
11448 old_pat = PATTERN (insn);
11449 old_notes = REG_NOTES (insn);
11450 PATTERN (insn) = pat;
11451 REG_NOTES (insn) = NULL_RTX;
11453 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11454 if (dump_file && (dump_flags & TDF_DETAILS))
11456 if (insn_code_number < 0)
11457 fputs ("Failed to match this instruction:\n", dump_file);
11458 else
11459 fputs ("Successfully matched this instruction:\n", dump_file);
11460 print_rtl_single (dump_file, pat);
11463 /* If it isn't, there is the possibility that we previously had an insn
11464 that clobbered some register as a side effect, but the combined
11465 insn doesn't need to do that. So try once more without the clobbers
11466 unless this represents an ASM insn. */
11468 if (insn_code_number < 0 && ! check_asm_operands (pat)
11469 && GET_CODE (pat) == PARALLEL)
11471 int pos;
11473 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11474 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11476 if (i != pos)
11477 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11478 pos++;
11481 SUBST_INT (XVECLEN (pat, 0), pos);
11483 if (pos == 1)
11484 pat = XVECEXP (pat, 0, 0);
11486 PATTERN (insn) = pat;
11487 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11488 if (dump_file && (dump_flags & TDF_DETAILS))
11490 if (insn_code_number < 0)
11491 fputs ("Failed to match this instruction:\n", dump_file);
11492 else
11493 fputs ("Successfully matched this instruction:\n", dump_file);
11494 print_rtl_single (dump_file, pat);
11498 pat_without_clobbers = pat;
11500 PATTERN (insn) = old_pat;
11501 REG_NOTES (insn) = old_notes;
11503 /* Recognize all noop sets; these will be killed by a followup pass. */
11504 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11505 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11507 /* If we had any clobbers to add, make a new pattern that contains
11508 them. Then check to make sure that all of them are dead. */
11509 if (num_clobbers_to_add)
11511 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11512 rtvec_alloc (GET_CODE (pat) == PARALLEL
11513 ? (XVECLEN (pat, 0)
11514 + num_clobbers_to_add)
11515 : num_clobbers_to_add + 1));
11517 if (GET_CODE (pat) == PARALLEL)
11518 for (i = 0; i < XVECLEN (pat, 0); i++)
11519 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11520 else
11521 XVECEXP (newpat, 0, 0) = pat;
11523 add_clobbers (newpat, insn_code_number);
11525 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11526 i < XVECLEN (newpat, 0); i++)
11528 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11529 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11530 return -1;
11531 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11533 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11534 notes = alloc_reg_note (REG_UNUSED,
11535 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11538 pat = newpat;
11541 if (insn_code_number >= 0
11542 && insn_code_number != NOOP_MOVE_INSN_CODE)
11544 old_pat = PATTERN (insn);
11545 old_notes = REG_NOTES (insn);
11546 old_icode = INSN_CODE (insn);
11547 PATTERN (insn) = pat;
11548 REG_NOTES (insn) = notes;
11549 INSN_CODE (insn) = insn_code_number;
11551 /* Allow targets to reject combined insn. */
11552 if (!targetm.legitimate_combined_insn (insn))
11554 if (dump_file && (dump_flags & TDF_DETAILS))
11555 fputs ("Instruction not appropriate for target.",
11556 dump_file);
11558 /* Callers expect recog_for_combine to strip
11559 clobbers from the pattern on failure. */
11560 pat = pat_without_clobbers;
11561 notes = NULL_RTX;
11563 insn_code_number = -1;
11566 PATTERN (insn) = old_pat;
11567 REG_NOTES (insn) = old_notes;
11568 INSN_CODE (insn) = old_icode;
11571 *pnewpat = pat;
11572 *pnotes = notes;
11574 return insn_code_number;
11577 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11578 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11579 Return whether anything was so changed. */
11581 static bool
11582 change_zero_ext (rtx pat)
11584 bool changed = false;
11585 rtx *src = &SET_SRC (pat);
11587 subrtx_ptr_iterator::array_type array;
11588 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11590 rtx x = **iter;
11591 scalar_int_mode mode, inner_mode;
11592 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11593 continue;
11594 int size;
11596 if (GET_CODE (x) == ZERO_EXTRACT
11597 && CONST_INT_P (XEXP (x, 1))
11598 && CONST_INT_P (XEXP (x, 2))
11599 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11600 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11602 size = INTVAL (XEXP (x, 1));
11604 int start = INTVAL (XEXP (x, 2));
11605 if (BITS_BIG_ENDIAN)
11606 start = GET_MODE_PRECISION (inner_mode) - size - start;
11608 if (start != 0)
11609 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11610 gen_int_shift_amount (inner_mode, start));
11611 else
11612 x = XEXP (x, 0);
11614 if (mode != inner_mode)
11616 if (REG_P (x) && HARD_REGISTER_P (x)
11617 && !can_change_dest_mode (x, 0, mode))
11618 continue;
11620 x = gen_lowpart_SUBREG (mode, x);
11623 else if (GET_CODE (x) == ZERO_EXTEND
11624 && GET_CODE (XEXP (x, 0)) == SUBREG
11625 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11626 && !paradoxical_subreg_p (XEXP (x, 0))
11627 && subreg_lowpart_p (XEXP (x, 0)))
11629 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11630 size = GET_MODE_PRECISION (inner_mode);
11631 x = SUBREG_REG (XEXP (x, 0));
11632 if (GET_MODE (x) != mode)
11634 if (REG_P (x) && HARD_REGISTER_P (x)
11635 && !can_change_dest_mode (x, 0, mode))
11636 continue;
11638 x = gen_lowpart_SUBREG (mode, x);
11641 else if (GET_CODE (x) == ZERO_EXTEND
11642 && REG_P (XEXP (x, 0))
11643 && HARD_REGISTER_P (XEXP (x, 0))
11644 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11646 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11647 size = GET_MODE_PRECISION (inner_mode);
11648 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11650 else
11651 continue;
11653 if (!(GET_CODE (x) == LSHIFTRT
11654 && CONST_INT_P (XEXP (x, 1))
11655 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11657 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11658 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11661 SUBST (**iter, x);
11662 changed = true;
11665 if (changed)
11666 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11667 maybe_swap_commutative_operands (**iter);
11669 rtx *dst = &SET_DEST (pat);
11670 scalar_int_mode mode;
11671 if (GET_CODE (*dst) == ZERO_EXTRACT
11672 && REG_P (XEXP (*dst, 0))
11673 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11674 && CONST_INT_P (XEXP (*dst, 1))
11675 && CONST_INT_P (XEXP (*dst, 2)))
11677 rtx reg = XEXP (*dst, 0);
11678 int width = INTVAL (XEXP (*dst, 1));
11679 int offset = INTVAL (XEXP (*dst, 2));
11680 int reg_width = GET_MODE_PRECISION (mode);
11681 if (BITS_BIG_ENDIAN)
11682 offset = reg_width - width - offset;
11684 rtx x, y, z, w;
11685 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11686 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11687 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11688 if (offset)
11689 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11690 else
11691 y = SET_SRC (pat);
11692 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11693 w = gen_rtx_IOR (mode, x, z);
11694 SUBST (SET_DEST (pat), reg);
11695 SUBST (SET_SRC (pat), w);
11697 changed = true;
11700 return changed;
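/* For example, with !BITS_BIG_ENDIAN, change_zero_ext rewrites
   (zero_extract:SI X (const_int 8) (const_int 4)) as
   (and:SI (lshiftrt:SI X (const_int 4)) (const_int 0xff)): shift the
   field down to bit 0, then mask to its 8-bit width.  */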
11703 /* Like recog, but we receive the address of a pointer to a new pattern.
11704 We try to match the rtx that the pointer points to.
11705 If that fails, we may try to modify or replace the pattern,
11706 storing the replacement into the same pointer object.
11708 Modifications include deletion or addition of CLOBBERs. If the
11709 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11710 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11711 (and undo if that fails).
11713 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11714 the CLOBBERs are placed.
11716 The value is the final insn code from the pattern ultimately matched,
11717 or -1. */
11719 static int
11720 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11722 rtx pat = *pnewpat;
11723 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11724 if (insn_code_number >= 0 || check_asm_operands (pat))
11725 return insn_code_number;
11727 void *marker = get_undo_marker ();
11728 bool changed = false;
11730 if (GET_CODE (pat) == SET)
11731 changed = change_zero_ext (pat);
11732 else if (GET_CODE (pat) == PARALLEL)
11734 int i;
11735 for (i = 0; i < XVECLEN (pat, 0); i++)
11737 rtx set = XVECEXP (pat, 0, i);
11738 if (GET_CODE (set) == SET)
11739 changed |= change_zero_ext (set);
11743 if (changed)
11745 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11747 if (insn_code_number < 0)
11748 undo_to_marker (marker);
11751 return insn_code_number;
11754 /* Like gen_lowpart_general but for use by combine. In combine it
11755 is not possible to create any new pseudoregs. However, it is
11756 safe to create invalid memory addresses, because combine will
11757 try to recognize them and all they will do is make the combine
11758 attempt fail.
11760 If for some reason this cannot do its job, an rtx
11761 (clobber (const_int 0)) is returned.
11762 An insn containing that will not be recognized. */
11764 static rtx
11765 gen_lowpart_for_combine (machine_mode omode, rtx x)
11767 machine_mode imode = GET_MODE (x);
11768 rtx result;
11770 if (omode == imode)
11771 return x;
11773 /* We can only support MODE being wider than a word if X is a
11774 constant integer or has a mode the same size. */
11775 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11776 && ! (CONST_SCALAR_INT_P (x)
11777 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11778 goto fail;
11780 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11781 won't know what to do. So we will strip off the SUBREG here and
11782 process normally. */
11783 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11785 x = SUBREG_REG (x);
11787 /* For use in case we fall down into the address adjustments
11788 further below, we need to adjust the known mode of x, i.e.
11789 imode, since we just changed x. */
11790 imode = GET_MODE (x);
11792 if (imode == omode)
11793 return x;
11796 result = gen_lowpart_common (omode, x);
11798 if (result)
11799 return result;
11801 if (MEM_P (x))
11803 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11804 address. */
11805 if (MEM_VOLATILE_P (x)
11806 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11807 goto fail;
11809 /* If we want to refer to something bigger than the original memref,
11810 generate a paradoxical subreg instead. That will force a reload
11811 of the original memref X. */
11812 if (paradoxical_subreg_p (omode, imode))
11813 return gen_rtx_SUBREG (omode, x, 0);
11815 poly_int64 offset = byte_lowpart_offset (omode, imode);
11816 return adjust_address_nv (x, omode, offset);
11819 /* If X is a comparison operator, rewrite it in a new mode. This
11820 probably won't match, but may allow further simplifications. */
11821 else if (COMPARISON_P (x)
11822 && SCALAR_INT_MODE_P (imode)
11823 && SCALAR_INT_MODE_P (omode))
11824 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11826 /* If we couldn't simplify X any other way, just enclose it in a
11827 SUBREG. Normally, this SUBREG won't match, but some patterns may
11828 include an explicit SUBREG or we may simplify it further in combine. */
11829 else
11831 rtx res;
11833 if (imode == VOIDmode)
11835 imode = int_mode_for_mode (omode).require ();
11836 x = gen_lowpart_common (imode, x);
11837 if (x == NULL)
11838 goto fail;
11840 res = lowpart_subreg (omode, x, imode);
11841 if (res)
11842 return res;
11845 fail:
11846 return gen_rtx_CLOBBER (omode, const0_rtx);
11849 /* Try to simplify a comparison between OP0 and a constant OP1,
11850 where CODE is the comparison code that will be tested, into a
11851 (CODE OP0 const0_rtx) form.
11853 The result is a possibly different comparison code to use.
11854 *POP1 may be updated. */
11856 static enum rtx_code
11857 simplify_compare_const (enum rtx_code code, machine_mode mode,
11858 rtx op0, rtx *pop1)
11860 scalar_int_mode int_mode;
11861 HOST_WIDE_INT const_op = INTVAL (*pop1);
11863 /* Get the constant we are comparing against and turn off all bits
11864 not on in our mode. */
11865 if (mode != VOIDmode)
11866 const_op = trunc_int_for_mode (const_op, mode);
11868 /* If we are comparing against a constant power of two and the value
11869 being compared can only have that single bit nonzero (e.g., it was
11870 `and'ed with that bit), we can replace this with a comparison
11871 with zero. */
11872 if (const_op
11873 && (code == EQ || code == NE || code == GE || code == GEU
11874 || code == LT || code == LTU)
11875 && is_a <scalar_int_mode> (mode, &int_mode)
11876 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11877 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11878 && (nonzero_bits (op0, int_mode)
11879 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11881 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11882 const_op = 0;
11885 /* Similarly, if we are comparing a value known to be either -1 or
11886 0 with -1, change it to the opposite comparison against zero. */
11887 if (const_op == -1
11888 && (code == EQ || code == NE || code == GT || code == LE
11889 || code == GEU || code == LTU)
11890 && is_a <scalar_int_mode> (mode, &int_mode)
11891 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11893 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11894 const_op = 0;
11897 /* Do some canonicalizations based on the comparison code. We prefer
11898 comparisons against zero and then prefer equality comparisons.
11899 If we can reduce the size of a constant, we will do that too. */
11900 switch (code)
11902 case LT:
11903 /* < C is equivalent to <= (C - 1) */
11904 if (const_op > 0)
11906 const_op -= 1;
11907 code = LE;
11908 /* ... fall through to LE case below. */
11909 gcc_fallthrough ();
11911 else
11912 break;
11914 case LE:
11915 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11916 if (const_op < 0)
11918 const_op += 1;
11919 code = LT;
11922 /* If we are doing a <= 0 comparison on a value known to have
11923 a zero sign bit, we can replace this with == 0. */
11924 else if (const_op == 0
11925 && is_a <scalar_int_mode> (mode, &int_mode)
11926 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11927 && (nonzero_bits (op0, int_mode)
11928 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11929 == 0)
11930 code = EQ;
11931 break;
11933 case GE:
11934 /* >= C is equivalent to > (C - 1). */
11935 if (const_op > 0)
11937 const_op -= 1;
11938 code = GT;
11939 /* ... fall through to GT below. */
11940 gcc_fallthrough ();
11942 else
11943 break;
11945 case GT:
11946 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11947 if (const_op < 0)
11949 const_op += 1;
11950 code = GE;
11953 /* If we are doing a > 0 comparison on a value known to have
11954 a zero sign bit, we can replace this with != 0. */
11955 else if (const_op == 0
11956 && is_a <scalar_int_mode> (mode, &int_mode)
11957 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11958 && (nonzero_bits (op0, int_mode)
11959 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11960 == 0)
11961 code = NE;
11962 break;
11964 case LTU:
11965 /* < C is equivalent to <= (C - 1). */
11966 if (const_op > 0)
11968 const_op -= 1;
11969 code = LEU;
11970 /* ... fall through ... */
11971 gcc_fallthrough ();
11973 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11974 else if (is_a <scalar_int_mode> (mode, &int_mode)
11975 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11976 && ((unsigned HOST_WIDE_INT) const_op
11977 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11979 const_op = 0;
11980 code = GE;
11981 break;
11983 else
11984 break;
11986 case LEU:
11987 /* unsigned <= 0 is equivalent to == 0 */
11988 if (const_op == 0)
11989 code = EQ;
11990 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11991 else if (is_a <scalar_int_mode> (mode, &int_mode)
11992 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11993 && ((unsigned HOST_WIDE_INT) const_op
11994 == ((HOST_WIDE_INT_1U
11995 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11997 const_op = 0;
11998 code = GE;
12000 break;
12002 case GEU:
12003 /* >= C is equivalent to > (C - 1). */
12004 if (const_op > 1)
12006 const_op -= 1;
12007 code = GTU;
12008 /* ... fall through ... */
12009 gcc_fallthrough ();
12012 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
12013 else if (is_a <scalar_int_mode> (mode, &int_mode)
12014 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12015 && ((unsigned HOST_WIDE_INT) const_op
12016 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
12018 const_op = 0;
12019 code = LT;
12020 break;
12022 else
12023 break;
12025 case GTU:
12026 /* unsigned > 0 is equivalent to != 0 */
12027 if (const_op == 0)
12028 code = NE;
12029 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
12030 else if (is_a <scalar_int_mode> (mode, &int_mode)
12031 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12032 && ((unsigned HOST_WIDE_INT) const_op
12033 == (HOST_WIDE_INT_1U
12034 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12036 const_op = 0;
12037 code = LT;
12039 break;
12041 default:
12042 break;
12045 *pop1 = GEN_INT (const_op);
12046 return code;
12049 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
12050 comparison code that will be tested.
12052 The result is a possibly different comparison code to use. *POP0 and
12053 *POP1 may be updated.
12055 It is possible that we might detect that a comparison is either always
12056 true or always false. However, we do not perform general constant
12057 folding in combine, so this knowledge isn't useful. Such tautologies
12058 should have been detected earlier. Hence we ignore all such cases. */
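/* A small illustration of the interface: with CODE == EQ, *POP0 ==
   (minus A B) and *POP1 == (const_int 0), the MINUS case below rewrites
   the comparison so that EQ is returned with *POP0 == A and *POP1 == B.  */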
12060 static enum rtx_code
12061 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
12063 rtx op0 = *pop0;
12064 rtx op1 = *pop1;
12065 rtx tem, tem1;
12066 int i;
12067 scalar_int_mode mode, inner_mode, tmode;
12068 opt_scalar_int_mode tmode_iter;
12070 /* Try a few ways of applying the same transformation to both operands. */
12071 while (1)
12073 /* The test below this one won't handle SIGN_EXTENDs on these machines,
12074 so check specially. */
12075 if (!WORD_REGISTER_OPERATIONS
12076 && code != GTU && code != GEU && code != LTU && code != LEU
12077 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
12078 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12079 && GET_CODE (XEXP (op1, 0)) == ASHIFT
12080 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12081 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12082 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12083 && (is_a <scalar_int_mode>
12084 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12085 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12086 && CONST_INT_P (XEXP (op0, 1))
12087 && XEXP (op0, 1) == XEXP (op1, 1)
12088 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12089 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12090 && (INTVAL (XEXP (op0, 1))
12091 == (GET_MODE_PRECISION (mode)
12092 - GET_MODE_PRECISION (inner_mode))))
12094 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12095 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12098 /* If both operands are the same constant shift, see if we can ignore the
12099 shift. We can if the shift is a rotate or if the bits shifted out of
12100 this shift are known to be zero for both inputs and if the type of
12101 comparison is compatible with the shift. */
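/* E.g. (eq (lshiftrt A 2) (lshiftrt B 2)) can become (eq A B) when the
   two low-order bits of both A and B are known to be zero; a ROTATE
   loses no bits, so for it only the EQ/NE restriction applies.  */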
12102 if (GET_CODE (op0) == GET_CODE (op1)
12103 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12104 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12105 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12106 && (code != GT && code != LT && code != GE && code != LE))
12107 || (GET_CODE (op0) == ASHIFTRT
12108 && (code != GTU && code != LTU
12109 && code != GEU && code != LEU)))
12110 && CONST_INT_P (XEXP (op0, 1))
12111 && INTVAL (XEXP (op0, 1)) >= 0
12112 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12113 && XEXP (op0, 1) == XEXP (op1, 1))
12115 machine_mode mode = GET_MODE (op0);
12116 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12117 int shift_count = INTVAL (XEXP (op0, 1));
12119 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12120 mask &= (mask >> shift_count) << shift_count;
12121 else if (GET_CODE (op0) == ASHIFT)
12122 mask = (mask & (mask << shift_count)) >> shift_count;
12124 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12125 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12126 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12127 else
12128 break;
12131 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12132 SUBREGs are of the same mode, and, in both cases, the AND would
12133 be redundant if the comparison was done in the narrower mode,
12134 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12135 and the operand's possibly nonzero bits are 0xffffff01; in that case
12136 if we only care about QImode, we don't need the AND). This case
12137 occurs if the output mode of an scc insn is not SImode and
12138 STORE_FLAG_VALUE == 1 (e.g., the 386).
12140 Similarly, check for a case where the AND's are ZERO_EXTEND
12141 operations from some narrower mode even though a SUBREG is not
12142 present. */
12144 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12145 && CONST_INT_P (XEXP (op0, 1))
12146 && CONST_INT_P (XEXP (op1, 1)))
12148 rtx inner_op0 = XEXP (op0, 0);
12149 rtx inner_op1 = XEXP (op1, 0);
12150 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12151 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12152 int changed = 0;
12154 if (paradoxical_subreg_p (inner_op0)
12155 && GET_CODE (inner_op1) == SUBREG
12156 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12157 && (GET_MODE (SUBREG_REG (inner_op0))
12158 == GET_MODE (SUBREG_REG (inner_op1)))
12159 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12160 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12161 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12162 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12164 op0 = SUBREG_REG (inner_op0);
12165 op1 = SUBREG_REG (inner_op1);
12167 /* The resulting comparison is always unsigned since we masked
12168 off the original sign bit. */
12169 code = unsigned_condition (code);
12171 changed = 1;
12174 else if (c0 == c1)
12175 FOR_EACH_MODE_UNTIL (tmode,
12176 as_a <scalar_int_mode> (GET_MODE (op0)))
12177 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12179 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12180 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12181 code = unsigned_condition (code);
12182 changed = 1;
12183 break;
12186 if (! changed)
12187 break;
12190 /* If both operands are NOT, we can strip off the outer operation
12191 and adjust the comparison code for swapped operands; similarly for
12192 NEG, except that this must be an equality comparison. */
12193 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12194 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12195 && (code == EQ || code == NE)))
12196 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12198 else
12199 break;
12202 /* If the first operand is a constant, swap the operands and adjust the
12203 comparison code appropriately, but don't do this if the second operand
12204 is already a constant integer. */
12205 if (swap_commutative_operands_p (op0, op1))
12207 std::swap (op0, op1);
12208 code = swap_condition (code);
12211 /* We now enter a loop during which we will try to simplify the comparison.
12212 For the most part, we are only concerned with comparisons with zero,
12213 but some things may really be comparisons with zero but not start
12214 out looking that way. */
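/* E.g. (LE x -1) is really a test of the sign bit; the LE case in
   simplify_compare_const above rewrites it to (LT x 0).  */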
12216 while (CONST_INT_P (op1))
12218 machine_mode raw_mode = GET_MODE (op0);
12219 scalar_int_mode int_mode;
12220 int equality_comparison_p;
12221 int sign_bit_comparison_p;
12222 int unsigned_comparison_p;
12223 HOST_WIDE_INT const_op;
12225 /* We only want to handle integral modes. This catches VOIDmode,
12226 CCmode, and the floating-point modes. An exception is that we
12227 can handle VOIDmode if OP0 is a COMPARE or a comparison
12228 operation. */
12230 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12231 && ! (raw_mode == VOIDmode
12232 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12233 break;
12235 /* Try to simplify the compare to constant, possibly changing the
12236 comparison op, and/or changing op1 to zero. */
12237 code = simplify_compare_const (code, raw_mode, op0, &op1);
12238 const_op = INTVAL (op1);
12240 /* Compute some predicates to simplify code below. */
12242 equality_comparison_p = (code == EQ || code == NE);
12243 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12244 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12245 || code == GEU);
12247 /* If this is a sign bit comparison and we can do arithmetic in
12248 MODE, note that we will only need the sign bit of OP0.
12249 if (sign_bit_comparison_p
12250 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12251 && HWI_COMPUTABLE_MODE_P (int_mode))
12252 op0 = force_to_mode (op0, int_mode,
12253 HOST_WIDE_INT_1U
12254 << (GET_MODE_PRECISION (int_mode) - 1),
12257 if (COMPARISON_P (op0))
12259 /* We can't do anything if OP0 is a condition code value, rather
12260 than an actual data value. */
12261 if (const_op != 0
12262 || CC0_P (XEXP (op0, 0))
12263 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12264 break;
12266 /* Get the two operands being compared. */
12267 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12268 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12269 else
12270 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12272 /* Check for the cases where we simply want the result of the
12273 earlier test or the opposite of that result. */
12274 if (code == NE || code == EQ
12275 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12276 && (code == LT || code == GE)))
12278 enum rtx_code new_code;
12279 if (code == LT || code == NE)
12280 new_code = GET_CODE (op0);
12281 else
12282 new_code = reversed_comparison_code (op0, NULL);
12284 if (new_code != UNKNOWN)
12286 code = new_code;
12287 op0 = tem;
12288 op1 = tem1;
12289 continue;
12292 break;
12295 if (raw_mode == VOIDmode)
12296 break;
12297 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12299 /* Now try cases based on the opcode of OP0. If none of the cases
12300 does a "continue", we exit this loop immediately after the
12301 switch. */
12303 unsigned int mode_width = GET_MODE_PRECISION (mode);
12304 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12305 switch (GET_CODE (op0))
12307 case ZERO_EXTRACT:
12308 /* If we are extracting a single bit from a variable position in
12309 a constant that has only a single bit set and are comparing it
12310 with zero, we can convert this into an equality comparison
12311 between the position and the location of the single bit. */
12312 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12313 have already reduced the shift count modulo the word size. */
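/* Illustration: (eq (zero_extract (const_int 8) (const_int 1) X)
   (const_int 0)) tests bit X of the constant 8, which is set only for
   X == 3, so (with !BITS_BIG_ENDIAN) the test becomes (ne X 3).  */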
12314 if (!SHIFT_COUNT_TRUNCATED
12315 && CONST_INT_P (XEXP (op0, 0))
12316 && XEXP (op0, 1) == const1_rtx
12317 && equality_comparison_p && const_op == 0
12318 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12320 if (BITS_BIG_ENDIAN)
12321 i = BITS_PER_WORD - 1 - i;
12323 op0 = XEXP (op0, 2);
12324 op1 = GEN_INT (i);
12325 const_op = i;
12327 /* Result is nonzero iff shift count is equal to I. */
12328 code = reverse_condition (code);
12329 continue;
12332 /* fall through */
12334 case SIGN_EXTRACT:
12335 tem = expand_compound_operation (op0);
12336 if (tem != op0)
12338 op0 = tem;
12339 continue;
12341 break;
12343 case NOT:
12344 /* If testing for equality, we can take the NOT of the constant. */
12345 if (equality_comparison_p
12346 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12348 op0 = XEXP (op0, 0);
12349 op1 = tem;
12350 continue;
12353 /* If just looking at the sign bit, reverse the sense of the
12354 comparison. */
12355 if (sign_bit_comparison_p)
12357 op0 = XEXP (op0, 0);
12358 code = (code == GE ? LT : GE);
12359 continue;
12361 break;
12363 case NEG:
12364 /* If testing for equality, we can take the NEG of the constant. */
12365 if (equality_comparison_p
12366 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12368 op0 = XEXP (op0, 0);
12369 op1 = tem;
12370 continue;
12373 /* The remaining cases only apply to comparisons with zero. */
12374 if (const_op != 0)
12375 break;
12377 /* When X is ABS or is known positive,
12378 (neg X) is < 0 if and only if X != 0. */
12380 if (sign_bit_comparison_p
12381 && (GET_CODE (XEXP (op0, 0)) == ABS
12382 || (mode_width <= HOST_BITS_PER_WIDE_INT
12383 && (nonzero_bits (XEXP (op0, 0), mode)
12384 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12385 == 0)))
12387 op0 = XEXP (op0, 0);
12388 code = (code == LT ? NE : EQ);
12389 continue;
12392 /* If we have NEG of something whose two high-order bits are the
12393 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12394 if (num_sign_bit_copies (op0, mode) >= 2)
12396 op0 = XEXP (op0, 0);
12397 code = swap_condition (code);
12398 continue;
12400 break;
12402 case ROTATE:
12403 /* If we are testing equality and our count is a constant, we
12404 can perform the inverse operation on our RHS. */
12405 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12406 && (tem = simplify_binary_operation (ROTATERT, mode,
12407 op1, XEXP (op0, 1))) != 0)
12409 op0 = XEXP (op0, 0);
12410 op1 = tem;
12411 continue;
12414 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12415 a particular bit. Convert it to an AND of a constant of that
12416 bit. This will be converted into a ZERO_EXTRACT. */
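/* E.g. in a 32-bit mode, (LT (rotate X 1) 0) looks at bit 30 of X and
   therefore becomes (NE (and X 0x40000000) 0).  */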
12417 if (const_op == 0 && sign_bit_comparison_p
12418 && CONST_INT_P (XEXP (op0, 1))
12419 && mode_width <= HOST_BITS_PER_WIDE_INT
12420 && UINTVAL (XEXP (op0, 1)) < mode_width)
12422 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12423 (HOST_WIDE_INT_1U
12424 << (mode_width - 1
12425 - INTVAL (XEXP (op0, 1)))));
12426 code = (code == LT ? NE : EQ);
12427 continue;
12430 /* Fall through. */
12432 case ABS:
12433 /* ABS is ignorable inside an equality comparison with zero. */
12434 if (const_op == 0 && equality_comparison_p)
12436 op0 = XEXP (op0, 0);
12437 continue;
12439 break;
12441 case SIGN_EXTEND:
12442 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12443 (compare FOO CONST) if CONST fits in FOO's mode and we
12444 are either testing inequality or have an unsigned
12445 comparison with ZERO_EXTEND or a signed comparison with
12446 SIGN_EXTEND. But don't do it if we don't have a compare
12447 insn of the given mode, since we'd have to revert it
12448 later on, and then we wouldn't know whether to sign- or
12449 zero-extend. */
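/* E.g. (compare (sign_extend:SI X:QI) (const_int 5)) can become
   (compare X (const_int 5)) for signed and equality tests, since 5 fits
   in QImode, provided the target has a QImode compare insn.  */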
12450 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12451 && ! unsigned_comparison_p
12452 && HWI_COMPUTABLE_MODE_P (mode)
12453 && trunc_int_for_mode (const_op, mode) == const_op
12454 && have_insn_for (COMPARE, mode))
12456 op0 = XEXP (op0, 0);
12457 continue;
12459 break;
12461 case SUBREG:
12462 /* Check for the case where we are comparing A - C1 with C2, that is
12464 (subreg:MODE (plus (A) (-C1))) op (C2)
12466 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12467 comparison in the wider mode. One of the following two conditions
12468 must be true in order for this to be valid:
12470 1. The mode extension results in the same bit pattern being added
12471 on both sides and the comparison is equality or unsigned. As
12472 C2 has been truncated to fit in MODE, the pattern can only be
12473 all 0s or all 1s.
12475 2. The mode extension results in the sign bit being copied on
12476 each side.
12478 The difficulty here is that we have predicates for A but not for
12479 (A - C1) so we need to check that C1 is within proper bounds so
12480 as to perturb A as little as possible. */
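/* Illustration of condition 1: if the nonzero bits of A fit in QImode,
   (eq (subreg:QI (plus:SI A (const_int -1)) 0) (const_int 7)) can be
   done as an SImode comparison of (plus:SI A (const_int -1)) with 7.  */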
12482 if (mode_width <= HOST_BITS_PER_WIDE_INT
12483 && subreg_lowpart_p (op0)
12484 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12485 &inner_mode)
12486 && GET_MODE_PRECISION (inner_mode) > mode_width
12487 && GET_CODE (SUBREG_REG (op0)) == PLUS
12488 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12490 rtx a = XEXP (SUBREG_REG (op0), 0);
12491 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12493 if ((c1 > 0
12494 && (unsigned HOST_WIDE_INT) c1
12495 < HOST_WIDE_INT_1U << (mode_width - 1)
12496 && (equality_comparison_p || unsigned_comparison_p)
12497 /* (A - C1) zero-extends if it is positive and sign-extends
12498 if it is negative, C2 both zero- and sign-extends. */
12499 && (((nonzero_bits (a, inner_mode)
12500 & ~GET_MODE_MASK (mode)) == 0
12501 && const_op >= 0)
12502 /* (A - C1) sign-extends if it is positive and 1-extends
12503 if it is negative, C2 both sign- and 1-extends. */
12504 || (num_sign_bit_copies (a, inner_mode)
12505 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12506 - mode_width)
12507 && const_op < 0)))
12508 || ((unsigned HOST_WIDE_INT) c1
12509 < HOST_WIDE_INT_1U << (mode_width - 2)
12510 /* (A - C1) always sign-extends, like C2. */
12511 && num_sign_bit_copies (a, inner_mode)
12512 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12513 - (mode_width - 1))))
12515 op0 = SUBREG_REG (op0);
12516 continue;
12520 /* If the inner mode is narrower and we are extracting the low part,
12521 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12522 if (paradoxical_subreg_p (op0))
12524 else if (subreg_lowpart_p (op0)
12525 && GET_MODE_CLASS (mode) == MODE_INT
12526 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12527 && (code == NE || code == EQ)
12528 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12529 && !paradoxical_subreg_p (op0)
12530 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12531 & ~GET_MODE_MASK (mode)) == 0)
12533 /* Remove outer subregs that don't do anything. */
12534 tem = gen_lowpart (inner_mode, op1);
12536 if ((nonzero_bits (tem, inner_mode)
12537 & ~GET_MODE_MASK (mode)) == 0)
12539 op0 = SUBREG_REG (op0);
12540 op1 = tem;
12541 continue;
12543 break;
12545 else
12546 break;
12548 /* FALLTHROUGH */
12550 case ZERO_EXTEND:
12551 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12552 && (unsigned_comparison_p || equality_comparison_p)
12553 && HWI_COMPUTABLE_MODE_P (mode)
12554 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12555 && const_op >= 0
12556 && have_insn_for (COMPARE, mode))
12558 op0 = XEXP (op0, 0);
12559 continue;
12561 break;
12563 case PLUS:
12564 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12565 this for equality comparisons due to pathological cases involving
12566 overflows. */
12567 if (equality_comparison_p
12568 && (tem = simplify_binary_operation (MINUS, mode,
12569 op1, XEXP (op0, 1))) != 0)
12571 op0 = XEXP (op0, 0);
12572 op1 = tem;
12573 continue;
12576 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12577 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12578 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12580 op0 = XEXP (XEXP (op0, 0), 0);
12581 code = (code == LT ? EQ : NE);
12582 continue;
12584 break;
12586 case MINUS:
12587 /* We used to optimize signed comparisons against zero, but that
12588 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12589 arrive here as equality comparisons, or (GEU, LTU) are
12590 optimized away. No need to special-case them. */
12592 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12593 (eq B (minus A C)), whichever simplifies. We can only do
12594 this for equality comparisons due to pathological cases involving
12595 overflows. */
12596 if (equality_comparison_p
12597 && (tem = simplify_binary_operation (PLUS, mode,
12598 XEXP (op0, 1), op1)) != 0)
12600 op0 = XEXP (op0, 0);
12601 op1 = tem;
12602 continue;
12605 if (equality_comparison_p
12606 && (tem = simplify_binary_operation (MINUS, mode,
12607 XEXP (op0, 0), op1)) != 0)
12609 op0 = XEXP (op0, 1);
12610 op1 = tem;
12611 continue;
12614 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12615 of bits in X minus 1, is one iff X > 0. */
12616 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12617 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12618 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12619 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12621 op0 = XEXP (op0, 1);
12622 code = (code == GE ? LE : GT);
12623 continue;
12625 break;
12627 case XOR:
12628 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12629 if C is zero or B is a constant. */
12630 if (equality_comparison_p
12631 && (tem = simplify_binary_operation (XOR, mode,
12632 XEXP (op0, 1), op1)) != 0)
12634 op0 = XEXP (op0, 0);
12635 op1 = tem;
12636 continue;
12638 break;
12641 case IOR:
12642 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12643 iff X <= 0. */
12644 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12645 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12646 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12648 op0 = XEXP (op0, 1);
12649 code = (code == GE ? GT : LE);
12650 continue;
12652 break;
12654 case AND:
12655 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12656 will be converted to a ZERO_EXTRACT later. */
12657 if (const_op == 0 && equality_comparison_p
12658 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12659 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12661 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12662 XEXP (XEXP (op0, 0), 1));
12663 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12664 continue;
12667 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12668 zero and X is a comparison and C1 and C2 describe only bits set
12669 in STORE_FLAG_VALUE, we can compare with X. */
12670 if (const_op == 0 && equality_comparison_p
12671 && mode_width <= HOST_BITS_PER_WIDE_INT
12672 && CONST_INT_P (XEXP (op0, 1))
12673 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12674 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12675 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12676 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12678 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12679 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12680 if ((~STORE_FLAG_VALUE & mask) == 0
12681 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12682 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12683 && COMPARISON_P (tem))))
12685 op0 = XEXP (XEXP (op0, 0), 0);
12686 continue;
12690 /* If we are doing an equality comparison of an AND of a bit equal
12691 to the sign bit, replace this with a LT or GE comparison of
12692 the underlying value. */
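/* E.g. in a 32-bit mode, (eq (and X 0x80000000) 0) becomes (ge X 0)
   and the NE form becomes (lt X 0).  */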
12693 if (equality_comparison_p
12694 && const_op == 0
12695 && CONST_INT_P (XEXP (op0, 1))
12696 && mode_width <= HOST_BITS_PER_WIDE_INT
12697 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12698 == HOST_WIDE_INT_1U << (mode_width - 1)))
12700 op0 = XEXP (op0, 0);
12701 code = (code == EQ ? GE : LT);
12702 continue;
12705 /* If this AND operation is really a ZERO_EXTEND from a narrower
12706 mode, the constant fits within that mode, and this is either an
12707 equality or unsigned comparison, try to do this comparison in
12708 the narrower mode.
12710 Note that in:
12712 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12713 -> (ne:DI (reg:SI 4) (const_int 0))
12715 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12716 known to hold a value of the required mode, the
12717 transformation is invalid. */
12718 if ((equality_comparison_p || unsigned_comparison_p)
12719 && CONST_INT_P (XEXP (op0, 1))
12720 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12721 & GET_MODE_MASK (mode))
12722 + 1)) >= 0
12723 && const_op >> i == 0
12724 && int_mode_for_size (i, 1).exists (&tmode))
12726 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12727 continue;
12730 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12731 fits in both M1 and M2 and the SUBREG is either paradoxical
12732 or represents the low part, permute the SUBREG and the AND
12733 and try again. */
12734 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12735 && CONST_INT_P (XEXP (op0, 1)))
12737 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12738 /* Require an integral mode, to avoid creating something like
12739 (AND:SF ...). */
12740 if ((is_a <scalar_int_mode>
12741 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12742 /* It is unsafe to commute the AND into the SUBREG if the
12743 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12744 not defined. As originally written the upper bits
12745 have a defined value due to the AND operation.
12746 However, if we commute the AND inside the SUBREG then
12747 they no longer have defined values and the meaning of
12748 the code has been changed.
12749 Also C1 should not change value in the smaller mode,
12750 see PR67028 (a positive C1 can become negative in the
12751 smaller mode, so that the AND no longer masks the
12752 upper bits). */
12753 && ((WORD_REGISTER_OPERATIONS
12754 && mode_width > GET_MODE_PRECISION (tmode)
12755 && mode_width <= BITS_PER_WORD
12756 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12757 || (mode_width <= GET_MODE_PRECISION (tmode)
12758 && subreg_lowpart_p (XEXP (op0, 0))))
12759 && mode_width <= HOST_BITS_PER_WIDE_INT
12760 && HWI_COMPUTABLE_MODE_P (tmode)
12761 && (c1 & ~mask) == 0
12762 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12763 && c1 != mask
12764 && c1 != GET_MODE_MASK (tmode))
12766 op0 = simplify_gen_binary (AND, tmode,
12767 SUBREG_REG (XEXP (op0, 0)),
12768 gen_int_mode (c1, tmode));
12769 op0 = gen_lowpart (mode, op0);
12770 continue;
12774 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12775 if (const_op == 0 && equality_comparison_p
12776 && XEXP (op0, 1) == const1_rtx
12777 && GET_CODE (XEXP (op0, 0)) == NOT)
12779 op0 = simplify_and_const_int (NULL_RTX, mode,
12780 XEXP (XEXP (op0, 0), 0), 1);
12781 code = (code == NE ? EQ : NE);
12782 continue;
12785 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12786 (eq (and (lshiftrt X) 1) 0).
12787 Also handle the case where (not X) is expressed using xor. */
12788 if (const_op == 0 && equality_comparison_p
12789 && XEXP (op0, 1) == const1_rtx
12790 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12792 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12793 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12795 if (GET_CODE (shift_op) == NOT
12796 || (GET_CODE (shift_op) == XOR
12797 && CONST_INT_P (XEXP (shift_op, 1))
12798 && CONST_INT_P (shift_count)
12799 && HWI_COMPUTABLE_MODE_P (mode)
12800 && (UINTVAL (XEXP (shift_op, 1))
12801 == HOST_WIDE_INT_1U
12802 << INTVAL (shift_count))))
12805 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12806 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12807 code = (code == NE ? EQ : NE);
12808 continue;
12811 break;
12813 case ASHIFT:
12814 /* If we have (compare (ashift FOO N) (const_int C)) and
12815 the high order N bits of FOO (N+1 if an inequality comparison)
12816 are known to be zero, we can do this by comparing FOO with C
12817 shifted right N bits so long as the low-order N bits of C are
12818 zero. */
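/* Sketch: (eq (ashift X 2) (const_int 12)) can become
   (eq X (const_int 3)) when the two high-order bits of X are known to
   be zero, since 12 has its two low-order bits clear.  */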
12819 if (CONST_INT_P (XEXP (op0, 1))
12820 && INTVAL (XEXP (op0, 1)) >= 0
12821 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12822 < HOST_BITS_PER_WIDE_INT)
12823 && (((unsigned HOST_WIDE_INT) const_op
12824 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12825 - 1)) == 0)
12826 && mode_width <= HOST_BITS_PER_WIDE_INT
12827 && (nonzero_bits (XEXP (op0, 0), mode)
12828 & ~(mask >> (INTVAL (XEXP (op0, 1))
12829 + ! equality_comparison_p))) == 0)
12831 /* We must perform a logical shift, not an arithmetic one,
12832 as we want the top N bits of C to be zero. */
12833 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12835 temp >>= INTVAL (XEXP (op0, 1));
12836 op1 = gen_int_mode (temp, mode);
12837 op0 = XEXP (op0, 0);
12838 continue;
12841 /* If we are doing a sign bit comparison, it means we are testing
12842 a particular bit. Convert it to the appropriate AND. */
12843 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12844 && mode_width <= HOST_BITS_PER_WIDE_INT)
12846 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12847 (HOST_WIDE_INT_1U
12848 << (mode_width - 1
12849 - INTVAL (XEXP (op0, 1)))));
12850 code = (code == LT ? NE : EQ);
12851 continue;
12854 /* If this is an equality comparison with zero and we are shifting
12855 the low bit to the sign bit, we can convert this to an AND of the
12856 low-order bit. */
12857 if (const_op == 0 && equality_comparison_p
12858 && CONST_INT_P (XEXP (op0, 1))
12859 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12861 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12862 continue;
12864 break;
12866 case ASHIFTRT:
12867 /* If this is an equality comparison with zero, we can do this
12868 as a logical shift, which might be much simpler. */
12869 if (equality_comparison_p && const_op == 0
12870 && CONST_INT_P (XEXP (op0, 1)))
12872 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12873 XEXP (op0, 0),
12874 INTVAL (XEXP (op0, 1)));
12875 continue;
12878 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12879 do the comparison in a narrower mode. */
12880 if (! unsigned_comparison_p
12881 && CONST_INT_P (XEXP (op0, 1))
12882 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12883 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12884 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12885 .exists (&tmode))
12886 && (((unsigned HOST_WIDE_INT) const_op
12887 + (GET_MODE_MASK (tmode) >> 1) + 1)
12888 <= GET_MODE_MASK (tmode)))
12890 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12891 continue;
12894 /* Likewise if OP0 is a PLUS of a sign extension with a
12895 constant, which is usually represented with the PLUS
12896 between the shifts. */
12897 if (! unsigned_comparison_p
12898 && CONST_INT_P (XEXP (op0, 1))
12899 && GET_CODE (XEXP (op0, 0)) == PLUS
12900 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12901 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12902 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12903 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12904 .exists (&tmode))
12905 && (((unsigned HOST_WIDE_INT) const_op
12906 + (GET_MODE_MASK (tmode) >> 1) + 1)
12907 <= GET_MODE_MASK (tmode)))
12909 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12910 rtx add_const = XEXP (XEXP (op0, 0), 1);
12911 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12912 add_const, XEXP (op0, 1));
12914 op0 = simplify_gen_binary (PLUS, tmode,
12915 gen_lowpart (tmode, inner),
12916 new_const);
12917 continue;
12920 /* FALLTHROUGH */
12921 case LSHIFTRT:
12922 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12923 the low order N bits of FOO are known to be zero, we can do this
12924 by comparing FOO with C shifted left N bits so long as no
12925 overflow occurs. Even if the low order N bits of FOO aren't known
12926 to be zero, if the comparison is >= or < we can use the same
12927 optimization and for > or <= by setting all the low
12928 order N bits in the comparison constant. */
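/* E.g. (geu (lshiftrt X 2) (const_int 5)) becomes (geu X (const_int 20));
   a GTU test whose operand has unknown low-order bits additionally sets
   those bits in the constant, giving (gtu X 23).  */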
12929 if (CONST_INT_P (XEXP (op0, 1))
12930 && INTVAL (XEXP (op0, 1)) > 0
12931 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12932 && mode_width <= HOST_BITS_PER_WIDE_INT
12933 && (((unsigned HOST_WIDE_INT) const_op
12934 + (GET_CODE (op0) != LSHIFTRT
12935 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12936 + 1)
12937 : 0))
12938 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12940 unsigned HOST_WIDE_INT low_bits
12941 = (nonzero_bits (XEXP (op0, 0), mode)
12942 & ((HOST_WIDE_INT_1U
12943 << INTVAL (XEXP (op0, 1))) - 1));
12944 if (low_bits == 0 || !equality_comparison_p)
12946 /* If the shift was logical, then we must make the condition
12947 unsigned. */
12948 if (GET_CODE (op0) == LSHIFTRT)
12949 code = unsigned_condition (code);
12951 const_op = (unsigned HOST_WIDE_INT) const_op
12952 << INTVAL (XEXP (op0, 1));
12953 if (low_bits != 0
12954 && (code == GT || code == GTU
12955 || code == LE || code == LEU))
12956 const_op
12957 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12958 op1 = GEN_INT (const_op);
12959 op0 = XEXP (op0, 0);
12960 continue;
12964 /* If we are using this shift to extract just the sign bit, we
12965 can replace this with an LT or GE comparison. */
12966 if (const_op == 0
12967 && (equality_comparison_p || sign_bit_comparison_p)
12968 && CONST_INT_P (XEXP (op0, 1))
12969 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12971 op0 = XEXP (op0, 0);
12972 code = (code == NE || code == GT ? LT : GE);
12973 continue;
12975 break;
12977 default:
12978 break;
12981 break;
12984 /* Now make any compound operations involved in this comparison. Then,
12985 check for an outermost SUBREG on OP0 that is not doing anything or is
12986 paradoxical. The latter transformation must only be performed when
12987 it is known that the "extra" bits will be the same in op0 and op1 or
12988 that they don't matter. There are three cases to consider:
12990 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12991 care bits and we can assume they have any convenient value. So
12992 making the transformation is safe.
12994 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12995 In this case the upper bits of op0 are undefined. We should not make
12996 the simplification in that case as we do not know the contents of
12997 those bits.
12999 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
13000 In that case we know those bits are zeros or ones. We must also be
13001 sure that they are the same as the upper bits of op1.
13003 We can never remove a SUBREG for a non-equality comparison because
13004 the sign bit is in a different place in the underlying object. */
13006 rtx_code op0_mco_code = SET;
13007 if (op1 == const0_rtx)
13008 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
13010 op0 = make_compound_operation (op0, op0_mco_code);
13011 op1 = make_compound_operation (op1, SET);
13013 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
13014 && is_int_mode (GET_MODE (op0), &mode)
13015 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13016 && (code == NE || code == EQ))
13018 if (paradoxical_subreg_p (op0))
13020 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
13021 implemented. */
13022 if (REG_P (SUBREG_REG (op0)))
13024 op0 = SUBREG_REG (op0);
13025 op1 = gen_lowpart (inner_mode, op1);
13028 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13029 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
13030 & ~GET_MODE_MASK (mode)) == 0)
13032 tem = gen_lowpart (inner_mode, op1);
13034 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
13035 op0 = SUBREG_REG (op0), op1 = tem;
13039 /* We now do the opposite procedure: Some machines don't have compare
13040 insns in all modes. If OP0's mode is an integer mode smaller than a
13041 word and we can't do a compare in that mode, see if there is a larger
13042 mode for which we can do the compare. There are a number of cases in
13043 which we can use the wider mode. */
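/* E.g. if the target lacks a QImode compare insn, (eq X:QI Y:QI) can be
   retried as an SImode compare of (zero_extend:SI X) and
   (zero_extend:SI Y), since equality is insensitive to the extension.  */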
13045 if (is_int_mode (GET_MODE (op0), &mode)
13046 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
13047 && ! have_insn_for (COMPARE, mode))
13048 FOR_EACH_WIDER_MODE (tmode_iter, mode)
13050 tmode = tmode_iter.require ();
13051 if (!HWI_COMPUTABLE_MODE_P (tmode))
13052 break;
13053 if (have_insn_for (COMPARE, tmode))
13055 int zero_extended;
13057 /* If this is a test for negative, we can make an explicit
13058 test of the sign bit. Test this first so we can use
13059 a paradoxical subreg to extend OP0. */
13061 if (op1 == const0_rtx && (code == LT || code == GE)
13062 && HWI_COMPUTABLE_MODE_P (mode))
13064 unsigned HOST_WIDE_INT sign
13065 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
13066 op0 = simplify_gen_binary (AND, tmode,
13067 gen_lowpart (tmode, op0),
13068 gen_int_mode (sign, tmode));
13069 code = (code == LT) ? NE : EQ;
13070 break;
13073 /* If the only nonzero bits in OP0 and OP1 are those in the
13074 narrower mode and this is an equality or unsigned comparison,
13075 we can use the wider mode. Similarly for sign-extended
13076 values, in which case it is true for all comparisons. */
13077 zero_extended = ((code == EQ || code == NE
13078 || code == GEU || code == GTU
13079 || code == LEU || code == LTU)
13080 && (nonzero_bits (op0, tmode)
13081 & ~GET_MODE_MASK (mode)) == 0
13082 && ((CONST_INT_P (op1)
13083 || (nonzero_bits (op1, tmode)
13084 & ~GET_MODE_MASK (mode)) == 0)));
13086 if (zero_extended
13087 || ((num_sign_bit_copies (op0, tmode)
13088 > (unsigned int) (GET_MODE_PRECISION (tmode)
13089 - GET_MODE_PRECISION (mode)))
13090 && (num_sign_bit_copies (op1, tmode)
13091 > (unsigned int) (GET_MODE_PRECISION (tmode)
13092 - GET_MODE_PRECISION (mode)))))
13094 /* If OP0 is an AND and we don't have an AND in MODE either,
13095 make a new AND in the proper mode. */
13096 if (GET_CODE (op0) == AND
13097 && !have_insn_for (AND, mode))
13098 op0 = simplify_gen_binary (AND, tmode,
13099 gen_lowpart (tmode,
13100 XEXP (op0, 0)),
13101 gen_lowpart (tmode,
13102 XEXP (op0, 1)));
13103 else
13105 if (zero_extended)
13107 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13108 op0, mode);
13109 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13110 op1, mode);
13112 else
13114 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13115 op0, mode);
13116 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13117 op1, mode);
13119 break;
13125 /* We may have changed the comparison operands. Re-canonicalize. */
13126 if (swap_commutative_operands_p (op0, op1))
13128 std::swap (op0, op1);
13129 code = swap_condition (code);
13132 /* If this machine only supports a subset of valid comparisons, see if we
13133 can convert an unsupported one into a supported one. */
13134 target_canonicalize_comparison (&code, &op0, &op1, 0);
13136 *pop0 = op0;
13137 *pop1 = op1;
13139 return code;
13142 /* Utility function for record_value_for_reg. Count number of
13143 rtxs in X. */
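/* For example, (plus (reg A) (reg A)) with a shared operand counts as
   1 + 2 * count_rtxs (reg A), i.e. 3.  */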
13144 static int
13145 count_rtxs (rtx x)
13147 enum rtx_code code = GET_CODE (x);
13148 const char *fmt;
13149 int i, j, ret = 1;
13151 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13152 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13154 rtx x0 = XEXP (x, 0);
13155 rtx x1 = XEXP (x, 1);
13157 if (x0 == x1)
13158 return 1 + 2 * count_rtxs (x0);
13160 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13161 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13162 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13163 return 2 + 2 * count_rtxs (x0)
13164 + count_rtxs (x == XEXP (x1, 0)
13165 ? XEXP (x1, 1) : XEXP (x1, 0));
13167 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13168 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13169 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13170 return 2 + 2 * count_rtxs (x1)
13171 + count_rtxs (x == XEXP (x0, 0)
13172 ? XEXP (x0, 1) : XEXP (x0, 0));
13175 fmt = GET_RTX_FORMAT (code);
13176 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13177 if (fmt[i] == 'e')
13178 ret += count_rtxs (XEXP (x, i));
13179 else if (fmt[i] == 'E')
13180 for (j = 0; j < XVECLEN (x, i); j++)
13181 ret += count_rtxs (XVECEXP (x, i, j));
13183 return ret;
13186 /* Utility function for the following routine. Called when X is part of a value
13187 being stored into last_set_value. Sets last_set_table_tick
13188 for each register mentioned. Similar to mention_regs in cse.c. */
13190 static void
13191 update_table_tick (rtx x)
13193 enum rtx_code code = GET_CODE (x);
13194 const char *fmt = GET_RTX_FORMAT (code);
13195 int i, j;
13197 if (code == REG)
13199 unsigned int regno = REGNO (x);
13200 unsigned int endregno = END_REGNO (x);
13201 unsigned int r;
13203 for (r = regno; r < endregno; r++)
13205 reg_stat_type *rsp = &reg_stat[r];
13206 rsp->last_set_table_tick = label_tick;
13209 return;
13212 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13213 if (fmt[i] == 'e')
13215 /* Check for identical subexpressions. If x contains
13216 identical subexpressions, we only have to traverse one of
13217 them. */
13218 if (i == 0 && ARITHMETIC_P (x))
13220 /* Note that at this point x1 has already been
13221 processed. */
13222 rtx x0 = XEXP (x, 0);
13223 rtx x1 = XEXP (x, 1);
13225 /* If x0 and x1 are identical then there is no need to
13226 process x0. */
13227 if (x0 == x1)
13228 break;
13230 /* If x0 is identical to a subexpression of x1 then while
13231 processing x1, x0 has already been processed. Thus we
13232 are done with x. */
13233 if (ARITHMETIC_P (x1)
13234 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13235 break;
13237 /* If x1 is identical to a subexpression of x0 then we
13238 still have to process the rest of x0. */
13239 if (ARITHMETIC_P (x0)
13240 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13242 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13243 break;
13247 update_table_tick (XEXP (x, i));
13249 else if (fmt[i] == 'E')
13250 for (j = 0; j < XVECLEN (x, i); j++)
13251 update_table_tick (XVECEXP (x, i, j));
13254 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13255 are saying that the register is clobbered and we no longer know its
13256 value. If INSN is zero, don't update reg_stat[].last_set; this is
13257 only permitted with VALUE also zero and is used to invalidate the
13258 register. */
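/* Thus record_value_for_reg (reg, insn, 0) records that REG was
   clobbered at INSN, while record_value_for_reg (reg, NULL, 0) merely
   invalidates whatever was known about REG.  */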
13260 static void
13261 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13263 unsigned int regno = REGNO (reg);
13264 unsigned int endregno = END_REGNO (reg);
13265 unsigned int i;
13266 reg_stat_type *rsp;
13268 /* If VALUE contains REG and we have a previous value for REG, substitute
13269 the previous value. */
13270 if (value && insn && reg_overlap_mentioned_p (reg, value))
13272 rtx tem;
13274 /* Set things up so get_last_value is allowed to see anything set up to
13275 our insn. */
13276 subst_low_luid = DF_INSN_LUID (insn);
13277 tem = get_last_value (reg);
13279 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13280 it isn't going to be useful and will take a lot of time to process,
13281 so just use the CLOBBER. */
13283 if (tem)
13285 if (ARITHMETIC_P (tem)
13286 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13287 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13288 tem = XEXP (tem, 0);
13289 else if (count_occurrences (value, reg, 1) >= 2)
13291 /* If there are two or more occurrences of REG in VALUE,
13292 prevent the value from growing too much. */
13293 if (count_rtxs (tem) > param_max_last_value_rtl)
13294 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13297 value = replace_rtx (copy_rtx (value), reg, tem);
13301 /* For each register modified, show we don't know its value, that
13302 we don't know about its bitwise content, that its value has been
13303 updated, and that we don't know the location of the death of the
13304 register. */
13305 for (i = regno; i < endregno; i++)
13307 rsp = &reg_stat[i];
13309 if (insn)
13310 rsp->last_set = insn;
13312 rsp->last_set_value = 0;
13313 rsp->last_set_mode = VOIDmode;
13314 rsp->last_set_nonzero_bits = 0;
13315 rsp->last_set_sign_bit_copies = 0;
13316 rsp->last_death = 0;
13317 rsp->truncated_to_mode = VOIDmode;
13320 /* Mark registers that are being referenced in this value. */
13321 if (value)
13322 update_table_tick (value);
13324 /* Now update the status of each register being set.
13325 If someone is using this register in this block, set this register
13326 to invalid since we will get confused between the two lives in this
13327 basic block. This makes using this register always invalid. In cse, we
13328 scan the table to invalidate all entries using this register, but this
13329 is too much work for us. */
13331 for (i = regno; i < endregno; i++)
13333 rsp = &reg_stat[i];
13334 rsp->last_set_label = label_tick;
13335 if (!insn
13336 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13337 rsp->last_set_invalid = 1;
13338 else
13339 rsp->last_set_invalid = 0;
13342 /* The value being assigned might refer to X (like in "x++;"). In that
13343 case, we must replace it with (clobber (const_int 0)) to prevent
13344 infinite loops. */
13345 rsp = &reg_stat[regno];
13346 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13348 value = copy_rtx (value);
13349 if (!get_last_value_validate (&value, insn, label_tick, 1))
13350 value = 0;
13353 /* For the main register being modified, update the value, the mode, the
13354 nonzero bits, and the number of sign bit copies. */
13356 rsp->last_set_value = value;
13358 if (value)
13360 machine_mode mode = GET_MODE (reg);
13361 subst_low_luid = DF_INSN_LUID (insn);
13362 rsp->last_set_mode = mode;
13363 if (GET_MODE_CLASS (mode) == MODE_INT
13364 && HWI_COMPUTABLE_MODE_P (mode))
13365 mode = nonzero_bits_mode;
13366 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13367 rsp->last_set_sign_bit_copies
13368 = num_sign_bit_copies (value, GET_MODE (reg));
13372 /* Called via note_stores from record_dead_and_set_regs to handle one
13373 SET or CLOBBER in an insn. DATA is the instruction in which the
13374 set is occurring. */
13376 static void
13377 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13379 rtx_insn *record_dead_insn = (rtx_insn *) data;
13381 if (GET_CODE (dest) == SUBREG)
13382 dest = SUBREG_REG (dest);
13384 if (!record_dead_insn)
13386 if (REG_P (dest))
13387 record_value_for_reg (dest, NULL, NULL_RTX);
13388 return;
13391 if (REG_P (dest))
13393 /* If we are setting the whole register, we know its value. Otherwise
13394 show that we don't know the value. We can handle a SUBREG if it's
13395 the low part, but we must be careful with paradoxical SUBREGs on
13396 RISC architectures because we cannot strip e.g. an extension around
13397 a load and record the naked load since the RTL middle-end considers
13398 that the upper bits are defined according to LOAD_EXTEND_OP. */
13399 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13400 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13401 else if (GET_CODE (setter) == SET
13402 && GET_CODE (SET_DEST (setter)) == SUBREG
13403 && SUBREG_REG (SET_DEST (setter)) == dest
13404 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13405 BITS_PER_WORD)
13406 && subreg_lowpart_p (SET_DEST (setter)))
13407 record_value_for_reg (dest, record_dead_insn,
13408 WORD_REGISTER_OPERATIONS
13409 && word_register_operation_p (SET_SRC (setter))
13410 && paradoxical_subreg_p (SET_DEST (setter))
13411 ? SET_SRC (setter)
13412 : gen_lowpart (GET_MODE (dest),
13413 SET_SRC (setter)));
13414 else
13415 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13417 else if (MEM_P (dest)
13418 /* Ignore pushes, they clobber nothing. */
13419 && ! push_operand (dest, GET_MODE (dest)))
13420 mem_last_set = DF_INSN_LUID (record_dead_insn);
13423 /* Update the records of when each REG was most recently set or killed
13424 for the things done by INSN. This is the last thing done in processing
13425 INSN in the combiner loop.
13427 We update reg_stat[], in particular fields last_set, last_set_value,
13428 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13429 last_death, and also the similar information mem_last_set (which insn
13430 most recently modified memory) and last_call_luid (which insn was the
13431 most recent subroutine call). */
13433 static void
13434 record_dead_and_set_regs (rtx_insn *insn)
13436 rtx link;
13437 unsigned int i;
13439 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13441 if (REG_NOTE_KIND (link) == REG_DEAD
13442 && REG_P (XEXP (link, 0)))
13444 unsigned int regno = REGNO (XEXP (link, 0));
13445 unsigned int endregno = END_REGNO (XEXP (link, 0));
13447 for (i = regno; i < endregno; i++)
13449 reg_stat_type *rsp;
13451 rsp = &reg_stat[i];
13452 rsp->last_death = insn;
13455 else if (REG_NOTE_KIND (link) == REG_INC)
13456 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13459 if (CALL_P (insn))
13461 HARD_REG_SET callee_clobbers
13462 = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
13463 hard_reg_set_iterator hrsi;
13464 EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
13466 reg_stat_type *rsp;
13468 /* ??? We could try to preserve some information from the last
13469 set of register I if the call doesn't actually clobber
13470 (reg:last_set_mode I), which might be true for ABIs with
13471 partial clobbers. However, it would be difficult to
13472 update last_set_nonzero_bits and last_sign_bit_copies
13473 to account for the part of I that actually was clobbered.
13474 It wouldn't help much anyway, since we rarely see this
13475 situation before RA. */
13476 rsp = &reg_stat[i];
13477 rsp->last_set_invalid = 1;
13478 rsp->last_set = insn;
13479 rsp->last_set_value = 0;
13480 rsp->last_set_mode = VOIDmode;
13481 rsp->last_set_nonzero_bits = 0;
13482 rsp->last_set_sign_bit_copies = 0;
13483 rsp->last_death = 0;
13484 rsp->truncated_to_mode = VOIDmode;
13487 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13489 /* We can't combine into a call pattern. Remember, though, that
13490 the return value register is set at this LUID. We could
13491 still replace a register with the return value from the
13492 wrong subroutine call! */
13493 note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
13495 else
13496 note_stores (insn, record_dead_and_set_regs_1, insn);
13499 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13500 register present in the SUBREG, so for each such SUBREG go back and
13501 adjust nonzero and sign bit information of the registers that are
13502 known to have some zero/sign bits set.
13504 This is needed because when combine blows the SUBREGs away, the
13505 information on zero/sign bits is lost and further combines can be
13506 missed because of that. */
13508 static void
13509 record_promoted_value (rtx_insn *insn, rtx subreg)
13511 struct insn_link *links;
13512 rtx set;
13513 unsigned int regno = REGNO (SUBREG_REG (subreg));
13514 machine_mode mode = GET_MODE (subreg);
13516 if (!HWI_COMPUTABLE_MODE_P (mode))
13517 return;
13519 for (links = LOG_LINKS (insn); links;)
13521 reg_stat_type *rsp;
13523 insn = links->insn;
13524 set = single_set (insn);
13526 if (! set || !REG_P (SET_DEST (set))
13527 || REGNO (SET_DEST (set)) != regno
13528 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13530 links = links->next;
13531 continue;
13534 rsp = &reg_stat[regno];
13535 if (rsp->last_set == insn)
13537 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13538 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13541 if (REG_P (SET_SRC (set)))
13543 regno = REGNO (SET_SRC (set));
13544 links = LOG_LINKS (insn);
13546 else
13547 break;
13551 /* Check if X, a register, is known to contain a value already
13552 truncated to MODE. In this case we can use a subreg to refer to
13553 the truncated value even though in the generic case we would need
13554 an explicit truncation. */
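/* E.g. a register recorded as truncated to SImode trivially satisfies
   a query for SImode or any wider mode; for a narrower mode such as
   QImode, TRULY_NOOP_TRUNCATION_MODES_P decides.  */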
13556 static bool
13557 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13559 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13560 machine_mode truncated = rsp->truncated_to_mode;
13562 if (truncated == 0
13563 || rsp->truncation_label < label_tick_ebb_start)
13564 return false;
13565 if (!partial_subreg_p (mode, truncated))
13566 return true;
13567 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13568 return true;
13569 return false;
13572 /* If X is a hard reg or a subreg record the mode that the register is
13573 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13574 able to turn a truncate into a subreg using this information. Return true
13575 if traversing X is complete. */
13577 static bool
13578 record_truncated_value (rtx x)
13580 machine_mode truncated_mode;
13581 reg_stat_type *rsp;
13583 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13585 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13586 truncated_mode = GET_MODE (x);
13588 if (!partial_subreg_p (truncated_mode, original_mode))
13589 return true;
13591 truncated_mode = GET_MODE (x);
13592 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13593 return true;
13595 x = SUBREG_REG (x);
13597 /* ??? For hard-regs we now record everything. We might be able to
13598 optimize this using last_set_mode. */
13599 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13600 truncated_mode = GET_MODE (x);
13601 else
13602 return false;
13604 rsp = &reg_stat[REGNO (x)];
13605 if (rsp->truncated_to_mode == 0
13606 || rsp->truncation_label < label_tick_ebb_start
13607 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13609 rsp->truncated_to_mode = truncated_mode;
13610 rsp->truncation_label = label_tick;
13613 return true;
13616 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13617 the modes they are used in. This can help turning TRUNCATEs into
13618 SUBREGs. */
13620 static void
13621 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13623 subrtx_var_iterator::array_type array;
13624 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13625 if (record_truncated_value (*iter))
13626 iter.skip_subrtxes ();
13629 /* Scan X for promoted SUBREGs. For each one found,
13630 note what it implies to the registers used in it. */
13632 static void
13633 check_promoted_subreg (rtx_insn *insn, rtx x)
13635 if (GET_CODE (x) == SUBREG
13636 && SUBREG_PROMOTED_VAR_P (x)
13637 && REG_P (SUBREG_REG (x)))
13638 record_promoted_value (insn, x);
13639 else
13641 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13642 int i, j;
13644 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13645 switch (format[i])
13647 case 'e':
13648 check_promoted_subreg (insn, XEXP (x, i));
13649 break;
13650 case 'V':
13651 case 'E':
13652 if (XVEC (x, i) != 0)
13653 for (j = 0; j < XVECLEN (x, i); j++)
13654 check_promoted_subreg (insn, XVECEXP (x, i, j));
13655 break;
13660 /* Verify that all the registers and memory references mentioned in *LOC are
13661 still valid. *LOC was part of a value set in INSN when label_tick was
13662 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13663 the invalid references with (clobber (const_int 0)) and return 1. This
13664 replacement is useful because we often can get useful information about
13665 the form of a value (e.g., if it was produced by a shift that always
13666 produces -1 or 0) even though we don't know exactly what registers it
13667 was produced from. */
13669 static int
13670 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13672 rtx x = *loc;
13673 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13674 int len = GET_RTX_LENGTH (GET_CODE (x));
13675 int i, j;
13677 if (REG_P (x))
13679 unsigned int regno = REGNO (x);
13680 unsigned int endregno = END_REGNO (x);
13681 unsigned int j;
13683 for (j = regno; j < endregno; j++)
13685 reg_stat_type *rsp = &reg_stat[j];
13686 if (rsp->last_set_invalid
13687 /* If this is a pseudo-register that was only set once and not
13688 live at the beginning of the function, it is always valid. */
13689 || (! (regno >= FIRST_PSEUDO_REGISTER
13690 && regno < reg_n_sets_max
13691 && REG_N_SETS (regno) == 1
13692 && (!REGNO_REG_SET_P
13693 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13694 regno)))
13695 && rsp->last_set_label > tick))
13697 if (replace)
13698 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13699 return replace;
13703 return 1;
13705 /* If this is a memory reference, make sure that there were no stores after
13706 it that might have clobbered the value. We don't have alias info, so we
13707 assume any store invalidates it. Moreover, we only have local UIDs, so
13708 we also assume that there were stores in the intervening basic blocks. */
13709 else if (MEM_P (x) && !MEM_READONLY_P (x)
13710 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13712 if (replace)
13713 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13714 return replace;
13717 for (i = 0; i < len; i++)
13719 if (fmt[i] == 'e')
13721 /* Check for identical subexpressions. If x contains
13722 identical subexpressions, we only have to traverse one of
13723 them. */
13724 if (i == 1 && ARITHMETIC_P (x))
13726 /* Note that at this point x0 has already been checked
13727 and found valid. */
13728 rtx x0 = XEXP (x, 0);
13729 rtx x1 = XEXP (x, 1);
13731 /* If x0 and x1 are identical then x is also valid. */
13732 if (x0 == x1)
13733 return 1;
13735 /* If x1 is identical to a subexpression of x0 then
13736 while checking x0, x1 has already been checked. Thus
13737 it is valid and so is x. */
13738 if (ARITHMETIC_P (x0)
13739 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13740 return 1;
13742 /* If x0 is identical to a subexpression of x1 then x is
13743 valid iff the rest of x1 is valid. */
13744 if (ARITHMETIC_P (x1)
13745 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13746 return
13747 get_last_value_validate (&XEXP (x1,
13748 x0 == XEXP (x1, 0) ? 1 : 0),
13749 insn, tick, replace);
13752 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13753 replace) == 0)
13754 return 0;
13756 else if (fmt[i] == 'E')
13757 for (j = 0; j < XVECLEN (x, i); j++)
13758 if (get_last_value_validate (&XVECEXP (x, i, j),
13759 insn, tick, replace) == 0)
13760 return 0;
13763 /* If we haven't found a reason for it to be invalid, it is valid. */
13764 return 1;
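
/* Illustrative sketch (editor's note, not from the original sources):
   suppose (reg:SI 65) has become invalid since the value below was
   recorded.  With REPLACE nonzero, get_last_value_validate rewrites

     (plus:SI (reg:SI 65) (reg:SI 66))

   into

     (plus:SI (clobber (const_int 0)) (reg:SI 66))

   and returns 1; the shape of the expression survives even though the
   stale register reference is gone.  */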
/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or live at the
     beginning of the function, return 0.

     This is because, if the register is not live at the beginning of the
     function, then it is always set before being used (it is never used
     without being set).  And, if it's set only once, and it's always set
     before use, then all uses must have the same last value, even if it's
     not from this basic block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
                GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
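
/* Illustrative sketch (editor's note, not from the original sources):
   given a lowpart SUBREG such as

     (subreg:QI (reg:SI 70) 0)

   get_last_value first recurses on (reg:SI 70); if that yields, say,
   (const_int 300), it returns gen_lowpart (QImode, ...), i.e.
   (const_int 44).  A paradoxical SUBREG would instead fall through and
   return 0, since the extra bits are unpredictable.  */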
/* Define variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;
rtx reg_dead_reg;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}
/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);
  reg_dead_reg = reg;

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (insn, reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
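
/* Illustrative sketch (editor's note, not from the original sources):
   asking reg_dead_at_p for (reg:SI 2) at insn C in

     A: (set (reg:SI 2) (reg:SI 3))
     B: (clobber (reg:SI 2))
     C: (set (reg:SI 4) (reg:SI 5))

   scans backwards from C, hits the CLOBBER at B first, and therefore
   returns 1 (dead); had B instead been a SET of (reg:SI 2), the scan
   would have returned 0 (live).  */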
/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}
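
/* Illustrative sketch (editor's note, not from the original sources):
   for a store through a hard-register address, e.g.

     (set (mem:SI (reg:SI 6)) (reg:SI 1))

   mark_used_regs_combine records both hard regs 6 and 1 in
   newpat_used_regs: the destination is a MEM, so only the registers in
   its address are marked, and the SET_SRC is scanned as an ordinary
   use.  */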
/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}
/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
         FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
        {
          rtx_insn *insn = prev_real_nondebug_insn (to_insn);
          while (insn
                 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
                 && DF_INSN_LUID (insn) >= from_luid)
            {
              if (dead_or_set_regno_p (insn, regno))
                {
                  if (find_regno_note (insn, REG_DEAD, regno))
                    where_dead = insn;
                  break;
                }

              insn = prev_real_nondebug_insn (insn);
            }
        }

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && partial_subreg_p (GET_MODE (XEXP (note, 0)),
                                             GET_MODE (x))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some piece of
         every register in the expression is used by this insn, so
         remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && !read_modify_subreg_p (dest)))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
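
/* Illustrative sketch (editor's note, not from the original sources):
   suppose X is the two-word hard register (reg:DI 0), covering hard
   regs 0 and 1, and the death note found at WHERE_DEAD is only for
   (reg:SI 0).  The note covers just part of X, so move_deaths recurses
   on (reg:SI 1) to pick up any separate REG_DEAD note for the second
   word, and then adds a fresh REG_DEAD note for the whole DImode
   register to *PNOTES.  */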
/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
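
/* Illustrative sketch (editor's note, not from the original sources):
   with BODY of the form

     (set (zero_extract:SI (reg:SI 80) (const_int 8) (const_int 0))
          (reg:SI 81))

   reg_bitfield_target_p ((reg:SI 80), body) returns 1: the destination
   is a ZERO_EXTRACT whose operand is the pseudo itself, so only part
   of the register is written and any old REG_DEAD note must be treated
   with care.  */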
/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2, ELIM_I1 and ELIM_I0 are either zero or registers that we know
   will not need REG_DEAD notes because they are being substituted for.
   This saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */
static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              poly_int64 old_size, args_size = get_args_size (note);
              /* fixup_args_size_notes looks at REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a
                 REG_ARGS_SIZE note to all noreturn calls; allow that here.  */
              gcc_assert (maybe_ne (old_size, args_size)
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
        case REG_CALL_NOCF_CHECK:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;

        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if they are for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for registers which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }

          /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
             but we can't tell which at this point.  We must reset any
             expectations we had about the value that was previously
             stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
             and, if appropriate, restore its previous value, but we
             don't have enough information for that at this point.  */
          else
            {
              record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

              /* Otherwise, if this register is now referenced in i2
                 then the register used to be modified in one of the
                 original insns.  If it was i3 (say, in an unused
                 parallel), it's now completely gone, so the note can
                 be discarded.  But if it was modified in i2, i1 or i0
                 and we still reference it in i2, then we're
                 referencing the previous value, and since the
                 register was modified and REG_UNUSED, we know that
                 the previous value is now dead.  So, if we only
                 reference the register in i2, we change the note to
                 REG_DEAD, to reflect the previous value.  However, if
                 we're also setting or clobbering the register as
                 scratch, we know (because the register was not
                 referenced in i3) that it's unused, just as it was
                 unused before, and we place the note in i2.  */
              if (from_insn != i3 && i2 && INSN_P (i2)
                  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                {
                  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
                    PUT_REG_NOTE_KIND (note, REG_DEAD);
                  if (! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i2, REG_NOTE_KIND (note),
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i2, REG_NOTE_KIND (note),
                                          XEXP (note, 0))))
                    place = i2;
                }
            }

          break;

        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They must
             be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             there.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;

              place2 = 0;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;
        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                {
                  /* If the new I2 sets the same register that is marked
                     dead in the note, we do not in general know where to
                     put the note.  One important case we _can_ handle is
                     when the note comes from I3.  */
                  if (from_insn == i3)
                    place = i3;
                  else
                    break;
                }
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is
                     all TEM_INSN is doing.  If so, delete TEM_INSN.
                     Otherwise, make this into a REG_UNUSED note instead.
                     Don't delete sets to global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn, NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          unsigned int regno = REGNO (XEXP (note, 0));
                          reg_stat_type *rsp = &reg_stat[regno];
                          if (rsp->last_set == tem_insn)
                            record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;

                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not if it is totally replaced,
             which is what `dead_or_set_p' checks, so also check for it being
             set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.]  */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs (i, reg_raw_mode[i]))
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;
        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
            added_notes_insn = place;
        }

      if (place2)
        {
          add_shallow_copy_of_reg_note (place2, note);

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
            added_notes_insn = place2;
        }
    }
}
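
/* Illustrative sketch (editor's note, not from the original sources):
   after a 2->1 combination in

     A: (set (reg:SI 65) (reg:SI 66))   ;; carries REG_DEAD (reg:SI 66)
     B: (set (reg:SI 67) (plus:SI (reg:SI 65) (const_int 1)))

   where A is merged into B, the REG_DEAD note for (reg:SI 66) comes
   off A and, because (reg:SI 66) is now used as an input of the new B
   (i3), distribute_notes places the note on B.  */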
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      if (reg == pc_rtx)
        continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
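
/* Illustrative sketch (editor's note, not from the original sources):
   if insn B carried LOG_LINK -> A for (reg:SI 65), and combining A and
   B produced a new B that no longer uses (reg:SI 65), distribute_links
   walks forward from A; the first later insn in the block that still
   references (reg:SI 65), before the register is set again, becomes
   the new holder of the link.  */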
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
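
/* Illustrative sketch (editor's note, not from the original sources):

     unmentioned_reg_p ((plus:SI (reg:SI 65) (const_int 4)),
                        (plus:SI (reg:SI 65) (reg:SI 66)))

   returns false: every register in EQUIV also appears in EXPR, so
   EQUIV is an acceptable specialization.  Swapping the arguments makes
   it return true, because (reg:SI 66) is not mentioned in the other
   expression.  */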
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
/* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
   the reg-to-reg copy can usefully combine with later instructions, but we
   do not want to combine the hard reg into later instructions, for that
   restricts register allocation.  */
static void
make_more_copies (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          rtx set = single_set (insn);
          if (!set)
            continue;

          rtx dest = SET_DEST (set);
          if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
            continue;

          rtx src = SET_SRC (set);
          if (!(REG_P (src) && HARD_REGISTER_P (src)))
            continue;
          if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
            continue;

          rtx new_reg = gen_reg_rtx (GET_MODE (dest));
          rtx_insn *new_insn = gen_move_insn (new_reg, src);
          SET_SRC (set) = new_reg;
          emit_insn_before (new_insn, insn);
          df_insn_rescan (insn);
        }
    }
}
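
/* Illustrative sketch (editor's note, not from the original sources):
   an incoming-argument copy such as

     (set (reg:SI 100) (reg:SI 0))      ;; hard reg -> pseudo

   is rewritten into

     (set (reg:SI 200) (reg:SI 0))      ;; new pseudo holds the hard reg
     (set (reg:SI 100) (reg:SI 200))    ;; pseudo-to-pseudo copy

   so combine may fold the second, pseudo-only copy into later uses
   without propagating the hard register itself.  */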
/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  make_more_copies ();

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  int rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}