1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
36 We check (with modified_between_p) to avoid combining in such a way
37 as to move a computation to a place where its value would be different.
39 Combination is done by mathematically substituting the previous
40 insn(s) values for the regs they set into the expressions in
41 the later insns that refer to these regs. If the result is a valid insn
42 for our target machine, according to the machine description,
43 we install it, delete the earlier insns, and update the data flow
44 information (LOG_LINKS and REG_NOTES) for what we did.
46 There are a few exceptions where the dataflow information isn't
47 completely updated (however this is only a local issue since it is
48 regenerated before the next pass that uses it):
50 - reg_live_length is not updated
51 - reg_n_refs is not adjusted in the rare case when a register is
52 no longer required in a computation
53 - there are extremely rare cases (see distribute_notes) when a
54 REG_DEAD note is lost
55 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
56 removed because there is no way to know which register it was
57 linking
59 To simplify substitution, we combine only when the earlier insn(s)
60 consist of only a single assignment. To simplify updating afterward,
61 we never combine when a subroutine call appears in the middle. */
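/* A minimal sketch of a two-insn combination (the register numbers are
   made up purely for illustration):

     (set (reg 100) (plus (reg 99) (const_int 4)))        I2
     (set (reg 101) (mult (reg 100) (reg 98)))            I3

   Following the LOG_LINK from I3 back to I2, combine substitutes the
   value set by I2 into I3 and tries to recognize

     (set (reg 101) (mult (plus (reg 99) (const_int 4)) (reg 98)))

   If the target's machine description accepts this pattern, I3 is
   replaced by it and I2 is deleted.  */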
63 #include "config.h"
64 #include "system.h"
65 #include "coretypes.h"
66 #include "backend.h"
67 #include "target.h"
68 #include "rtl.h"
69 #include "tree.h"
70 #include "cfghooks.h"
71 #include "predict.h"
72 #include "df.h"
73 #include "memmodel.h"
74 #include "tm_p.h"
75 #include "optabs.h"
76 #include "regs.h"
77 #include "emit-rtl.h"
78 #include "recog.h"
79 #include "cgraph.h"
80 #include "stor-layout.h"
81 #include "cfgrtl.h"
82 #include "cfgcleanup.h"
83 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
84 #include "explow.h"
85 #include "insn-attr.h"
86 #include "rtlhooks-def.h"
87 #include "expr.h"
88 #include "tree-pass.h"
89 #include "valtrack.h"
90 #include "rtl-iter.h"
91 #include "print-rtl.h"
92 #include "function-abi.h"
93 #include "rtlanal.h"
95 /* Number of attempts to combine instructions in this function. */
97 static int combine_attempts;
99 /* Number of attempts that got as far as substitution in this function. */
101 static int combine_merges;
103 /* Number of instructions combined with added SETs in this function. */
105 static int combine_extras;
107 /* Number of instructions combined in this function. */
109 static int combine_successes;
111 /* Totals over entire compilation. */
113 static int total_attempts, total_merges, total_extras, total_successes;
115 /* combine_instructions may try to replace the right hand side of the
116 second instruction with the value of an associated REG_EQUAL note
117 before throwing it at try_combine. That is problematic when there
118 is a REG_DEAD note for a register used in the old right hand side
119 and can cause distribute_notes to do wrong things. This is the
120 second instruction if it has been so modified, null otherwise. */
122 static rtx_insn *i2mod;
124 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
126 static rtx i2mod_old_rhs;
128 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
130 static rtx i2mod_new_rhs;
132 struct reg_stat_type {
133 /* Record last point of death of (hard or pseudo) register n. */
134 rtx_insn *last_death;
136 /* Record last point of modification of (hard or pseudo) register n. */
137 rtx_insn *last_set;
139 /* The next group of fields allows the recording of the last value assigned
140 to (hard or pseudo) register n. We use this information to see if an
141 operation being processed is redundant given a prior operation performed
142 on the register. For example, an `and' with a constant is redundant if
143 all the zero bits are already known to be turned off.
145 We use an approach similar to that used by cse, but change it in the
146 following ways:
148 (1) We do not want to reinitialize at each label.
149 (2) It is useful, but not critical, to know the actual value assigned
150 to a register. Often just its form is helpful.
152 Therefore, we maintain the following fields:
154 last_set_value the last value assigned
155 last_set_label records the value of label_tick when the
156 register was assigned
157 last_set_table_tick records the value of label_tick when a
158 value using the register is assigned
159 last_set_invalid set to nonzero when it is not valid
160 to use the value of this register in some
161 register's value
163 To understand the usage of these tables, it is important to understand
164 the distinction between the value in last_set_value being valid and
165 the register being validly contained in some other expression in the
166 table.
168 (The next two parameters are out of date).
170 reg_stat[i].last_set_value is valid if it is nonzero, and either
171 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
173 Register I may validly appear in any expression returned for the value
174 of another register if reg_n_sets[i] is 1. It may also appear in the
175 value for register J if reg_stat[j].last_set_invalid is zero, or
176 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
178 If an expression is found in the table containing a register which may
179 not validly appear in an expression, the register is replaced by
180 something that won't match, (clobber (const_int 0)). */
182 /* Record last value assigned to (hard or pseudo) register n. */
184 rtx last_set_value;
186 /* Record the value of label_tick when an expression involving register n
187 is placed in last_set_value. */
189 int last_set_table_tick;
191 /* Record the value of label_tick when the value for register n is placed in
192 last_set_value. */
194 int last_set_label;
196 /* These fields are maintained in parallel with last_set_value and are
197 used to store the mode in which the register was last set, the bits
198 that were known to be zero when it was last set, and the number of
199 sign bits copies it was known to have when it was last set. */
201 unsigned HOST_WIDE_INT last_set_nonzero_bits;
202 char last_set_sign_bit_copies;
203 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
205 /* Set nonzero if references to register n in expressions should not be
206 used. last_set_invalid is set nonzero when this register is being
207 assigned to and last_set_table_tick == label_tick. */
209 char last_set_invalid;
211 /* Some registers that are set more than once and used in more than one
212 basic block are nevertheless always set in similar ways. For example,
213 a QImode register may be loaded from memory in two places on a machine
214 where byte loads zero extend.
216 We record in the following fields if a register has some leading bits
217 that are always equal to the sign bit, and what we know about the
218 nonzero bits of a register, specifically which bits are known to be
219 zero.
221 If an entry is zero, it means that we don't know anything special. */
223 unsigned char sign_bit_copies;
225 unsigned HOST_WIDE_INT nonzero_bits;
227 /* Record the value of the label_tick when the last truncation
228 happened. The field truncated_to_mode is only valid if
229 truncation_label == label_tick. */
231 int truncation_label;
233 /* Record the last truncation seen for this register. If truncation
234 is not a nop to this mode we might be able to save an explicit
235 truncation if we know that value already contains a truncated
236 value. */
238 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
242 static vec<reg_stat_type> reg_stat;
244 /* One plus the highest pseudo for which we track REG_N_SETS.
245 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
246 but during combine_split_insns new pseudos can be created. As we don't have
247 updated DF information in that case, it is hard to initialize the array
248 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
249 so instead of growing the arrays, just assume all newly created pseudos
250 during combine might be set multiple times. */
252 static unsigned int reg_n_sets_max;
254 /* Record the luid of the last insn that invalidated memory
255 (anything that writes memory, and subroutine calls, but not pushes). */
257 static int mem_last_set;
259 /* Record the luid of the last CALL_INSN
260 so we can tell whether a potential combination crosses any calls. */
262 static int last_call_luid;
264 /* When `subst' is called, this is the insn that is being modified
265 (by combining in a previous insn). The PATTERN of this insn
266 is still the old pattern partially modified and it should not be
267 looked at, but this may be used to examine the successors of the insn
268 to judge whether a simplification is valid. */
270 static rtx_insn *subst_insn;
272 /* This is the lowest LUID that `subst' is currently dealing with.
273 get_last_value will not return a value if the register was set at or
274 after this LUID. If not for this mechanism, we could get confused if
275 I2 or I1 in try_combine were an insn that used the old value of a register
276 to obtain a new value. In that case, we might erroneously get the
277 new value of the register when we wanted the old one. */
279 static int subst_low_luid;
281 /* This contains any hard registers that are used in newpat; reg_dead_at_p
282 must consider all these registers to be always live. */
284 static HARD_REG_SET newpat_used_regs;
286 /* This is an insn to which a LOG_LINKS entry has been added. If this
287 insn is earlier than I2 or I3, combine should rescan starting at
288 that location. */
290 static rtx_insn *added_links_insn;
292 /* And similarly, for notes. */
294 static rtx_insn *added_notes_insn;
296 /* Basic block in which we are performing combines. */
297 static basic_block this_basic_block;
298 static bool optimize_this_for_speed_p;
301 /* Length of the currently allocated uid_insn_cost array. */
303 static int max_uid_known;
305 /* The following array records the insn_cost for every insn
306 in the instruction stream. */
308 static int *uid_insn_cost;
310 /* The following array records the LOG_LINKS for every insn in the
311 instruction stream as struct insn_link pointers. */
313 struct insn_link {
314 rtx_insn *insn;
315 unsigned int regno;
316 struct insn_link *next;
319 static struct insn_link **uid_log_links;
321 static inline int
322 insn_uid_check (const_rtx insn)
324 int uid = INSN_UID (insn);
325 gcc_checking_assert (uid <= max_uid_known);
326 return uid;
329 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
330 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
332 #define FOR_EACH_LOG_LINK(L, INSN) \
333 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
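/* Usage sketch -- walking the links of an insn looks like

     struct insn_link *links;
     FOR_EACH_LOG_LINK (links, insn)
       if (links->insn == other_insn)
         break;

   (this mirrors insn_a_feeds_b further down; `other_insn' is a made-up
   name for illustration).  */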
335 /* Links for LOG_LINKS are allocated from this obstack. */
337 static struct obstack insn_link_obstack;
339 /* Allocate a link. */
341 static inline struct insn_link *
342 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
344 struct insn_link *l
345 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
346 sizeof (struct insn_link));
347 l->insn = insn;
348 l->regno = regno;
349 l->next = next;
350 return l;
353 /* Incremented for each basic block. */
355 static int label_tick;
357 /* Reset to label_tick for each extended basic block in scanning order. */
359 static int label_tick_ebb_start;
361 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
362 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
364 static scalar_int_mode nonzero_bits_mode;
366 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
367 be safely used. It is zero while computing them and after combine has
368 completed. This former test prevents propagating values based on
369 previously set values, which can be incorrect if a variable is modified
370 in a loop. */
372 static int nonzero_sign_valid;
375 /* Record one modification to rtl structure
376 to be undone by storing old_contents into *where. */
378 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
380 struct undo
382 struct undo *next;
383 enum undo_kind kind;
384 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
385 union { rtx *r; int *i; struct insn_link **l; } where;
388 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
389 num_undo says how many are currently recorded.
391 other_insn is nonzero if we have modified some other insn in the process
392 of working on subst_insn. It must be verified too. */
394 struct undobuf
396 struct undo *undos;
397 struct undo *frees;
398 rtx_insn *other_insn;
401 static struct undobuf undobuf;
403 /* Number of times the pseudo being substituted for
404 was found and replaced. */
406 static int n_occurrences;
408 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
409 scalar_int_mode,
410 unsigned HOST_WIDE_INT *);
411 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
412 scalar_int_mode,
413 unsigned int *);
414 static void do_SUBST (rtx *, rtx);
415 static void do_SUBST_INT (int *, int);
416 static void init_reg_last (void);
417 static void setup_incoming_promotions (rtx_insn *);
418 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
419 static int cant_combine_insn_p (rtx_insn *);
420 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
421 rtx_insn *, rtx_insn *, rtx *, rtx *);
422 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
423 static int contains_muldiv (rtx);
424 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
425 int *, rtx_insn *);
426 static void undo_all (void);
427 static void undo_commit (void);
428 static rtx *find_split_point (rtx *, rtx_insn *, bool);
429 static rtx subst (rtx, rtx, rtx, int, int, int);
430 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
431 static rtx simplify_if_then_else (rtx);
432 static rtx simplify_set (rtx);
433 static rtx simplify_logical (rtx);
434 static rtx expand_compound_operation (rtx);
435 static const_rtx expand_field_assignment (const_rtx);
436 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
437 rtx, unsigned HOST_WIDE_INT, int, int, int);
438 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
439 unsigned HOST_WIDE_INT *);
440 static rtx canon_reg_for_combine (rtx, rtx);
441 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
442 scalar_int_mode, unsigned HOST_WIDE_INT, int);
443 static rtx force_to_mode (rtx, machine_mode,
444 unsigned HOST_WIDE_INT, int);
445 static rtx if_then_else_cond (rtx, rtx *, rtx *);
446 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
447 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
448 static rtx make_field_assignment (rtx);
449 static rtx apply_distributive_law (rtx);
450 static rtx distribute_and_simplify_rtx (rtx, int);
451 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
452 unsigned HOST_WIDE_INT);
453 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
454 unsigned HOST_WIDE_INT);
455 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
456 HOST_WIDE_INT, machine_mode, int *);
457 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
458 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
459 int);
460 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
461 static rtx gen_lowpart_for_combine (machine_mode, rtx);
462 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
463 rtx, rtx *);
464 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
465 static void update_table_tick (rtx);
466 static void record_value_for_reg (rtx, rtx_insn *, rtx);
467 static void check_promoted_subreg (rtx_insn *, rtx);
468 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
469 static void record_dead_and_set_regs (rtx_insn *);
470 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
471 static rtx get_last_value (const_rtx);
472 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
473 static int reg_dead_at_p (rtx, rtx_insn *);
474 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
475 static int reg_bitfield_target_p (rtx, rtx);
476 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
477 static void distribute_links (struct insn_link *);
478 static void mark_used_regs_combine (rtx);
479 static void record_promoted_value (rtx_insn *, rtx);
480 static bool unmentioned_reg_p (rtx, rtx);
481 static void record_truncated_values (rtx *, void *);
482 static bool reg_truncated_to_mode (machine_mode, const_rtx);
483 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
486 /* It is not safe to use ordinary gen_lowpart in combine.
487 See comments in gen_lowpart_for_combine. */
488 #undef RTL_HOOKS_GEN_LOWPART
489 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
491 /* Our implementation of gen_lowpart never emits a new pseudo. */
492 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
493 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
495 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
496 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
498 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
499 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
501 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
502 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
504 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
507 /* Convenience wrapper for the canonicalize_comparison target hook.
508 Target hooks cannot use enum rtx_code. */
509 static inline void
510 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
511 bool op0_preserve_value)
513 int code_int = (int)*code;
514 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
515 *code = (enum rtx_code)code_int;
518 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
519 PATTERN cannot be split. Otherwise, it returns an insn sequence.
520 This is a wrapper around split_insns which ensures that the
521 reg_stat vector is made larger if the splitter creates a new
522 register. */
524 static rtx_insn *
525 combine_split_insns (rtx pattern, rtx_insn *insn)
527 rtx_insn *ret;
528 unsigned int nregs;
530 ret = split_insns (pattern, insn);
531 nregs = max_reg_num ();
532 if (nregs > reg_stat.length ())
533 reg_stat.safe_grow_cleared (nregs, true);
534 return ret;
537 /* This is used by find_single_use to locate an rtx in LOC that
538 contains exactly one use of DEST, which is typically a REG.
539 It returns a pointer to the innermost rtx expression
540 containing DEST. Appearances of DEST that are being used to
541 totally replace it are not counted. */
543 static rtx *
544 find_single_use_1 (rtx dest, rtx *loc)
546 rtx x = *loc;
547 enum rtx_code code = GET_CODE (x);
548 rtx *result = NULL;
549 rtx *this_result;
550 int i;
551 const char *fmt;
553 switch (code)
555 case CONST:
556 case LABEL_REF:
557 case SYMBOL_REF:
558 CASE_CONST_ANY:
559 case CLOBBER:
560 return 0;
562 case SET:
563 /* If the destination is anything other than PC, a REG or a SUBREG
564 of a REG that occupies all of the REG, the insn uses DEST if
565 it is mentioned in the destination or the source. Otherwise, we
566 need just check the source. */
567 if (GET_CODE (SET_DEST (x)) != PC
568 && !REG_P (SET_DEST (x))
569 && ! (GET_CODE (SET_DEST (x)) == SUBREG
570 && REG_P (SUBREG_REG (SET_DEST (x)))
571 && !read_modify_subreg_p (SET_DEST (x))))
572 break;
574 return find_single_use_1 (dest, &SET_SRC (x));
576 case MEM:
577 case SUBREG:
578 return find_single_use_1 (dest, &XEXP (x, 0));
580 default:
581 break;
584 /* If it wasn't one of the common cases above, check each expression and
585 vector of this code. Look for a unique usage of DEST. */
587 fmt = GET_RTX_FORMAT (code);
588 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
590 if (fmt[i] == 'e')
592 if (dest == XEXP (x, i)
593 || (REG_P (dest) && REG_P (XEXP (x, i))
594 && REGNO (dest) == REGNO (XEXP (x, i))))
595 this_result = loc;
596 else
597 this_result = find_single_use_1 (dest, &XEXP (x, i));
599 if (result == NULL)
600 result = this_result;
601 else if (this_result)
602 /* Duplicate usage. */
603 return NULL;
605 else if (fmt[i] == 'E')
607 int j;
609 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
611 if (XVECEXP (x, i, j) == dest
612 || (REG_P (dest)
613 && REG_P (XVECEXP (x, i, j))
614 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
615 this_result = loc;
616 else
617 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
619 if (result == NULL)
620 result = this_result;
621 else if (this_result)
622 return NULL;
627 return result;
631 /* See if DEST, produced in INSN, is used only a single time in the
632 sequel. If so, return a pointer to the innermost rtx expression in which
633 it is used.
635 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
637 Otherwise, we find the single use by finding an insn that has a
638 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
639 only referenced once in that insn, we know that it must be the first
640 and last insn referencing DEST. */
642 static rtx *
643 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
645 basic_block bb;
646 rtx_insn *next;
647 rtx *result;
648 struct insn_link *link;
650 if (!REG_P (dest))
651 return 0;
653 bb = BLOCK_FOR_INSN (insn);
654 for (next = NEXT_INSN (insn);
655 next && BLOCK_FOR_INSN (next) == bb;
656 next = NEXT_INSN (next))
657 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
659 FOR_EACH_LOG_LINK (link, next)
660 if (link->insn == insn && link->regno == REGNO (dest))
661 break;
663 if (link)
665 result = find_single_use_1 (dest, &PATTERN (next));
666 if (ploc)
667 *ploc = next;
668 return result;
672 return 0;
675 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
676 insn. The substitution can be undone by undo_all. If INTO is already
677 set to NEWVAL, do not record this change. Because computing NEWVAL might
678 also call SUBST, we have to compute it before we put anything into
679 the undo table. */
681 static void
682 do_SUBST (rtx *into, rtx newval)
684 struct undo *buf;
685 rtx oldval = *into;
687 if (oldval == newval)
688 return;
690 /* We'd like to catch as many invalid transformations here as
691 possible. Unfortunately, there are way too many mode changes
692 that are perfectly valid, so we'd waste too much effort for
693 little gain doing the checks here. Focus on catching invalid
694 transformations involving integer constants. */
695 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
696 && CONST_INT_P (newval))
698 /* Sanity check that we're replacing oldval with a CONST_INT
699 that is a valid sign-extension for the original mode. */
700 gcc_assert (INTVAL (newval)
701 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
703 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
704 CONST_INT is not valid, because after the replacement, the
705 original mode would be gone. Unfortunately, we can't tell
706 when do_SUBST is called to replace the operand thereof, so we
707 perform this test on oldval instead, checking whether an
708 invalid replacement took place before we got here. */
709 gcc_assert (!(GET_CODE (oldval) == SUBREG
710 && CONST_INT_P (SUBREG_REG (oldval))));
711 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
712 && CONST_INT_P (XEXP (oldval, 0))));
715 if (undobuf.frees)
716 buf = undobuf.frees, undobuf.frees = buf->next;
717 else
718 buf = XNEW (struct undo);
720 buf->kind = UNDO_RTX;
721 buf->where.r = into;
722 buf->old_contents.r = oldval;
723 *into = newval;
725 buf->next = undobuf.undos, undobuf.undos = buf;
728 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
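/* Usage sketch (`new_src' is a made-up name): a transformation typically does

     SUBST (SET_SRC (x), new_src);

   which installs new_src in place while recording the old SET_SRC, so that
   undo_all can restore it if the combination is eventually abandoned.  */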
730 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
731 for the value of a HOST_WIDE_INT value (including CONST_INT) is
732 not safe. */
734 static void
735 do_SUBST_INT (int *into, int newval)
737 struct undo *buf;
738 int oldval = *into;
740 if (oldval == newval)
741 return;
743 if (undobuf.frees)
744 buf = undobuf.frees, undobuf.frees = buf->next;
745 else
746 buf = XNEW (struct undo);
748 buf->kind = UNDO_INT;
749 buf->where.i = into;
750 buf->old_contents.i = oldval;
751 *into = newval;
753 buf->next = undobuf.undos, undobuf.undos = buf;
756 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
758 /* Similar to SUBST, but just substitute the mode. This is used when
759 changing the mode of a pseudo-register, so that any other
760 references to the entry in the regno_reg_rtx array will change as
761 well. */
763 static void
764 do_SUBST_MODE (rtx *into, machine_mode newval)
766 struct undo *buf;
767 machine_mode oldval = GET_MODE (*into);
769 if (oldval == newval)
770 return;
772 if (undobuf.frees)
773 buf = undobuf.frees, undobuf.frees = buf->next;
774 else
775 buf = XNEW (struct undo);
777 buf->kind = UNDO_MODE;
778 buf->where.r = into;
779 buf->old_contents.m = oldval;
780 adjust_reg_mode (*into, newval);
782 buf->next = undobuf.undos, undobuf.undos = buf;
785 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
787 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
789 static void
790 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
792 struct undo *buf;
793 struct insn_link * oldval = *into;
795 if (oldval == newval)
796 return;
798 if (undobuf.frees)
799 buf = undobuf.frees, undobuf.frees = buf->next;
800 else
801 buf = XNEW (struct undo);
803 buf->kind = UNDO_LINKS;
804 buf->where.l = into;
805 buf->old_contents.l = oldval;
806 *into = newval;
808 buf->next = undobuf.undos, undobuf.undos = buf;
811 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
813 /* Subroutine of try_combine. Determine whether the replacement patterns
814 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
815 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
816 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
817 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
818 of all the instructions can be estimated and the replacements are more
819 expensive than the original sequence. */
821 static bool
822 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
823 rtx newpat, rtx newi2pat, rtx newotherpat)
825 int i0_cost, i1_cost, i2_cost, i3_cost;
826 int new_i2_cost, new_i3_cost;
827 int old_cost, new_cost;
829 /* Lookup the original insn_costs. */
830 i2_cost = INSN_COST (i2);
831 i3_cost = INSN_COST (i3);
833 if (i1)
835 i1_cost = INSN_COST (i1);
836 if (i0)
838 i0_cost = INSN_COST (i0);
839 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
840 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
842 else
844 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
845 ? i1_cost + i2_cost + i3_cost : 0);
846 i0_cost = 0;
849 else
851 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
852 i1_cost = i0_cost = 0;
855 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
856 correct that. */
857 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
858 old_cost -= i1_cost;
861 /* Calculate the replacement insn_costs. */
862 rtx tmp = PATTERN (i3);
863 PATTERN (i3) = newpat;
864 int tmpi = INSN_CODE (i3);
865 INSN_CODE (i3) = -1;
866 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
867 PATTERN (i3) = tmp;
868 INSN_CODE (i3) = tmpi;
869 if (newi2pat)
871 tmp = PATTERN (i2);
872 PATTERN (i2) = newi2pat;
873 tmpi = INSN_CODE (i2);
874 INSN_CODE (i2) = -1;
875 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
876 PATTERN (i2) = tmp;
877 INSN_CODE (i2) = tmpi;
878 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
879 ? new_i2_cost + new_i3_cost : 0;
881 else
883 new_cost = new_i3_cost;
884 new_i2_cost = 0;
887 if (undobuf.other_insn)
889 int old_other_cost, new_other_cost;
891 old_other_cost = INSN_COST (undobuf.other_insn);
892 tmp = PATTERN (undobuf.other_insn);
893 PATTERN (undobuf.other_insn) = newotherpat;
894 tmpi = INSN_CODE (undobuf.other_insn);
895 INSN_CODE (undobuf.other_insn) = -1;
896 new_other_cost = insn_cost (undobuf.other_insn,
897 optimize_this_for_speed_p);
898 PATTERN (undobuf.other_insn) = tmp;
899 INSN_CODE (undobuf.other_insn) = tmpi;
900 if (old_other_cost > 0 && new_other_cost > 0)
902 old_cost += old_other_cost;
903 new_cost += new_other_cost;
905 else
906 old_cost = 0;
909 /* Disallow this combination if both new_cost and old_cost are greater than
910 zero, and new_cost is greater than old cost. */
911 int reject = old_cost > 0 && new_cost > old_cost;
913 if (dump_file)
915 fprintf (dump_file, "%s combination of insns ",
916 reject ? "rejecting" : "allowing");
917 if (i0)
918 fprintf (dump_file, "%d, ", INSN_UID (i0));
919 if (i1 && INSN_UID (i1) != INSN_UID (i2))
920 fprintf (dump_file, "%d, ", INSN_UID (i1));
921 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
923 fprintf (dump_file, "original costs ");
924 if (i0)
925 fprintf (dump_file, "%d + ", i0_cost);
926 if (i1 && INSN_UID (i1) != INSN_UID (i2))
927 fprintf (dump_file, "%d + ", i1_cost);
928 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
930 if (newi2pat)
931 fprintf (dump_file, "replacement costs %d + %d = %d\n",
932 new_i2_cost, new_i3_cost, new_cost);
933 else
934 fprintf (dump_file, "replacement cost %d\n", new_cost);
937 if (reject)
938 return false;
940 /* Update the uid_insn_cost array with the replacement costs. */
941 INSN_COST (i2) = new_i2_cost;
942 INSN_COST (i3) = new_i3_cost;
943 if (i1)
945 INSN_COST (i1) = 0;
946 if (i0)
947 INSN_COST (i0) = 0;
950 return true;
954 /* Delete any insns that copy a register to itself.
955 Return true if the CFG was changed. */
957 static bool
958 delete_noop_moves (void)
960 rtx_insn *insn, *next;
961 basic_block bb;
963 bool edges_deleted = false;
965 FOR_EACH_BB_FN (bb, cfun)
967 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
969 next = NEXT_INSN (insn);
970 if (INSN_P (insn) && noop_move_p (insn))
972 if (dump_file)
973 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
975 edges_deleted |= delete_insn_and_edges (insn);
980 return edges_deleted;
984 /* Return false if we do not want to (or cannot) combine DEF. */
985 static bool
986 can_combine_def_p (df_ref def)
988 /* Do not consider if it is pre/post modification in MEM. */
989 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
990 return false;
992 unsigned int regno = DF_REF_REGNO (def);
994 /* Do not combine frame pointer adjustments. */
995 if ((regno == FRAME_POINTER_REGNUM
996 && (!reload_completed || frame_pointer_needed))
997 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
998 && regno == HARD_FRAME_POINTER_REGNUM
999 && (!reload_completed || frame_pointer_needed))
1000 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1001 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1002 return false;
1004 return true;
1007 /* Return false if we do not want to (or cannot) combine USE. */
1008 static bool
1009 can_combine_use_p (df_ref use)
1011 /* Do not consider the usage of the stack pointer by function call. */
1012 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1013 return false;
1015 return true;
1018 /* Fill in log links field for all insns. */
1020 static void
1021 create_log_links (void)
1023 basic_block bb;
1024 rtx_insn **next_use;
1025 rtx_insn *insn;
1026 df_ref def, use;
1028 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1030 /* Pass through each block from the end, recording the uses of each
1031 register and establishing log links when def is encountered.
1032 Note that we do not clear the next_use array in order to save time,
1033 so we have to test whether the use is in the same basic block as def.
1035 There are a few cases below when we do not consider the definition or
1036 usage -- these are taken from what the original flow.c did. Don't ask me why it is
1037 done this way; I don't know and if it works, I don't want to know. */
1039 FOR_EACH_BB_FN (bb, cfun)
1041 FOR_BB_INSNS_REVERSE (bb, insn)
1043 if (!NONDEBUG_INSN_P (insn))
1044 continue;
1046 /* Log links are created only once. */
1047 gcc_assert (!LOG_LINKS (insn));
1049 FOR_EACH_INSN_DEF (def, insn)
1051 unsigned int regno = DF_REF_REGNO (def);
1052 rtx_insn *use_insn;
1054 if (!next_use[regno])
1055 continue;
1057 if (!can_combine_def_p (def))
1058 continue;
1060 use_insn = next_use[regno];
1061 next_use[regno] = NULL;
1063 if (BLOCK_FOR_INSN (use_insn) != bb)
1064 continue;
1066 /* flow.c claimed:
1068 We don't build a LOG_LINK for hard registers contained
1069 in ASM_OPERANDs. If these registers get replaced,
1070 we might wind up changing the semantics of the insn,
1071 even if reload can make what appear to be valid
1072 assignments later. */
1073 if (regno < FIRST_PSEUDO_REGISTER
1074 && asm_noperands (PATTERN (use_insn)) >= 0)
1075 continue;
1077 /* Don't add duplicate links between instructions. */
1078 struct insn_link *links;
1079 FOR_EACH_LOG_LINK (links, use_insn)
1080 if (insn == links->insn && regno == links->regno)
1081 break;
1083 if (!links)
1084 LOG_LINKS (use_insn)
1085 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1088 FOR_EACH_INSN_USE (use, insn)
1089 if (can_combine_use_p (use))
1090 next_use[DF_REF_REGNO (use)] = insn;
1094 free (next_use);
1097 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1098 true if we found a LOG_LINK that proves that A feeds B. This only works
1099 if there are no instructions between A and B which could have a link
1100 depending on A, since in that case we would not record a link for B. */
1102 static bool
1103 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1105 struct insn_link *links;
1106 FOR_EACH_LOG_LINK (links, b)
1107 if (links->insn == a)
1108 return true;
1109 return false;
1112 /* Main entry point for combiner. F is the first insn of the function.
1113 NREGS is the first unused pseudo-reg number.
1115 Return nonzero if the CFG was changed (e.g. if the combiner has
1116 turned an indirect jump instruction into a direct jump). */
1117 static int
1118 combine_instructions (rtx_insn *f, unsigned int nregs)
1120 rtx_insn *insn, *next;
1121 struct insn_link *links, *nextlinks;
1122 rtx_insn *first;
1123 basic_block last_bb;
1125 int new_direct_jump_p = 0;
1127 for (first = f; first && !NONDEBUG_INSN_P (first); )
1128 first = NEXT_INSN (first);
1129 if (!first)
1130 return 0;
1132 combine_attempts = 0;
1133 combine_merges = 0;
1134 combine_extras = 0;
1135 combine_successes = 0;
1137 rtl_hooks = combine_rtl_hooks;
1139 reg_stat.safe_grow_cleared (nregs, true);
1141 init_recog_no_volatile ();
1143 /* Allocate array for insn info. */
1144 max_uid_known = get_max_uid ();
1145 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1146 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1147 gcc_obstack_init (&insn_link_obstack);
1149 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1151 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1152 problems when, for example, we have j <<= 1 in a loop. */
1154 nonzero_sign_valid = 0;
1155 label_tick = label_tick_ebb_start = 1;
1157 /* Scan all SETs and see if we can deduce anything about what
1158 bits are known to be zero for some registers and how many copies
1159 of the sign bit are known to exist for those registers.
1161 Also set any known values so that we can use it while searching
1162 for what bits are known to be set. */
1164 setup_incoming_promotions (first);
1165 /* Allow the entry block and the first block to fall into the same EBB.
1166 Conceptually the incoming promotions are assigned to the entry block. */
1167 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1169 create_log_links ();
1170 FOR_EACH_BB_FN (this_basic_block, cfun)
1172 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1173 last_call_luid = 0;
1174 mem_last_set = -1;
1176 label_tick++;
1177 if (!single_pred_p (this_basic_block)
1178 || single_pred (this_basic_block) != last_bb)
1179 label_tick_ebb_start = label_tick;
1180 last_bb = this_basic_block;
1182 FOR_BB_INSNS (this_basic_block, insn)
1183 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1185 rtx links;
1187 subst_low_luid = DF_INSN_LUID (insn);
1188 subst_insn = insn;
1190 note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
1191 record_dead_and_set_regs (insn);
1193 if (AUTO_INC_DEC)
1194 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1195 if (REG_NOTE_KIND (links) == REG_INC)
1196 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1197 insn);
1199 /* Record the current insn_cost of this instruction. */
1200 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1201 if (dump_file)
1203 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1204 dump_insn_slim (dump_file, insn);
1209 nonzero_sign_valid = 1;
1211 /* Now scan all the insns in forward order. */
1212 label_tick = label_tick_ebb_start = 1;
1213 init_reg_last ();
1214 setup_incoming_promotions (first);
1215 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1216 int max_combine = param_max_combine_insns;
1218 FOR_EACH_BB_FN (this_basic_block, cfun)
1220 rtx_insn *last_combined_insn = NULL;
1222 /* Ignore instruction combination in basic blocks that are going to
1223 be removed as unreachable anyway. See PR82386. */
1224 if (EDGE_COUNT (this_basic_block->preds) == 0)
1225 continue;
1227 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1228 last_call_luid = 0;
1229 mem_last_set = -1;
1231 label_tick++;
1232 if (!single_pred_p (this_basic_block)
1233 || single_pred (this_basic_block) != last_bb)
1234 label_tick_ebb_start = label_tick;
1235 last_bb = this_basic_block;
1237 rtl_profile_for_bb (this_basic_block);
1238 for (insn = BB_HEAD (this_basic_block);
1239 insn != NEXT_INSN (BB_END (this_basic_block));
1240 insn = next ? next : NEXT_INSN (insn))
1242 next = 0;
1243 if (!NONDEBUG_INSN_P (insn))
1244 continue;
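/* Step last_combined_insn back past anything that is no longer a real
   (non-debug) insn or has since been deleted.  */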
1246 while (last_combined_insn
1247 && (!NONDEBUG_INSN_P (last_combined_insn)
1248 || last_combined_insn->deleted ()))
1249 last_combined_insn = PREV_INSN (last_combined_insn);
1250 if (last_combined_insn == NULL_RTX
1251 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1252 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1253 last_combined_insn = insn;
1255 /* See if we know about function return values before this
1256 insn based upon SUBREG flags. */
1257 check_promoted_subreg (insn, PATTERN (insn));
1259 /* See if we can find hardregs and subreg of pseudos in
1260 narrower modes. This could help turning TRUNCATEs
1261 into SUBREGs. */
1262 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1264 /* Try this insn with each insn it links back to. */
1266 FOR_EACH_LOG_LINK (links, insn)
1267 if ((next = try_combine (insn, links->insn, NULL,
1268 NULL, &new_direct_jump_p,
1269 last_combined_insn)) != 0)
1271 statistics_counter_event (cfun, "two-insn combine", 1);
1272 goto retry;
1275 /* Try each sequence of three linked insns ending with this one. */
1277 if (max_combine >= 3)
1278 FOR_EACH_LOG_LINK (links, insn)
1280 rtx_insn *link = links->insn;
1282 /* If the linked insn has been replaced by a note, then there
1283 is no point in pursuing this chain any further. */
1284 if (NOTE_P (link))
1285 continue;
1287 FOR_EACH_LOG_LINK (nextlinks, link)
1288 if ((next = try_combine (insn, link, nextlinks->insn,
1289 NULL, &new_direct_jump_p,
1290 last_combined_insn)) != 0)
1292 statistics_counter_event (cfun, "three-insn combine", 1);
1293 goto retry;
1297 /* Try combining an insn with two different insns whose results it
1298 uses. */
1299 if (max_combine >= 3)
1300 FOR_EACH_LOG_LINK (links, insn)
1301 for (nextlinks = links->next; nextlinks;
1302 nextlinks = nextlinks->next)
1303 if ((next = try_combine (insn, links->insn,
1304 nextlinks->insn, NULL,
1305 &new_direct_jump_p,
1306 last_combined_insn)) != 0)
1309 statistics_counter_event (cfun, "three-insn combine", 1);
1310 goto retry;
1313 /* Try four-instruction combinations. */
1314 if (max_combine >= 4)
1315 FOR_EACH_LOG_LINK (links, insn)
1317 struct insn_link *next1;
1318 rtx_insn *link = links->insn;
1320 /* If the linked insn has been replaced by a note, then there
1321 is no point in pursuing this chain any further. */
1322 if (NOTE_P (link))
1323 continue;
1325 FOR_EACH_LOG_LINK (next1, link)
1327 rtx_insn *link1 = next1->insn;
1328 if (NOTE_P (link1))
1329 continue;
1330 /* I0 -> I1 -> I2 -> I3. */
1331 FOR_EACH_LOG_LINK (nextlinks, link1)
1332 if ((next = try_combine (insn, link, link1,
1333 nextlinks->insn,
1334 &new_direct_jump_p,
1335 last_combined_insn)) != 0)
1337 statistics_counter_event (cfun, "four-insn combine", 1);
1338 goto retry;
1340 /* I0, I1 -> I2, I2 -> I3. */
1341 for (nextlinks = next1->next; nextlinks;
1342 nextlinks = nextlinks->next)
1343 if ((next = try_combine (insn, link, link1,
1344 nextlinks->insn,
1345 &new_direct_jump_p,
1346 last_combined_insn)) != 0)
1348 statistics_counter_event (cfun, "four-insn combine", 1);
1349 goto retry;
1353 for (next1 = links->next; next1; next1 = next1->next)
1355 rtx_insn *link1 = next1->insn;
1356 if (NOTE_P (link1))
1357 continue;
1358 /* I0 -> I2; I1, I2 -> I3. */
1359 FOR_EACH_LOG_LINK (nextlinks, link)
1360 if ((next = try_combine (insn, link, link1,
1361 nextlinks->insn,
1362 &new_direct_jump_p,
1363 last_combined_insn)) != 0)
1365 statistics_counter_event (cfun, "four-insn combine", 1);
1366 goto retry;
1368 /* I0 -> I1; I1, I2 -> I3. */
1369 FOR_EACH_LOG_LINK (nextlinks, link1)
1370 if ((next = try_combine (insn, link, link1,
1371 nextlinks->insn,
1372 &new_direct_jump_p,
1373 last_combined_insn)) != 0)
1375 statistics_counter_event (cfun, "four-insn combine", 1);
1376 goto retry;
1381 /* Try this insn with each REG_EQUAL note it links back to. */
1382 FOR_EACH_LOG_LINK (links, insn)
1384 rtx set, note;
1385 rtx_insn *temp = links->insn;
1386 if ((set = single_set (temp)) != 0
1387 && (note = find_reg_equal_equiv_note (temp)) != 0
1388 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1389 && ! side_effects_p (SET_SRC (set))
1390 /* Avoid using a register that may already have been marked
1391 dead by an earlier instruction. */
1392 && ! unmentioned_reg_p (note, SET_SRC (set))
1393 && (GET_MODE (note) == VOIDmode
1394 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1395 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1396 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1397 || (GET_MODE (XEXP (SET_DEST (set), 0))
1398 == GET_MODE (note))))))
1400 /* Temporarily replace the set's source with the
1401 contents of the REG_EQUAL note. The insn will
1402 be deleted or recognized by try_combine. */
1403 rtx orig_src = SET_SRC (set);
1404 rtx orig_dest = SET_DEST (set);
1405 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1406 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1407 SET_SRC (set) = note;
1408 i2mod = temp;
1409 i2mod_old_rhs = copy_rtx (orig_src);
1410 i2mod_new_rhs = copy_rtx (note);
1411 next = try_combine (insn, i2mod, NULL, NULL,
1412 &new_direct_jump_p,
1413 last_combined_insn);
1414 i2mod = NULL;
1415 if (next)
1417 statistics_counter_event (cfun, "insn-with-note combine", 1);
1418 goto retry;
1420 SET_SRC (set) = orig_src;
1421 SET_DEST (set) = orig_dest;
1425 if (!NOTE_P (insn))
1426 record_dead_and_set_regs (insn);
1428 retry:
1433 default_rtl_profile ();
1434 clear_bb_flags ();
1435 new_direct_jump_p |= purge_all_dead_edges ();
1436 new_direct_jump_p |= delete_noop_moves ();
1438 /* Clean up. */
1439 obstack_free (&insn_link_obstack, NULL);
1440 free (uid_log_links);
1441 free (uid_insn_cost);
1442 reg_stat.release ();
1445 struct undo *undo, *next;
1446 for (undo = undobuf.frees; undo; undo = next)
1448 next = undo->next;
1449 free (undo);
1451 undobuf.frees = 0;
1454 total_attempts += combine_attempts;
1455 total_merges += combine_merges;
1456 total_extras += combine_extras;
1457 total_successes += combine_successes;
1459 nonzero_sign_valid = 0;
1460 rtl_hooks = general_rtl_hooks;
1462 /* Make recognizer allow volatile MEMs again. */
1463 init_recog ();
1465 return new_direct_jump_p;
1468 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
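/* Only the fields before sign_bit_copies are cleared here; the
   sign_bit_copies, nonzero_bits and truncation data gathered by the
   first scan are kept across passes.  */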
1470 static void
1471 init_reg_last (void)
1473 unsigned int i;
1474 reg_stat_type *p;
1476 FOR_EACH_VEC_ELT (reg_stat, i, p)
1477 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1480 /* Set up any promoted values for incoming argument registers. */
1482 static void
1483 setup_incoming_promotions (rtx_insn *first)
1485 tree arg;
1486 bool strictly_local = false;
1488 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1489 arg = DECL_CHAIN (arg))
1491 rtx x, reg = DECL_INCOMING_RTL (arg);
1492 int uns1, uns3;
1493 machine_mode mode1, mode2, mode3, mode4;
1495 /* Only continue if the incoming argument is in a register. */
1496 if (!REG_P (reg))
1497 continue;
1499 /* Determine, if possible, whether all call sites of the current
1500 function lie within the current compilation unit. (This does
1501 take into account the exporting of a function via taking its
1502 address, and so forth.) */
1503 strictly_local
1504 = cgraph_node::local_info_node (current_function_decl)->local;
1506 /* The mode and signedness of the argument before any promotions happen
1507 (equal to the mode of the pseudo holding it at that stage). */
1508 mode1 = TYPE_MODE (TREE_TYPE (arg));
1509 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1511 /* The mode and signedness of the argument after any source language and
1512 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1513 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1514 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1516 /* The mode and signedness of the argument as it is actually passed,
1517 see assign_parm_setup_reg in function.c. */
1518 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1519 TREE_TYPE (cfun->decl), 0);
1521 /* The mode of the register in which the argument is being passed. */
1522 mode4 = GET_MODE (reg);
1524 /* Eliminate sign extensions in the callee when:
1525 (a) A mode promotion has occurred; */
1526 if (mode1 == mode3)
1527 continue;
1528 /* (b) The mode of the register is the same as the mode of
1529 the argument as it is passed; */
1530 if (mode3 != mode4)
1531 continue;
1532 /* (c) There's no language level extension; */
1533 if (mode1 == mode2)
1535 /* (c.1) All callers are from the current compilation unit. If that's
1536 the case we don't have to rely on an ABI, we only have to know
1537 what we're generating right now, and we know that we will do the
1538 mode1 to mode2 promotion with the given sign. */
1539 else if (!strictly_local)
1540 continue;
1541 /* (c.2) The combination of the two promotions is useful. This is
1542 true when the signs match, or if the first promotion is unsigned.
1543 In the latter case, (sign_extend (zero_extend x)) is the same as
1544 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1545 else if (uns1)
1546 uns3 = true;
1547 else if (uns3)
1548 continue;
1550 /* Record that the value was promoted from mode1 to mode3,
1551 so that any sign extension at the head of the current
1552 function may be eliminated. */
1553 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1554 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1555 record_value_for_reg (reg, first, x);
1559 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1560 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1561 because some machines (maybe most) will actually do the sign-extension and
1562 this is the conservative approach.
1564 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1565 kludge. */
1567 static rtx
1568 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1570 scalar_int_mode int_mode;
1571 if (CONST_INT_P (src)
1572 && is_a <scalar_int_mode> (mode, &int_mode)
1573 && GET_MODE_PRECISION (int_mode) < prec
1574 && INTVAL (src) > 0
1575 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1576 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1578 return src;
1581 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1582 and SET. */
1584 static void
1585 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1586 rtx x)
1588 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1589 unsigned HOST_WIDE_INT bits = 0;
1590 rtx reg_equal = NULL, src = SET_SRC (set);
1591 unsigned int num = 0;
1593 if (reg_equal_note)
1594 reg_equal = XEXP (reg_equal_note, 0);
1596 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1598 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1599 if (reg_equal)
1600 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1603 /* Don't call nonzero_bits if it cannot change anything. */
1604 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1606 machine_mode mode = GET_MODE (x);
1607 if (GET_MODE_CLASS (mode) == MODE_INT
1608 && HWI_COMPUTABLE_MODE_P (mode))
1609 mode = nonzero_bits_mode;
1610 bits = nonzero_bits (src, mode);
1611 if (reg_equal && bits)
1612 bits &= nonzero_bits (reg_equal, mode);
1613 rsp->nonzero_bits |= bits;
1616 /* Don't call num_sign_bit_copies if it cannot change anything. */
1617 if (rsp->sign_bit_copies != 1)
1619 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1620 if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1622 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1623 if (num == 0 || numeq > num)
1624 num = numeq;
1626 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1627 rsp->sign_bit_copies = num;
1631 /* Called via note_stores. If X is a pseudo that is narrower than
1632 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1634 If we are setting only a portion of X and we can't figure out what
1635 portion, assume all bits will be used since we don't know what will
1636 be happening.
1638 Similarly, set how many bits of X are known to be copies of the sign bit
1639 at all locations in the function. This is the smallest number implied
1640 by any set of X. */
1642 static void
1643 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1645 rtx_insn *insn = (rtx_insn *) data;
1646 scalar_int_mode mode;
1648 if (REG_P (x)
1649 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1650 /* If this register is undefined at the start of the file, we can't
1651 say what its contents were. */
1652 && ! REGNO_REG_SET_P
1653 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1654 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1655 && HWI_COMPUTABLE_MODE_P (mode))
1657 reg_stat_type *rsp = &reg_stat[REGNO (x)];
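/* A CLOBBER, or an auto-increment reference (for which SET is passed as
   NULL_RTX by the REG_INC handling in combine_instructions), tells us
   nothing useful, so assume every bit may be nonzero and only one copy
   of the sign bit exists.  */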
1659 if (set == 0 || GET_CODE (set) == CLOBBER)
1661 rsp->nonzero_bits = GET_MODE_MASK (mode);
1662 rsp->sign_bit_copies = 1;
1663 return;
1666 /* If this register is being initialized using itself, and the
1667 register is uninitialized in this basic block, and there are
1668 no LOG_LINKS which set the register, then part of the
1669 register is uninitialized. In that case we can't assume
1670 anything about the number of nonzero bits.
1672 ??? We could do better if we checked this in
1673 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1674 could avoid making assumptions about the insn which initially
1675 sets the register, while still using the information in other
1676 insns. We would have to be careful to check every insn
1677 involved in the combination. */
1679 if (insn
1680 && reg_referenced_p (x, PATTERN (insn))
1681 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1682 REGNO (x)))
1684 struct insn_link *link;
1686 FOR_EACH_LOG_LINK (link, insn)
1687 if (dead_or_set_p (link->insn, x))
1688 break;
1689 if (!link)
1691 rsp->nonzero_bits = GET_MODE_MASK (mode);
1692 rsp->sign_bit_copies = 1;
1693 return;
1697 /* If this is a complex assignment, see if we can convert it into a
1698 simple assignment. */
1699 set = expand_field_assignment (set);
1701 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1702 set what we know about X. */
1704 if (SET_DEST (set) == x
1705 || (paradoxical_subreg_p (SET_DEST (set))
1706 && SUBREG_REG (SET_DEST (set)) == x))
1707 update_rsp_from_reg_equal (rsp, insn, set, x);
1708 else
1710 rsp->nonzero_bits = GET_MODE_MASK (mode);
1711 rsp->sign_bit_copies = 1;
1716 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1717 optionally insns that were previously combined into I3 or that will be
1718 combined into the merger of INSN and I3. The order is PRED, PRED2,
1719 INSN, SUCC, SUCC2, I3.
1721 Return 0 if the combination is not allowed for any reason.
1723 If the combination is allowed, *PDEST will be set to the single
1724 destination of INSN and *PSRC to the single source, and this function
1725 will return 1. */
1727 static int
1728 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1729 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1730 rtx *pdest, rtx *psrc)
1732 int i;
1733 const_rtx set = 0;
1734 rtx src, dest;
1735 rtx_insn *p;
1736 rtx link;
1737 bool all_adjacent = true;
1738 int (*is_volatile_p) (const_rtx);
1740 if (succ)
1742 if (succ2)
1744 if (next_active_insn (succ2) != i3)
1745 all_adjacent = false;
1746 if (next_active_insn (succ) != succ2)
1747 all_adjacent = false;
1749 else if (next_active_insn (succ) != i3)
1750 all_adjacent = false;
1751 if (next_active_insn (insn) != succ)
1752 all_adjacent = false;
1754 else if (next_active_insn (insn) != i3)
1755 all_adjacent = false;
1757 /* Can combine only if previous insn is a SET of a REG or a SUBREG,
1758 or a PARALLEL consisting of such a SET and CLOBBERs.
1760 If INSN has CLOBBER parallel parts, ignore them for our processing.
1761 By definition, these happen during the execution of the insn. When it
1762 is merged with another insn, all bets are off. If they are, in fact,
1763 needed and aren't also supplied in I3, they may be added by
1764 recog_for_combine. Otherwise, it won't match.
1766 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1767 note.
1769 Get the source and destination of INSN. If more than one, can't
1770 combine. */
1772 if (GET_CODE (PATTERN (insn)) == SET)
1773 set = PATTERN (insn);
1774 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1775 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1777 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1779 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1781 switch (GET_CODE (elt))
1783 /* This is important to combine floating point insns
1784 for the SH4 port. */
1785 case USE:
1786 /* Combining an isolated USE doesn't make sense.
1787 We depend here on combinable_i3pat to reject them. */
1788 /* The code below this loop only verifies that the inputs of
1789 the SET in INSN do not change. We call reg_set_between_p
1790 to verify that the REG in the USE does not change between
1791 I3 and INSN.
1792 If the USE in INSN was for a pseudo register, the matching
1793 insn pattern will likely match any register; combining this
1794 with any other USE would only be safe if we knew that the
1795 used registers have identical values, or if there was
1796 something to tell them apart, e.g. different modes. For
1797 now, we forgo such complicated tests and simply disallow
1798 combining of USES of pseudo registers with any other USE. */
1799 if (REG_P (XEXP (elt, 0))
1800 && GET_CODE (PATTERN (i3)) == PARALLEL)
1802 rtx i3pat = PATTERN (i3);
1803 int i = XVECLEN (i3pat, 0) - 1;
1804 unsigned int regno = REGNO (XEXP (elt, 0));
1808 rtx i3elt = XVECEXP (i3pat, 0, i);
1810 if (GET_CODE (i3elt) == USE
1811 && REG_P (XEXP (i3elt, 0))
1812 && (REGNO (XEXP (i3elt, 0)) == regno
1813 ? reg_set_between_p (XEXP (elt, 0),
1814 PREV_INSN (insn), i3)
1815 : regno >= FIRST_PSEUDO_REGISTER))
1816 return 0;
1818 while (--i >= 0);
1820 break;
1822 /* We can ignore CLOBBERs. */
1823 case CLOBBER:
1824 break;
1826 case SET:
1827 /* Ignore SETs whose result isn't used but not those that
1828 have side-effects. */
1829 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1830 && insn_nothrow_p (insn)
1831 && !side_effects_p (elt))
1832 break;
1834 /* If we have already found a SET, this is a second one and
1835 so we cannot combine with this insn. */
1836 if (set)
1837 return 0;
1839 set = elt;
1840 break;
1842 default:
1843 /* Anything else means we can't combine. */
1844 return 0;
1848 if (set == 0
1849 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1850 so don't do anything with it. */
1851 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1852 return 0;
1854 else
1855 return 0;
1857 if (set == 0)
1858 return 0;
1860 /* The simplification in expand_field_assignment may call back to
1861 get_last_value, so set a safeguard here. */
1862 subst_low_luid = DF_INSN_LUID (insn);
1864 set = expand_field_assignment (set);
1865 src = SET_SRC (set), dest = SET_DEST (set);
1867 /* Do not eliminate a user-specified register if it is in an
1868 asm input, because we may break the register asm usage defined
1869 in the GCC manual if we allow doing so.
1870 Be aware that this may cover more cases than we expect, but this
1871 should be harmless. */
1872 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1873 && extract_asm_operands (PATTERN (i3)))
1874 return 0;
1876 /* Don't eliminate a store in the stack pointer. */
1877 if (dest == stack_pointer_rtx
1878 /* Don't combine with an insn that sets a register to itself if it has
1879 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1880 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1881 /* Can't merge an ASM_OPERANDS. */
1882 || GET_CODE (src) == ASM_OPERANDS
1883 /* Can't merge a function call. */
1884 || GET_CODE (src) == CALL
1885 /* Don't eliminate a function call argument. */
1886 || (CALL_P (i3)
1887 && (find_reg_fusage (i3, USE, dest)
1888 || (REG_P (dest)
1889 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1890 && global_regs[REGNO (dest)])))
1891 /* Don't substitute into an incremented register. */
1892 || FIND_REG_INC_NOTE (i3, dest)
1893 || (succ && FIND_REG_INC_NOTE (succ, dest))
1894 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1895 /* Don't substitute into a non-local goto, this confuses CFG. */
1896 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1897 /* Make sure that DEST is not used after INSN but before SUCC, or
1898 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1899 || (!all_adjacent
1900 && ((succ2
1901 && (reg_used_between_p (dest, succ2, i3)
1902 || reg_used_between_p (dest, succ, succ2)))
1903 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1904 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
1905 || (succ
1906 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1907 that case SUCC is not in the insn stream, so use SUCC2
1908 instead for this test. */
1909 && reg_used_between_p (dest, insn,
1910 succ2
1911 && INSN_UID (succ) == INSN_UID (succ2)
1912 ? succ2 : succ))))
1913 /* Make sure that the value that is to be substituted for the register
1914 does not use any registers whose values alter in between. However,
1915 if the insns are adjacent, a use can't cross a set even though we
1916 think it might (this can happen for a sequence of insns each setting
1917 the same destination; last_set of that register might point to
1918 a NOTE). If INSN has a REG_EQUIV note, the register is always
1919 equivalent to the memory so the substitution is valid even if there
1920 are intervening stores. Also, don't move a volatile asm or
1921 UNSPEC_VOLATILE across any other insns. */
1922 || (! all_adjacent
1923 && (((!MEM_P (src)
1924 || ! find_reg_note (insn, REG_EQUIV, src))
1925 && modified_between_p (src, insn, i3))
1926 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1927 || GET_CODE (src) == UNSPEC_VOLATILE))
1928 /* Don't combine across a CALL_INSN, because that would possibly
1929 change whether the life span of some REGs crosses calls or not,
1930 and it is a pain to update that information.
1931 Exception: if source is a constant, moving it later can't hurt.
1932 Accept that as a special case. */
1933 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1934 return 0;
1936 /* DEST must be a REG. */
1937 if (REG_P (dest))
1939 /* If register alignment is being enforced for multi-word items in all
1940 cases except for parameters, it is possible to have a register copy
1941 insn referencing a hard register that is not allowed to contain the
1942 mode being copied and which would not be valid as an operand of most
1943 insns. Eliminate this problem by not combining with such an insn.
1945 Also, on some machines we don't want to extend the life of a hard
1946 register. */
1948 if (REG_P (src)
1949 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1950 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
1951 /* Don't extend the life of a hard register unless it is
1952 a user variable (if we have few registers) or it can't
1953 fit into the desired register (meaning something special
1954 is going on).
1955 Also avoid substituting a return register into I3, because
1956 reload can't handle a conflict with constraints of other
1957 inputs. */
1958 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1959 && !targetm.hard_regno_mode_ok (REGNO (src),
1960 GET_MODE (src)))))
1961 return 0;
1963 else
1964 return 0;
1967 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1968 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1969 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1971 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1973 /* If the clobber represents an earlyclobber operand, we must not
1974 substitute an expression containing the clobbered register.
1975 As we do not analyze the constraint strings here, we have to
1976 make the conservative assumption. However, if the register is
1977 a fixed hard reg, the clobber cannot represent any operand;
1978 we leave it up to the machine description to either accept or
1979 reject use-and-clobber patterns. */
1980 if (!REG_P (reg)
1981 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1982 || !fixed_regs[REGNO (reg)])
1983 if (reg_overlap_mentioned_p (reg, src))
1984 return 0;
1987 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1988 or not), reject, unless nothing volatile comes between it and I3. */
1990 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1992 /* Make sure neither succ nor succ2 contains a volatile reference. */
1993 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1994 return 0;
1995 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1996 return 0;
1997 /* We'll check insns between INSN and I3 below. */
2000 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2001 to be an explicit register variable, and was chosen for a reason. */
2003 if (GET_CODE (src) == ASM_OPERANDS
2004 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2005 return 0;
2007 /* If INSN contains volatile references (specifically volatile MEMs),
2008 we cannot combine across any other volatile references.
2009 Even if INSN doesn't contain volatile references, any intervening
2010 volatile insn might affect machine state. */
2012 is_volatile_p = volatile_refs_p (PATTERN (insn))
2013 ? volatile_refs_p
2014 : volatile_insn_p;
2016 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2017 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2018 return 0;
2020 /* If INSN contains an autoincrement or autodecrement, make sure that
2021 register is not used between there and I3, and not already used in
2022 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2023 Also insist that I3 not be a jump if using LRA; if it were one
2024 and the incremented register were spilled, we would lose.
2025 Reload handles this correctly. */
2027 if (AUTO_INC_DEC)
2028 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2029 if (REG_NOTE_KIND (link) == REG_INC
2030 && ((JUMP_P (i3) && targetm.lra_p ())
2031 || reg_used_between_p (XEXP (link, 0), insn, i3)
2032 || (pred != NULL_RTX
2033 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2034 || (pred2 != NULL_RTX
2035 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2036 || (succ != NULL_RTX
2037 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2038 || (succ2 != NULL_RTX
2039 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2040 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2041 return 0;
2043 /* If we get here, we have passed all the tests and the combination is
2044 to be allowed. */
2046 *pdest = dest;
2047 *psrc = src;
2049 return 1;
2052 /* LOC is the location within I3 that contains its pattern or the component
2053 of a PARALLEL of the pattern. We validate that it is valid for combining.
2055 One problem is that if I3 modifies its output, as opposed to replacing it
2056 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2057 doing so would produce an insn that is not equivalent to the original insns.
2059 Consider:
2061 (set (reg:DI 101) (reg:DI 100))
2062 (set (subreg:SI (reg:DI 101) 0) <foo>)
2064 This is NOT equivalent to:
2066 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2067 (set (reg:DI 101) (reg:DI 100))])
2069 Not only does this modify 100 (in which case it might still be valid
2070 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2072 We can also run into a problem if I2 sets a register that I1
2073 uses and I1 gets directly substituted into I3 (not via I2). In that
2074 case, we would be getting the wrong value of I2DEST into I3, so we
2075 must reject the combination. This case occurs when I2 and I1 both
2076 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2077 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2078 of a SET must prevent combination from occurring. The same situation
2079 can occur for I0, in which case I0_NOT_IN_SRC is set.
2081 Before doing the above check, we first try to expand a field assignment
2082 into a set of logical operations.
2084 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2085 we place a register that is both set and used within I3. If more than one
2086 such register is detected, we fail.
2088 Return 1 if the combination is valid, zero otherwise. */
2090 static int
2091 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2092 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2094 rtx x = *loc;
2096 if (GET_CODE (x) == SET)
2098 rtx set = x;
2099 rtx dest = SET_DEST (set);
2100 rtx src = SET_SRC (set);
2101 rtx inner_dest = dest;
2102 rtx subdest;
2104 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2105 || GET_CODE (inner_dest) == SUBREG
2106 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2107 inner_dest = XEXP (inner_dest, 0);
2109 /* Check for the case where I3 modifies its output, as discussed
2110 above. We don't want to prevent pseudos from being combined
2111 into the address of a MEM, so only prevent the combination if
2112 i1 or i2 set the same MEM. */
2113 if ((inner_dest != dest &&
2114 (!MEM_P (inner_dest)
2115 || rtx_equal_p (i2dest, inner_dest)
2116 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2117 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2118 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2119 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2120 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2122 /* This is the same test done in can_combine_p except we can't test
2123 all_adjacent; we don't have to, since this instruction will stay
2124 in place, thus we are not considering increasing the lifetime of
2125 INNER_DEST.
2127 Also, if this insn sets a function argument, combining it with
2128 something that might need a spill could clobber a previous
2129 function argument; the all_adjacent test in can_combine_p also
2130 checks this; here, we do a more specific test for this case. */
2132 || (REG_P (inner_dest)
2133 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2134 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2135 GET_MODE (inner_dest)))
2136 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2137 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2138 return 0;
2140 /* If DEST is used in I3, it is being killed in this insn, so
2141 record that for later. We have to consider paradoxical
2142 subregs here, since they kill the whole register, but we
2143 ignore partial subregs, STRICT_LOW_PART, etc.
2144 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2145 STACK_POINTER_REGNUM, since these are always considered to be
2146 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2147 subdest = dest;
2148 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2149 subdest = SUBREG_REG (subdest);
2150 if (pi3dest_killed
2151 && REG_P (subdest)
2152 && reg_referenced_p (subdest, PATTERN (i3))
2153 && REGNO (subdest) != FRAME_POINTER_REGNUM
2154 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2155 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2156 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2157 || (REGNO (subdest) != ARG_POINTER_REGNUM
2158 || ! fixed_regs [REGNO (subdest)]))
2159 && REGNO (subdest) != STACK_POINTER_REGNUM)
2161 if (*pi3dest_killed)
2162 return 0;
2164 *pi3dest_killed = subdest;
2168 else if (GET_CODE (x) == PARALLEL)
2170 int i;
2172 for (i = 0; i < XVECLEN (x, 0); i++)
2173 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2174 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2175 return 0;
2178 return 1;
2181 /* Return 1 if X is an arithmetic expression that contains a multiplication
2182 or a division. We don't count multiplications by powers of two here. */
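   /* For instance (a sketch): (plus:SI (mult:SI (reg:SI 100) (reg:SI 101))
      (reg:SI 102)) counts, while (mult:SI (reg:SI 100) (const_int 4)) does
      not, since 4 is a power of two.  */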
2184 static int
2185 contains_muldiv (rtx x)
2187 switch (GET_CODE (x))
2189 case MOD: case DIV: case UMOD: case UDIV:
2190 return 1;
2192 case MULT:
2193 return ! (CONST_INT_P (XEXP (x, 1))
2194 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2195 default:
2196 if (BINARY_P (x))
2197 return contains_muldiv (XEXP (x, 0))
2198 || contains_muldiv (XEXP (x, 1));
2200 if (UNARY_P (x))
2201 return contains_muldiv (XEXP (x, 0));
2203 return 0;
2207 /* Determine whether INSN can be used in a combination. Return nonzero if
2208 not. This is used in try_combine to detect early some cases where we
2209 can't perform combinations. */
2211 static int
2212 cant_combine_insn_p (rtx_insn *insn)
2214 rtx set;
2215 rtx src, dest;
2217 /* If this isn't really an insn, we can't do anything.
2218 This can occur when flow deletes an insn that it has merged into an
2219 auto-increment address. */
2220 if (!NONDEBUG_INSN_P (insn))
2221 return 1;
2223 /* Never combine loads and stores involving hard regs that are likely
2224 to be spilled. The register allocator can usually handle such
2225 reg-reg moves by tying. If we allow the combiner to make
2226 substitutions of likely-spilled regs, reload might die.
2227 As an exception, we allow combinations involving fixed regs; these are
2228 not available to the register allocator so there's no risk involved. */
2230 set = single_set (insn);
2231 if (! set)
2232 return 0;
2233 src = SET_SRC (set);
2234 dest = SET_DEST (set);
2235 if (GET_CODE (src) == SUBREG)
2236 src = SUBREG_REG (src);
2237 if (GET_CODE (dest) == SUBREG)
2238 dest = SUBREG_REG (dest);
2239 if (REG_P (src) && REG_P (dest)
2240 && ((HARD_REGISTER_P (src)
2241 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2242 #ifdef LEAF_REGISTERS
2243 && ! LEAF_REGISTERS [REGNO (src)])
2244 #else
2246 #endif
2247 || (HARD_REGISTER_P (dest)
2248 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2249 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2250 return 1;
2252 return 0;
2255 struct likely_spilled_retval_info
2257 unsigned regno, nregs;
2258 unsigned mask;
2261 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2262 hard registers that are known to be written to / clobbered in full. */
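   /* A worked example with hypothetical numbers: if INFO tracks regno 10
      with nregs 4 (mask 0b1111) and the store writes hard regs 11..12,
      then regno = 11, nregs = 2, new_mask = (2U << 1) - 1 = 3, shifted
      left by 11 - 10 = 1 to give 0b0110, so bits 1 and 2 are cleared
      from info->mask.  */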
2263 static void
2264 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2266 struct likely_spilled_retval_info *const info =
2267 (struct likely_spilled_retval_info *) data;
2268 unsigned regno, nregs;
2269 unsigned new_mask;
2271 if (!REG_P (XEXP (set, 0)))
2272 return;
2273 regno = REGNO (x);
2274 if (regno >= info->regno + info->nregs)
2275 return;
2276 nregs = REG_NREGS (x);
2277 if (regno + nregs <= info->regno)
2278 return;
2279 new_mask = (2U << (nregs - 1)) - 1;
2280 if (regno < info->regno)
2281 new_mask >>= info->regno - regno;
2282 else
2283 new_mask <<= regno - info->regno;
2284 info->mask &= ~new_mask;
2287 /* Return nonzero iff part of the return value is live during INSN, and
2288 it is likely spilled. This can happen when more than one insn is needed
2289 to copy the return value, e.g. when we consider combining into the
2290 second copy insn for a complex value. */
2292 static int
2293 likely_spilled_retval_p (rtx_insn *insn)
2295 rtx_insn *use = BB_END (this_basic_block);
2296 rtx reg;
2297 rtx_insn *p;
2298 unsigned regno, nregs;
2299 /* We assume here that no machine mode needs more than
2300 32 hard registers when the value overlaps with a register
2301 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2302 unsigned mask;
2303 struct likely_spilled_retval_info info;
2305 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2306 return 0;
2307 reg = XEXP (PATTERN (use), 0);
2308 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2309 return 0;
2310 regno = REGNO (reg);
2311 nregs = REG_NREGS (reg);
2312 if (nregs == 1)
2313 return 0;
2314 mask = (2U << (nregs - 1)) - 1;
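  /* E.g. nregs == 3 yields mask == 0b111, one bit per hard register of
     the return value (relying on the <= 32 registers assumption above).  */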
2316 /* Disregard parts of the return value that are set later. */
2317 info.regno = regno;
2318 info.nregs = nregs;
2319 info.mask = mask;
2320 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2321 if (INSN_P (p))
2322 note_stores (p, likely_spilled_retval_1, &info);
2323 mask = info.mask;
2325 /* Check if any of the (probably) live return value registers is
2326 likely spilled. */
2327 nregs --;
2330 if ((mask & 1 << nregs)
2331 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2332 return 1;
2333 } while (nregs--);
2334 return 0;
2337 /* Adjust INSN after we made a change to its destination.
2339 Changing the destination can invalidate notes that say something about
2340 the results of the insn and a LOG_LINK pointing to the insn. */
2342 static void
2343 adjust_for_new_dest (rtx_insn *insn)
2345 /* For notes, be conservative and simply remove them. */
2346 remove_reg_equal_equiv_notes (insn, true);
2348 /* The new insn will have a destination that was previously the destination
2349 of an insn just above it. Call distribute_links to make a LOG_LINK from
2350 the next use of that destination. */
2352 rtx set = single_set (insn);
2353 gcc_assert (set);
2355 rtx reg = SET_DEST (set);
2357 while (GET_CODE (reg) == ZERO_EXTRACT
2358 || GET_CODE (reg) == STRICT_LOW_PART
2359 || GET_CODE (reg) == SUBREG)
2360 reg = XEXP (reg, 0);
2361 gcc_assert (REG_P (reg));
2363 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2365 df_insn_rescan (insn);
2368 /* Return TRUE if combine can reuse reg X in mode MODE.
2369 ADDED_SETS is nonzero if the original set is still required. */
2370 static bool
2371 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2373 unsigned int regno;
2375 if (!REG_P (x))
2376 return false;
2378 /* Don't change between modes with different underlying register sizes,
2379 since this could lead to invalid subregs. */
2380 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2381 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2382 return false;
2384 regno = REGNO (x);
2385 /* Allow hard registers if the new mode is legal, and occupies no more
2386 registers than the old mode. */
2387 if (regno < FIRST_PSEUDO_REGISTER)
2388 return (targetm.hard_regno_mode_ok (regno, mode)
2389 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2391 /* Or a pseudo that is only used once. */
2392 return (regno < reg_n_sets_max
2393 && REG_N_SETS (regno) == 1
2394 && !added_sets
2395 && !REG_USERVAR_P (x));
2399 /* Check whether X, the destination of a set, refers to part of
2400 the register specified by REG. */
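   /* For instance (a sketch): with REG being (reg:SI 100), both
      (subreg:HI (reg:SI 100) 0) and
      (strict_low_part (subreg:HI (reg:SI 100) 0)) qualify, while
      (reg:SI 100) itself or a subreg of some other register does not.  */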
2402 static bool
2403 reg_subword_p (rtx x, rtx reg)
2405 /* Check that reg is an integer mode register. */
2406 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2407 return false;
2409 if (GET_CODE (x) == STRICT_LOW_PART
2410 || GET_CODE (x) == ZERO_EXTRACT)
2411 x = XEXP (x, 0);
2413 return GET_CODE (x) == SUBREG
2414 && SUBREG_REG (x) == reg
2415 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2418 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2419 by an arbitrary number of CLOBBERs. */
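   /* For N == 2 this accepts, for example (a sketch),
	 (parallel [(set (reg:SI 100) ...)
		    (set (reg:SI 101) ...)
		    (clobber (reg:SI 102))])
      but rejects a vector whose first two elements are not both SETs of
      registers, and rejects (clobber (const_int 0)), which marks an
      unrecognizable pattern.  */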
2420 static bool
2421 is_parallel_of_n_reg_sets (rtx pat, int n)
2423 if (GET_CODE (pat) != PARALLEL)
2424 return false;
2426 int len = XVECLEN (pat, 0);
2427 if (len < n)
2428 return false;
2430 int i;
2431 for (i = 0; i < n; i++)
2432 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2433 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2434 return false;
2435 for ( ; i < len; i++)
2436 switch (GET_CODE (XVECEXP (pat, 0, i)))
2438 case CLOBBER:
2439 if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2440 return false;
2441 break;
2442 default:
2443 return false;
2445 return true;
2448 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2449 CLOBBERs), can be split into individual SETs in that order, without
2450 changing semantics. */
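   /* Splitting is refused, for example, when a later SET in the PARALLEL
      references the register written by an earlier one: inside the
      PARALLEL all inputs are read before any output is written, so after
      splitting the later SET would see the new value instead.  It is also
      refused when a SET_SRC has side effects or the insn can throw.  */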
2451 static bool
2452 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2454 if (!insn_nothrow_p (insn))
2455 return false;
2457 rtx pat = PATTERN (insn);
2459 int i, j;
2460 for (i = 0; i < n; i++)
2462 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2463 return false;
2465 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2467 for (j = i + 1; j < n; j++)
2468 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2469 return false;
2472 return true;
2475 /* Return whether X is just a single_set, with the source
2476 a general_operand. */
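   /* For example (a sketch), (set (reg:SI 100) (reg:SI 101)) and
      (set (reg:SI 100) (mem:SI (reg:SI 101))) count as just moves, while
      (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102))) does not,
      since a PLUS is not a general_operand.  */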
2477 static bool
2478 is_just_move (rtx_insn *x)
2480 rtx set = single_set (x);
2481 if (!set)
2482 return false;
2484 return general_operand (SET_SRC (set), VOIDmode);
2487 /* Callback function to count autoincs. */
2489 static int
2490 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2492 (*((int *) arg))++;
2494 return 0;
2497 /* Try to combine the insns I0, I1 and I2 into I3.
2498 Here I0, I1 and I2 appear earlier than I3.
2499 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2500 I3.
2502 If we are combining more than two insns and the resulting insn is not
2503 recognized, try splitting it into two insns. If that happens, I2 and I3
2504 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2505 Otherwise, I0, I1 and I2 are pseudo-deleted.
2507 Return 0 if the combination does not work. Then nothing is changed.
2508 If we did the combination, return the insn at which combine should
2509 resume scanning.
2511 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2512 new direct jump instruction.
2514 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2515 been I3 passed to an earlier try_combine within the same basic
2516 block. */
2518 static rtx_insn *
2519 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2520 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2522 /* New patterns for I3 and I2, respectively. */
2523 rtx newpat, newi2pat = 0;
2524 rtvec newpat_vec_with_clobbers = 0;
2525 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2526 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2527 dead. */
2528 int added_sets_0, added_sets_1, added_sets_2;
2529 /* Total number of SETs to put into I3. */
2530 int total_sets;
2531 /* Nonzero if I2's or I1's body now appears in I3. */
2532 int i2_is_used = 0, i1_is_used = 0;
2533 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2534 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2535 /* Contains I3 if the destination of I3 is used in its source, which means
2536 that the old life of I3 is being killed. If that usage is placed into
2537 I2 and not in I3, a REG_DEAD note must be made. */
2538 rtx i3dest_killed = 0;
2539 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2540 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2541 /* Copy of SET_SRC of I1 and I0, if needed. */
2542 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2543 /* Set if I2DEST was reused as a scratch register. */
2544 bool i2scratch = false;
2545 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2546 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2547 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2548 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2549 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2550 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2551 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2552 /* Notes that must be added to REG_NOTES in I3 and I2. */
2553 rtx new_i3_notes, new_i2_notes;
2554 /* Notes that we substituted I3 into I2 instead of the normal case. */
2555 int i3_subst_into_i2 = 0;
2556 /* Notes that I1, I2 or I3 is a MULT operation. */
2557 int have_mult = 0;
2558 int swap_i2i3 = 0;
2559 int split_i2i3 = 0;
2560 int changed_i3_dest = 0;
2561 bool i2_was_move = false, i3_was_move = false;
2562 int n_auto_inc = 0;
2564 int maxreg;
2565 rtx_insn *temp_insn;
2566 rtx temp_expr;
2567 struct insn_link *link;
2568 rtx other_pat = 0;
2569 rtx new_other_notes;
2570 int i;
2571 scalar_int_mode dest_mode, temp_mode;
2573 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2574 never be). */
2575 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2576 return 0;
2578 /* Only try four-insn combinations when there's high likelihood of
2579 success. Look for simple insns, such as loads of constants or
2580 binary operations involving a constant. */
2581 if (i0)
2583 int i;
2584 int ngood = 0;
2585 int nshift = 0;
2586 rtx set0, set3;
2588 if (!flag_expensive_optimizations)
2589 return 0;
2591 for (i = 0; i < 4; i++)
2593 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2594 rtx set = single_set (insn);
2595 rtx src;
2596 if (!set)
2597 continue;
2598 src = SET_SRC (set);
2599 if (CONSTANT_P (src))
2601 ngood += 2;
2602 break;
2604 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2605 ngood++;
2606 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2607 || GET_CODE (src) == LSHIFTRT)
2608 nshift++;
2611 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2612 are likely manipulating its value. Ideally we'll be able to combine
2613 all four insns into a bitfield insertion of some kind.
2615 Note the source in I0 might be inside a sign/zero extension and the
2616 memory modes in I0 and I3 might be different. So extract the address
2617 from the destination of I3 and search for it in the source of I0.
2619 In the event that there's a match but the source/dest do not actually
2620 refer to the same memory, the worst that happens is we try some
2621 combinations that we wouldn't have otherwise. */
2622 if ((set0 = single_set (i0))
2623 /* Ensure the source of SET0 is a MEM, possibly buried inside
2624 an extension. */
2625 && (GET_CODE (SET_SRC (set0)) == MEM
2626 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2627 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2628 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2629 && (set3 = single_set (i3))
2630 /* Ensure the destination of SET3 is a MEM. */
2631 && GET_CODE (SET_DEST (set3)) == MEM
2632 /* Would it be better to extract the base address for the MEM
2633 in SET3 and look for that? I don't have cases where it matters
2634 but I could envision such cases. */
2635 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2636 ngood += 2;
2638 if (ngood < 2 && nshift < 2)
2639 return 0;
2642 /* Exit early if one of the insns involved can't be used for
2643 combinations. */
2644 if (CALL_P (i2)
2645 || (i1 && CALL_P (i1))
2646 || (i0 && CALL_P (i0))
2647 || cant_combine_insn_p (i3)
2648 || cant_combine_insn_p (i2)
2649 || (i1 && cant_combine_insn_p (i1))
2650 || (i0 && cant_combine_insn_p (i0))
2651 || likely_spilled_retval_p (i3))
2652 return 0;
2654 combine_attempts++;
2655 undobuf.other_insn = 0;
2657 /* Reset the hard register usage information. */
2658 CLEAR_HARD_REG_SET (newpat_used_regs);
2660 if (dump_file && (dump_flags & TDF_DETAILS))
2662 if (i0)
2663 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2664 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2665 else if (i1)
2666 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2667 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2668 else
2669 fprintf (dump_file, "\nTrying %d -> %d:\n",
2670 INSN_UID (i2), INSN_UID (i3));
2672 if (i0)
2673 dump_insn_slim (dump_file, i0);
2674 if (i1)
2675 dump_insn_slim (dump_file, i1);
2676 dump_insn_slim (dump_file, i2);
2677 dump_insn_slim (dump_file, i3);
2680 /* If multiple insns feed into one of I2 or I3, they can be in any
2681 order. To simplify the code below, reorder them in sequence. */
2682 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2683 std::swap (i0, i2);
2684 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2685 std::swap (i0, i1);
2686 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2687 std::swap (i1, i2);
2689 added_links_insn = 0;
2690 added_notes_insn = 0;
2692 /* First check for one important special case that the code below will
2693 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2694 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2695 we may be able to replace that destination with the destination of I3.
2696 This occurs in the common code where we compute both a quotient and
2697 remainder into a structure, in which case we want to do the computation
2698 directly into the structure to avoid register-register copies.
2700 Note that this case handles both multiple sets in I2 and also cases
2701 where I2 has a number of CLOBBERs inside the PARALLEL.
2703 We make very conservative checks below and only try to handle the
2704 most common cases of this. For example, we only handle the case
2705 where I2 and I3 are adjacent to avoid making difficult register
2706 usage tests. */
2708 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2709 && REG_P (SET_SRC (PATTERN (i3)))
2710 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2711 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2712 && GET_CODE (PATTERN (i2)) == PARALLEL
2713 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2714 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2715 below would need to check what is inside (and reg_overlap_mentioned_p
2716 doesn't support those codes anyway). Don't allow those destinations;
2717 the resulting insn isn't likely to be recognized anyway. */
2718 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2719 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2720 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2721 SET_DEST (PATTERN (i3)))
2722 && next_active_insn (i2) == i3)
2724 rtx p2 = PATTERN (i2);
2726 /* Make sure that the destination of I3,
2727 which we are going to substitute into one output of I2,
2728 is not used within another output of I2. We must avoid making this:
2729 (parallel [(set (mem (reg 69)) ...)
2730 (set (reg 69) ...)])
2731 which is not well-defined as to order of actions.
2732 (Besides, reload can't handle output reloads for this.)
2734 The problem can also happen if the dest of I3 is a memory ref,
2735 if another dest in I2 is an indirect memory ref.
2737 Neither can this PARALLEL be an asm. We do not allow combining
2738 that usually (see can_combine_p), so do not here either. */
2739 bool ok = true;
2740 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2742 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2743 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2744 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2745 SET_DEST (XVECEXP (p2, 0, i))))
2746 ok = false;
2747 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2748 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2749 ok = false;
2752 if (ok)
2753 for (i = 0; i < XVECLEN (p2, 0); i++)
2754 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2755 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2757 combine_merges++;
2759 subst_insn = i3;
2760 subst_low_luid = DF_INSN_LUID (i2);
2762 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2763 i2src = SET_SRC (XVECEXP (p2, 0, i));
2764 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2765 i2dest_killed = dead_or_set_p (i2, i2dest);
2767 /* Replace the dest in I2 with our dest and make the resulting
2768 insn the new pattern for I3. Then skip to where we validate
2769 the pattern. Everything was set up above. */
2770 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2771 newpat = p2;
2772 i3_subst_into_i2 = 1;
2773 goto validate_replacement;
2777 /* If I2 is setting a pseudo to a constant and I3 is setting some
2778 sub-part of it to another constant, merge them by making a new
2779 constant. */
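   /* A sketch of the idea (hypothetical registers, little-endian lowpart
      case):
	I2: (set (reg:DI 100) (const_int 0))
	I3: (set (subreg:SI (reg:DI 100) 0) (const_int 5))
      merge into a single (set (reg:DI 100) (const_int 5)) by inserting
      the narrower constant into the wider one below.  */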
2780 if (i1 == 0
2781 && (temp_expr = single_set (i2)) != 0
2782 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2783 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2784 && GET_CODE (PATTERN (i3)) == SET
2785 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2786 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2788 rtx dest = SET_DEST (PATTERN (i3));
2789 rtx temp_dest = SET_DEST (temp_expr);
2790 int offset = -1;
2791 int width = 0;
2793 if (GET_CODE (dest) == ZERO_EXTRACT)
2795 if (CONST_INT_P (XEXP (dest, 1))
2796 && CONST_INT_P (XEXP (dest, 2))
2797 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2798 &dest_mode))
2800 width = INTVAL (XEXP (dest, 1));
2801 offset = INTVAL (XEXP (dest, 2));
2802 dest = XEXP (dest, 0);
2803 if (BITS_BIG_ENDIAN)
2804 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2807 else
2809 if (GET_CODE (dest) == STRICT_LOW_PART)
2810 dest = XEXP (dest, 0);
2811 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2813 width = GET_MODE_PRECISION (dest_mode);
2814 offset = 0;
2818 if (offset >= 0)
2820 /* If this is the low part, we're done. */
2821 if (subreg_lowpart_p (dest))
2823 /* Handle the case where inner is twice the size of outer. */
2824 else if (GET_MODE_PRECISION (temp_mode)
2825 == 2 * GET_MODE_PRECISION (dest_mode))
2826 offset += GET_MODE_PRECISION (dest_mode);
2827 /* Otherwise give up for now. */
2828 else
2829 offset = -1;
2832 if (offset >= 0)
2834 rtx inner = SET_SRC (PATTERN (i3));
2835 rtx outer = SET_SRC (temp_expr);
2837 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2838 rtx_mode_t (inner, dest_mode),
2839 offset, width);
2841 combine_merges++;
2842 subst_insn = i3;
2843 subst_low_luid = DF_INSN_LUID (i2);
2844 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2845 i2dest = temp_dest;
2846 i2dest_killed = dead_or_set_p (i2, i2dest);
2848 /* Replace the source in I2 with the new constant and make the
2849 resulting insn the new pattern for I3. Then skip to where we
2850 validate the pattern. Everything was set up above. */
2851 SUBST (SET_SRC (temp_expr),
2852 immed_wide_int_const (o, temp_mode));
2854 newpat = PATTERN (i2);
2856 /* The dest of I3 has been replaced with the dest of I2. */
2857 changed_i3_dest = 1;
2858 goto validate_replacement;
2862 /* If we have no I1 and I2 looks like:
2863 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2864 (set Y OP)])
2865 make up a dummy I1 that is
2866 (set Y OP)
2867 and change I2 to be
2868 (set (reg:CC X) (compare:CC Y (const_int 0)))
2870 (We can ignore any trailing CLOBBERs.)
2872 This undoes a previous combination and allows us to match a branch-and-
2873 decrement insn. */
2875 if (i1 == 0
2876 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2877 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2878 == MODE_CC)
2879 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2880 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2881 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2882 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2883 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2884 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2886 /* We make I1 with the same INSN_UID as I2. This gives it
2887 the same DF_INSN_LUID for value tracking. Our fake I1 will
2888 never appear in the insn stream so giving it the same INSN_UID
2889 as I2 will not cause a problem. */
2891 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2892 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2893 -1, NULL_RTX);
2894 INSN_UID (i1) = INSN_UID (i2);
2896 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2897 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2898 SET_DEST (PATTERN (i1)));
2899 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2900 SUBST_LINK (LOG_LINKS (i2),
2901 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2904 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2905 make those two SETs separate I1 and I2 insns, and make an I0 that is
2906 the original I1. */
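   /* A sketch: an I2 of the form
	 (parallel [(set (reg:SI 100) X) (set (reg:SI 101) Y)])
      becomes a fake I1 doing (set (reg:SI 100) X) followed by an I2 doing
      (set (reg:SI 101) Y), provided the checks below allow it.  */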
2907 if (i0 == 0
2908 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2909 && can_split_parallel_of_n_reg_sets (i2, 2)
2910 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2911 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
2912 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2913 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2915 /* If there is no I1, there is no I0 either. */
2916 i0 = i1;
2918 /* We make I1 with the same INSN_UID as I2. This gives it
2919 the same DF_INSN_LUID for value tracking. Our fake I1 will
2920 never appear in the insn stream so giving it the same INSN_UID
2921 as I2 will not cause a problem. */
2923 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2924 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2925 -1, NULL_RTX);
2926 INSN_UID (i1) = INSN_UID (i2);
2928 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2931 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
2932 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
2934 if (dump_file && (dump_flags & TDF_DETAILS))
2935 fprintf (dump_file, "Can't combine i2 into i3\n");
2936 undo_all ();
2937 return 0;
2939 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
2941 if (dump_file && (dump_flags & TDF_DETAILS))
2942 fprintf (dump_file, "Can't combine i1 into i3\n");
2943 undo_all ();
2944 return 0;
2946 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
2948 if (dump_file && (dump_flags & TDF_DETAILS))
2949 fprintf (dump_file, "Can't combine i0 into i3\n");
2950 undo_all ();
2951 return 0;
2954 /* Record whether i2 and i3 are trivial moves. */
2955 i2_was_move = is_just_move (i2);
2956 i3_was_move = is_just_move (i3);
2958 /* Record whether I2DEST is used in I2SRC and similarly for the other
2959 cases. Knowing this will help in register status updating below. */
2960 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2961 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2962 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2963 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2964 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2965 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2966 i2dest_killed = dead_or_set_p (i2, i2dest);
2967 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2968 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2970 /* For the earlier insns, determine which of the subsequent ones they
2971 feed. */
2972 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2973 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2974 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2975 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2976 && reg_overlap_mentioned_p (i0dest, i2src))));
2978 /* Ensure that I3's pattern can be the destination of combines. */
2979 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2980 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2981 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2982 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2983 &i3dest_killed))
2985 undo_all ();
2986 return 0;
2989 /* See if any of the insns is a MULT operation. Unless one is, we will
2990 reject a combination that is, since it must be slower. Be conservative
2991 here. */
2992 if (GET_CODE (i2src) == MULT
2993 || (i1 != 0 && GET_CODE (i1src) == MULT)
2994 || (i0 != 0 && GET_CODE (i0src) == MULT)
2995 || (GET_CODE (PATTERN (i3)) == SET
2996 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2997 have_mult = 1;
2999 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3000 We used to do this EXCEPT in one case: I3 has a post-inc in an
3001 output operand. However, that exception can give rise to insns like
3002 mov r3,(r3)+
3003 which is a famous insn on the PDP-11 where the value of r3 used as the
3004 source was model-dependent. Avoid this sort of thing. */
3006 #if 0
3007 if (!(GET_CODE (PATTERN (i3)) == SET
3008 && REG_P (SET_SRC (PATTERN (i3)))
3009 && MEM_P (SET_DEST (PATTERN (i3)))
3010 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3011 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3012 /* It's not the exception. */
3013 #endif
3014 if (AUTO_INC_DEC)
3016 rtx link;
3017 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3018 if (REG_NOTE_KIND (link) == REG_INC
3019 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3020 || (i1 != 0
3021 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3023 undo_all ();
3024 return 0;
3028 /* See if the SETs in I1 or I2 need to be kept around in the merged
3029 instruction: whenever the value set there is still needed past I3.
3030 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3032 For the SET in I1, we have two cases: if I1 and I2 independently feed
3033 into I3, the set in I1 needs to be kept around unless I1DEST dies
3034 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3035 in I1 needs to be kept around unless I1DEST dies or is set in either
3036 I2 or I3. The same considerations apply to I0. */
3038 added_sets_2 = !dead_or_set_p (i3, i2dest);
3040 if (i1)
3041 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3042 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3043 else
3044 added_sets_1 = 0;
3046 if (i0)
3047 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3048 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3049 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3050 && dead_or_set_p (i2, i0dest)));
3051 else
3052 added_sets_0 = 0;
3054 /* We are about to copy insns for the case where they need to be kept
3055 around. Check that they can be copied in the merged instruction. */
3057 if (targetm.cannot_copy_insn_p
3058 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3059 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3060 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3062 undo_all ();
3063 return 0;
3066 /* Count how many auto_inc expressions there were in the original insns;
3067 we need to have the same number in the resulting patterns. */
3069 if (i0)
3070 for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3071 if (i1)
3072 for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3073 for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3074 for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3076 /* If the set in I2 needs to be kept around, we must make a copy of
3077 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3078 PATTERN (I2), we are only substituting for the original I1DEST, not into
3079 an already-substituted copy. This also prevents making self-referential
3080 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3081 I2DEST. */
3083 if (added_sets_2)
3085 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3086 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3087 else
3088 i2pat = copy_rtx (PATTERN (i2));
3091 if (added_sets_1)
3093 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3094 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3095 else
3096 i1pat = copy_rtx (PATTERN (i1));
3099 if (added_sets_0)
3101 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3102 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3103 else
3104 i0pat = copy_rtx (PATTERN (i0));
3107 combine_merges++;
3109 /* Substitute in the latest insn for the regs set by the earlier ones. */
3111 maxreg = max_reg_num ();
3113 subst_insn = i3;
3115 /* Many machines have insns that can both perform an
3116 arithmetic operation and set the condition code. These operations will
3117 be represented as a PARALLEL with the first element of the vector
3118 being a COMPARE of an arithmetic operation with the constant zero.
3119 The second element of the vector will set some pseudo to the result
3120 of the same arithmetic operation. If we simplify the COMPARE, we won't
3121 match such a pattern and so will generate an extra insn. Here we test
3122 for this case, where both the comparison and the operation result are
3123 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3124 I2SRC. Later we will make the PARALLEL that contains I2. */
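   /* A sketch of the shape being handled (hypothetical registers):
	I2: (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
	I3: (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
      where both the CC result and (reg:SI 100) are still needed; here the
      (reg:SI 100) inside the COMPARE is replaced by the PLUS, and the
      PARALLEL that also keeps the arithmetic SET is built later.  */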
3126 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3127 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3128 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3129 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3131 rtx newpat_dest;
3132 rtx *cc_use_loc = NULL;
3133 rtx_insn *cc_use_insn = NULL;
3134 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3135 machine_mode compare_mode, orig_compare_mode;
3136 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3137 scalar_int_mode mode;
3139 newpat = PATTERN (i3);
3140 newpat_dest = SET_DEST (newpat);
3141 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3143 if (undobuf.other_insn == 0
3144 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3145 &cc_use_insn)))
3147 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3148 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3149 compare_code = simplify_compare_const (compare_code, mode,
3150 op0, &op1);
3151 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3154 /* Do the rest only if op1 is const0_rtx, which may be the
3155 result of simplification. */
3156 if (op1 == const0_rtx)
3158 /* If a single use of the CC is found, prepare to modify it
3159 when SELECT_CC_MODE returns a new CC-class mode, or when
3160 the above simplify_compare_const() returned a new comparison
3161 operator. undobuf.other_insn is assigned the CC use insn
3162 when modifying it. */
3163 if (cc_use_loc)
3165 #ifdef SELECT_CC_MODE
3166 machine_mode new_mode
3167 = SELECT_CC_MODE (compare_code, op0, op1);
3168 if (new_mode != orig_compare_mode
3169 && can_change_dest_mode (SET_DEST (newpat),
3170 added_sets_2, new_mode))
3172 unsigned int regno = REGNO (newpat_dest);
3173 compare_mode = new_mode;
3174 if (regno < FIRST_PSEUDO_REGISTER)
3175 newpat_dest = gen_rtx_REG (compare_mode, regno);
3176 else
3178 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3179 newpat_dest = regno_reg_rtx[regno];
3182 #endif
3183 /* Cases for modifying the CC-using comparison. */
3184 if (compare_code != orig_compare_code
3185 /* ??? Do we need to verify the zero rtx? */
3186 && XEXP (*cc_use_loc, 1) == const0_rtx)
3188 /* Replace cc_use_loc with entire new RTX. */
3189 SUBST (*cc_use_loc,
3190 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3191 newpat_dest, const0_rtx));
3192 undobuf.other_insn = cc_use_insn;
3194 else if (compare_mode != orig_compare_mode)
3196 /* Just replace the CC reg with a new mode. */
3197 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3198 undobuf.other_insn = cc_use_insn;
3202 /* Now we modify the current newpat:
3203 First, SET_DEST(newpat) is updated if the CC mode has been
3204 altered. For targets without SELECT_CC_MODE, this should be
3205 optimized away. */
3206 if (compare_mode != orig_compare_mode)
3207 SUBST (SET_DEST (newpat), newpat_dest);
3208 /* This is always done to propagate i2src into newpat. */
3209 SUBST (SET_SRC (newpat),
3210 gen_rtx_COMPARE (compare_mode, op0, op1));
3211 /* Create new version of i2pat if needed; the below PARALLEL
3212 creation needs this to work correctly. */
3213 if (! rtx_equal_p (i2src, op0))
3214 i2pat = gen_rtx_SET (i2dest, op0);
3215 i2_is_used = 1;
3219 if (i2_is_used == 0)
3221 /* It is possible that the source of I2 or I1 may be performing
3222 an unneeded operation, such as a ZERO_EXTEND of something
3223 that is known to have the high part zero. Handle that case
3224 by letting subst look at the inner insns.
3226 Another way to do this would be to have a function that tries
3227 to simplify a single insn instead of merging two or more
3228 insns. We don't do this because of the potential of infinite
3229 loops and because of the potential extra memory required.
3230 However, doing it the way we are is a bit of a kludge and
3231 doesn't catch all cases.
3233 But only do this if -fexpensive-optimizations since it slows
3234 things down and doesn't usually win.
3236 This is not done in the COMPARE case above because the
3237 unmodified I2PAT is used in the PARALLEL and so a pattern
3238 with a modified I2SRC would not match. */
3240 if (flag_expensive_optimizations)
3242 /* Pass pc_rtx so no substitutions are done, just
3243 simplifications. */
3244 if (i1)
3246 subst_low_luid = DF_INSN_LUID (i1);
3247 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3250 subst_low_luid = DF_INSN_LUID (i2);
3251 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3254 n_occurrences = 0; /* `subst' counts here */
3255 subst_low_luid = DF_INSN_LUID (i2);
3257 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3258 copy of I2SRC each time we substitute it, in order to avoid creating
3259 self-referential RTL when we will be substituting I1SRC for I1DEST
3260 later. Likewise if I0 feeds into I2, either directly or indirectly
3261 through I1, and I0DEST is in I0SRC. */
3262 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3263 (i1_feeds_i2_n && i1dest_in_i1src)
3264 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3265 && i0dest_in_i0src));
3266 substed_i2 = 1;
3268 /* Record whether I2's body now appears within I3's body. */
3269 i2_is_used = n_occurrences;
3272 /* If we already got a failure, don't try to do more. Otherwise, try to
3273 substitute I1 if we have it. */
3275 if (i1 && GET_CODE (newpat) != CLOBBER)
3277 /* Before we can do this substitution, we must redo the test done
3278 above (see detailed comments there) that ensures I1DEST isn't
3279 mentioned in any SETs in NEWPAT that are field assignments. */
3280 if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3281 0, 0, 0))
3283 undo_all ();
3284 return 0;
3287 n_occurrences = 0;
3288 subst_low_luid = DF_INSN_LUID (i1);
3290 /* If the following substitution will modify I1SRC, make a copy of it
3291 for the case where it is substituted for I1DEST in I2PAT later. */
3292 if (added_sets_2 && i1_feeds_i2_n)
3293 i1src_copy = copy_rtx (i1src);
3295 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3296 copy of I1SRC each time we substitute it, in order to avoid creating
3297 self-referential RTL when we will be substituting I0SRC for I0DEST
3298 later. */
3299 newpat = subst (newpat, i1dest, i1src, 0, 0,
3300 i0_feeds_i1_n && i0dest_in_i0src);
3301 substed_i1 = 1;
3303 /* Record whether I1's body now appears within I3's body. */
3304 i1_is_used = n_occurrences;
3307 /* Likewise for I0 if we have it. */
3309 if (i0 && GET_CODE (newpat) != CLOBBER)
3311 if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3312 0, 0, 0))
3314 undo_all ();
3315 return 0;
3318 /* If the following substitution will modify I0SRC, make a copy of it
3319 for the case where it is substituted for I0DEST in I1PAT later. */
3320 if (added_sets_1 && i0_feeds_i1_n)
3321 i0src_copy = copy_rtx (i0src);
3322 /* And a copy for I0DEST in I2PAT substitution. */
3323 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3324 || (i0_feeds_i2_n)))
3325 i0src_copy2 = copy_rtx (i0src);
3327 n_occurrences = 0;
3328 subst_low_luid = DF_INSN_LUID (i0);
3329 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3330 substed_i0 = 1;
3333 if (n_auto_inc)
3335 int new_n_auto_inc = 0;
3336 for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3338 if (n_auto_inc != new_n_auto_inc)
3340 if (dump_file && (dump_flags & TDF_DETAILS))
3341 fprintf (dump_file, "Number of auto_inc expressions changed\n");
3342 undo_all ();
3343 return 0;
3347 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3348 to count all the ways that I2SRC and I1SRC can be used. */
3349 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3350 && i2_is_used + added_sets_2 > 1)
3351 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3352 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3353 > 1))
3354 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3355 && (n_occurrences + added_sets_0
3356 + (added_sets_1 && i0_feeds_i1_n)
3357 + (added_sets_2 && i0_feeds_i2_n)
3358 > 1))
3359 /* Fail if we tried to make a new register. */
3360 || max_reg_num () != maxreg
3361 /* Fail if we couldn't do something and have a CLOBBER. */
3362 || GET_CODE (newpat) == CLOBBER
3363 /* Fail if this new pattern is a MULT and we didn't have one before
3364 at the outer level. */
3365 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3366 && ! have_mult))
3368 undo_all ();
3369 return 0;
3372 /* If the actions of the earlier insns must be kept
3373 in addition to substituting them into the latest one,
3374 we must make a new PARALLEL for the latest insn
3375 to hold the additional SETs. */
3377 if (added_sets_0 || added_sets_1 || added_sets_2)
3379 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3380 combine_extras++;
3382 if (GET_CODE (newpat) == PARALLEL)
3384 rtvec old = XVEC (newpat, 0);
3385 total_sets = XVECLEN (newpat, 0) + extra_sets;
3386 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3387 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3388 sizeof (old->elem[0]) * old->num_elem);
3390 else
3392 rtx old = newpat;
3393 total_sets = 1 + extra_sets;
3394 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3395 XVECEXP (newpat, 0, 0) = old;
3398 if (added_sets_0)
3399 XVECEXP (newpat, 0, --total_sets) = i0pat;
3401 if (added_sets_1)
3403 rtx t = i1pat;
3404 if (i0_feeds_i1_n)
3405 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3407 XVECEXP (newpat, 0, --total_sets) = t;
3409 if (added_sets_2)
3411 rtx t = i2pat;
3412 if (i1_feeds_i2_n)
3413 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3414 i0_feeds_i1_n && i0dest_in_i0src);
3415 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3416 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3418 XVECEXP (newpat, 0, --total_sets) = t;
3422 validate_replacement:
3424 /* Note which hard regs this insn has as inputs. */
3425 mark_used_regs_combine (newpat);
3427 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3428 consider splitting this pattern, we might need these clobbers. */
3429 if (i1 && GET_CODE (newpat) == PARALLEL
3430 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3432 int len = XVECLEN (newpat, 0);
3434 newpat_vec_with_clobbers = rtvec_alloc (len);
3435 for (i = 0; i < len; i++)
3436 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3439 /* We have recognized nothing yet. */
3440 insn_code_number = -1;
3442 /* See if this is a PARALLEL of two SETs where one SET's destination is
3443 a register that is unused and this isn't marked as an instruction that
3444 might trap in an EH region. In that case, we just need the other SET.
3445 We prefer this over the PARALLEL.
3447 This can occur when simplifying a divmod insn. We *must* test for this
3448 case here because the code below that splits two independent SETs doesn't
3449 handle this case correctly when it updates the register status.
3451 It's pointless doing this if we originally had two sets, one from
3452 i3, and one from i2. Combining then splitting the parallel results
3453 in the original i2 again plus an invalid insn (which we delete).
3454 The net effect is only to move instructions around, which makes
3455 debug info less accurate.
3457 If the remaining SET came from I2 its destination should not be used
3458 between I2 and I3. See PR82024. */
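/* A sketch of the divmod shape meant here (modes and registers invented):

     (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])

   If I3 carries a REG_UNUSED note for (reg:SI 101), I3 cannot trap and
   the MOD has no side effects, only the first SET is kept below.  */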
3460 if (!(added_sets_2 && i1 == 0)
3461 && is_parallel_of_n_reg_sets (newpat, 2)
3462 && asm_noperands (newpat) < 0)
3464 rtx set0 = XVECEXP (newpat, 0, 0);
3465 rtx set1 = XVECEXP (newpat, 0, 1);
3466 rtx oldpat = newpat;
3468 if (((REG_P (SET_DEST (set1))
3469 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3470 || (GET_CODE (SET_DEST (set1)) == SUBREG
3471 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3472 && insn_nothrow_p (i3)
3473 && !side_effects_p (SET_SRC (set1)))
3475 newpat = set0;
3476 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3479 else if (((REG_P (SET_DEST (set0))
3480 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3481 || (GET_CODE (SET_DEST (set0)) == SUBREG
3482 && find_reg_note (i3, REG_UNUSED,
3483 SUBREG_REG (SET_DEST (set0)))))
3484 && insn_nothrow_p (i3)
3485 && !side_effects_p (SET_SRC (set0)))
3487 rtx dest = SET_DEST (set1);
3488 if (GET_CODE (dest) == SUBREG)
3489 dest = SUBREG_REG (dest);
3490 if (!reg_used_between_p (dest, i2, i3))
3492 newpat = set1;
3493 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3495 if (insn_code_number >= 0)
3496 changed_i3_dest = 1;
3500 if (insn_code_number < 0)
3501 newpat = oldpat;
3504 /* Is the result of combination a valid instruction? */
3505 if (insn_code_number < 0)
3506 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3508 /* If we were combining three insns and the result is a simple SET
3509 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3510 insns. There are two ways to do this. It can be split using a
3511 machine-specific method (like when you have an addition of a large
3512 constant) or by combine in the function find_split_point. */
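/* Purely as an illustration: on a target whose add-immediate pattern only
   accepts small constants,

     (set (reg 100) (plus (reg 101) (const_int 0x12345)))

   might be split into a move of the constant into a temporary (the code
   below reuses I2's old destination for that) followed by a
   register-register addition.  */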
3514 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3515 && asm_noperands (newpat) < 0)
3517 rtx parallel, *split;
3518 rtx_insn *m_split_insn;
3520 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3521 use I2DEST as a scratch register will help. In the latter case,
3522 convert I2DEST to the mode of the source of NEWPAT if we can. */
3524 m_split_insn = combine_split_insns (newpat, i3);
3526 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3527 inputs of NEWPAT. */
3529 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3530 possible to try that as a scratch reg. This would require adding
3531 more code to make it work though. */
3533 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3535 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3537 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3538 (temporarily, until we are committed to this instruction
3539 combination) does not work: for example, any call to nonzero_bits
3540 on the register (from a splitter in the MD file, for example)
3541 will get the old information, which is invalid.
3543 Since nowadays we can create registers during combine just fine,
3544 we should just create a new one here, not reuse i2dest. */
3546 /* First try to split using the original register as a
3547 scratch register. */
3548 parallel = gen_rtx_PARALLEL (VOIDmode,
3549 gen_rtvec (2, newpat,
3550 gen_rtx_CLOBBER (VOIDmode,
3551 i2dest)));
3552 m_split_insn = combine_split_insns (parallel, i3);
3554 /* If that didn't work, try changing the mode of I2DEST if
3555 we can. */
3556 if (m_split_insn == 0
3557 && new_mode != GET_MODE (i2dest)
3558 && new_mode != VOIDmode
3559 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3561 machine_mode old_mode = GET_MODE (i2dest);
3562 rtx ni2dest;
3564 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3565 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3566 else
3568 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3569 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3572 parallel = (gen_rtx_PARALLEL
3573 (VOIDmode,
3574 gen_rtvec (2, newpat,
3575 gen_rtx_CLOBBER (VOIDmode,
3576 ni2dest))));
3577 m_split_insn = combine_split_insns (parallel, i3);
3579 if (m_split_insn == 0
3580 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3582 struct undo *buf;
3584 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3585 buf = undobuf.undos;
3586 undobuf.undos = buf->next;
3587 buf->next = undobuf.frees;
3588 undobuf.frees = buf;
3592 i2scratch = m_split_insn != 0;
3595 /* If recog_for_combine has discarded clobbers, try to use them
3596 again for the split. */
3597 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3599 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3600 m_split_insn = combine_split_insns (parallel, i3);
3603 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3605 rtx m_split_pat = PATTERN (m_split_insn);
3606 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3607 if (insn_code_number >= 0)
3608 newpat = m_split_pat;
3610 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3611 && (next_nonnote_nondebug_insn (i2) == i3
3612 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3614 rtx i2set, i3set;
3615 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3616 newi2pat = PATTERN (m_split_insn);
3618 i3set = single_set (NEXT_INSN (m_split_insn));
3619 i2set = single_set (m_split_insn);
3621 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3623 /* If I2 or I3 has multiple SETs, we won't know how to track
3624 register status, so don't use these insns. If I2's destination
3625 is used between I2 and I3, we also can't use these insns. */
3627 if (i2_code_number >= 0 && i2set && i3set
3628 && (next_nonnote_nondebug_insn (i2) == i3
3629 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3630 insn_code_number = recog_for_combine (&newi3pat, i3,
3631 &new_i3_notes);
3632 if (insn_code_number >= 0)
3633 newpat = newi3pat;
3635 /* It is possible that both insns now set the destination of I3.
3636 If so, we must show an extra use of it. */
3638 if (insn_code_number >= 0)
3640 rtx new_i3_dest = SET_DEST (i3set);
3641 rtx new_i2_dest = SET_DEST (i2set);
3643 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3644 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3645 || GET_CODE (new_i3_dest) == SUBREG)
3646 new_i3_dest = XEXP (new_i3_dest, 0);
3648 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3649 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3650 || GET_CODE (new_i2_dest) == SUBREG)
3651 new_i2_dest = XEXP (new_i2_dest, 0);
3653 if (REG_P (new_i3_dest)
3654 && REG_P (new_i2_dest)
3655 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3656 && REGNO (new_i2_dest) < reg_n_sets_max)
3657 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3661 /* If we can split it and use I2DEST, go ahead and see if that
3662 helps things be recognized. Verify that none of the registers
3663 are set between I2 and I3. */
3664 if (insn_code_number < 0
3665 && (split = find_split_point (&newpat, i3, false)) != 0
3666 /* We need I2DEST in the proper mode. If it is a hard register
3667 or the only use of a pseudo, we can change its mode.
3668 Make sure we don't change a hard register to have a mode that
3669 isn't valid for it, or change the number of registers. */
3670 && (GET_MODE (*split) == GET_MODE (i2dest)
3671 || GET_MODE (*split) == VOIDmode
3672 || can_change_dest_mode (i2dest, added_sets_2,
3673 GET_MODE (*split)))
3674 && (next_nonnote_nondebug_insn (i2) == i3
3675 || !modified_between_p (*split, i2, i3))
3676 /* We can't overwrite I2DEST if its value is still used by
3677 NEWPAT. */
3678 && ! reg_referenced_p (i2dest, newpat))
3680 rtx newdest = i2dest;
3681 enum rtx_code split_code = GET_CODE (*split);
3682 machine_mode split_mode = GET_MODE (*split);
3683 bool subst_done = false;
3684 newi2pat = NULL_RTX;
3686 i2scratch = true;
3688 /* *SPLIT may be part of I2SRC, so make sure we have the
3689 original expression around for later debug processing.
3690 We should not need I2SRC any more in other cases. */
3691 if (MAY_HAVE_DEBUG_BIND_INSNS)
3692 i2src = copy_rtx (i2src);
3693 else
3694 i2src = NULL;
3696 /* Get NEWDEST as a register in the proper mode. We have already
3697 validated that we can do this. */
3698 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3700 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3701 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3702 else
3704 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3705 newdest = regno_reg_rtx[REGNO (i2dest)];
3709 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3710 an ASHIFT. This can occur if it was inside a PLUS and hence
3711 appeared to be a memory address. This is a kludge. */
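/* E.g. (mult (reg 100) (const_int 8)), left over from address arithmetic,
   is rewritten here as (ashift (reg 100) (const_int 3)).  */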
3712 if (split_code == MULT
3713 && CONST_INT_P (XEXP (*split, 1))
3714 && INTVAL (XEXP (*split, 1)) > 0
3715 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3717 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3718 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3719 XEXP (*split, 0), i_rtx));
3720 /* Update split_code because we may not have a multiply
3721 anymore. */
3722 split_code = GET_CODE (*split);
3725 /* Similarly for (plus (mult FOO (const_int pow2))). */
3726 if (split_code == PLUS
3727 && GET_CODE (XEXP (*split, 0)) == MULT
3728 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3729 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3730 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3732 rtx nsplit = XEXP (*split, 0);
3733 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3734 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3735 XEXP (nsplit, 0),
3736 i_rtx));
3737 /* Update split_code because we may not have a multiply
3738 anymore. */
3739 split_code = GET_CODE (*split);
3742 #ifdef INSN_SCHEDULING
3743 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3744 be written as a ZERO_EXTEND. */
3745 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3747 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3748 what it really is. */
3749 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3750 == SIGN_EXTEND)
3751 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3752 SUBREG_REG (*split)));
3753 else
3754 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3755 SUBREG_REG (*split)));
3757 #endif
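/* Illustration: if the split point is (subreg:SI (mem:QI ...) 0), the
   SUBREG is rewritten as (zero_extend:SI (mem:QI ...)), or as
   (sign_extend:SI ...) when load_extend_op reports that QImode loads
   sign-extend on this target.  */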
3759 /* Attempt to split binary operators using arithmetic identities. */
3760 if (BINARY_P (SET_SRC (newpat))
3761 && split_mode == GET_MODE (SET_SRC (newpat))
3762 && ! side_effects_p (SET_SRC (newpat)))
3764 rtx setsrc = SET_SRC (newpat);
3765 machine_mode mode = GET_MODE (setsrc);
3766 enum rtx_code code = GET_CODE (setsrc);
3767 rtx src_op0 = XEXP (setsrc, 0);
3768 rtx src_op1 = XEXP (setsrc, 1);
3770 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3771 if (rtx_equal_p (src_op0, src_op1))
3773 newi2pat = gen_rtx_SET (newdest, src_op0);
3774 SUBST (XEXP (setsrc, 0), newdest);
3775 SUBST (XEXP (setsrc, 1), newdest);
3776 subst_done = true;
3778 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3779 else if ((code == PLUS || code == MULT)
3780 && GET_CODE (src_op0) == code
3781 && GET_CODE (XEXP (src_op0, 0)) == code
3782 && (INTEGRAL_MODE_P (mode)
3783 || (FLOAT_MODE_P (mode)
3784 && flag_unsafe_math_optimizations)))
3786 rtx p = XEXP (XEXP (src_op0, 0), 0);
3787 rtx q = XEXP (XEXP (src_op0, 0), 1);
3788 rtx r = XEXP (src_op0, 1);
3789 rtx s = src_op1;
3791 /* Split both "((X op Y) op X) op Y" and
3792 "((X op Y) op Y) op X" as "T op T" where T is
3793 "X op Y". */
3794 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3795 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3797 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3798 SUBST (XEXP (setsrc, 0), newdest);
3799 SUBST (XEXP (setsrc, 1), newdest);
3800 subst_done = true;
3802 /* Split "((X op X) op Y) op Y" as "T op T" where
3803 T is "X op Y". */
3804 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3806 rtx tmp = simplify_gen_binary (code, mode, p, r);
3807 newi2pat = gen_rtx_SET (newdest, tmp);
3808 SUBST (XEXP (setsrc, 0), newdest);
3809 SUBST (XEXP (setsrc, 1), newdest);
3810 subst_done = true;
3815 if (!subst_done)
3817 newi2pat = gen_rtx_SET (newdest, *split);
3818 SUBST (*split, newdest);
3821 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3823 /* recog_for_combine might have added CLOBBERs to newi2pat.
3824 Make sure NEWPAT does not depend on the clobbered regs. */
3825 if (GET_CODE (newi2pat) == PARALLEL)
3826 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3827 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3829 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3830 if (reg_overlap_mentioned_p (reg, newpat))
3832 undo_all ();
3833 return 0;
3837 /* If the split point was a MULT and we didn't have one before,
3838 don't use one now. */
3839 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3840 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3844 /* Check for a case where we loaded from memory in a narrow mode and
3845 then sign extended it, but we need both registers. In that case,
3846 we have a PARALLEL with both loads from the same memory location.
3847 We can split this into a load from memory followed by a register-register
3848 copy. This saves at least one insn, more if register allocation can
3849 eliminate the copy.
3851 We cannot do this if the destination of the first assignment is a
3852 condition code register. We eliminate this case by making sure
3853 the SET_DEST and SET_SRC have the same mode.
3855 We cannot do this if the destination of the second assignment is
3856 a register that we have already assumed is zero-extended. Similarly
3857 for a SUBREG of such a register. */
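/* Sketch of the shape handled below (modes and registers invented):

     (parallel [(set (reg:SI 100) (sign_extend:SI (mem:QI <addr>)))
                (set (reg:QI 101) (mem:QI <addr>))])

   becomes the extending load alone in I2, while I3 is rewritten as
   (set (reg:QI 101) (subreg:QI (reg:SI 100) ...)), i.e. a plain register
   copy instead of a second memory access.  */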
3859 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3860 && GET_CODE (newpat) == PARALLEL
3861 && XVECLEN (newpat, 0) == 2
3862 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3863 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3864 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3865 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3866 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3867 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3868 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3869 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
3870 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3871 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3872 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3873 (REG_P (temp_expr)
3874 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3875 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3876 BITS_PER_WORD)
3877 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3878 HOST_BITS_PER_INT)
3879 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3880 != GET_MODE_MASK (word_mode))))
3881 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3882 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3883 (REG_P (temp_expr)
3884 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3885 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3886 BITS_PER_WORD)
3887 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3888 HOST_BITS_PER_INT)
3889 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3890 != GET_MODE_MASK (word_mode)))))
3891 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3892 SET_SRC (XVECEXP (newpat, 0, 1)))
3893 && ! find_reg_note (i3, REG_UNUSED,
3894 SET_DEST (XVECEXP (newpat, 0, 0))))
3896 rtx ni2dest;
3898 newi2pat = XVECEXP (newpat, 0, 0);
3899 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3900 newpat = XVECEXP (newpat, 0, 1);
3901 SUBST (SET_SRC (newpat),
3902 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3903 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3905 if (i2_code_number >= 0)
3906 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3908 if (insn_code_number >= 0)
3909 swap_i2i3 = 1;
3912 /* Similarly, check for a case where we have a PARALLEL of two independent
3913 SETs but we started with three insns. In this case, we can do the sets
3914 as two separate insns. This case occurs when some SET allows two
3915 other insns to combine, but the destination of that SET is still live.
3917 Also do this if we started with two insns and (at least) one of the
3918 resulting sets is a noop; this noop will be deleted later.
3920 Also do this if we started with two insns neither of which was a simple
3921 move. */
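/* For instance (illustrative only), a combination that produced

     (parallel [(set (reg 100) (plus (reg 102) (reg 103)))
                (set (reg 101) (minus (reg 102) (reg 103)))])

   where neither SET mentions the other's destination can be emitted as
   one SET in I2 and the other in I3, subject to the ordering checks
   below.  */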
3923 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3924 && GET_CODE (newpat) == PARALLEL
3925 && XVECLEN (newpat, 0) == 2
3926 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3927 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3928 && (i1
3929 || set_noop_p (XVECEXP (newpat, 0, 0))
3930 || set_noop_p (XVECEXP (newpat, 0, 1))
3931 || (!i2_was_move && !i3_was_move))
3932 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3933 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3934 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3935 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3936 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3937 XVECEXP (newpat, 0, 0))
3938 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3939 XVECEXP (newpat, 0, 1))
3940 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3941 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3943 rtx set0 = XVECEXP (newpat, 0, 0);
3944 rtx set1 = XVECEXP (newpat, 0, 1);
3946 /* Normally, it doesn't matter which of the two is done first, but
3947 the one that uses any regs/memory set between i2 and i3 can't
3948 be first. The PARALLEL might also have been pre-existing in i3,
3949 so we need to make sure that we won't wrongly hoist a SET to i2
3950 that would conflict with a death note present in there, or would
3951 have its dest modified between i2 and i3. */
3952 if (!modified_between_p (SET_SRC (set1), i2, i3)
3953 && !(REG_P (SET_DEST (set1))
3954 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3955 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3956 && find_reg_note (i2, REG_DEAD,
3957 SUBREG_REG (SET_DEST (set1))))
3958 && !modified_between_p (SET_DEST (set1), i2, i3)
3959 /* If I3 is a jump, ensure that set0 is a jump so that
3960 we do not create invalid RTL. */
3961 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3964 newi2pat = set1;
3965 newpat = set0;
3967 else if (!modified_between_p (SET_SRC (set0), i2, i3)
3968 && !(REG_P (SET_DEST (set0))
3969 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3970 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3971 && find_reg_note (i2, REG_DEAD,
3972 SUBREG_REG (SET_DEST (set0))))
3973 && !modified_between_p (SET_DEST (set0), i2, i3)
3974 /* If I3 is a jump, ensure that set1 is a jump so that
3975 we do not create invalid RTL. */
3976 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3979 newi2pat = set0;
3980 newpat = set1;
3982 else
3984 undo_all ();
3985 return 0;
3988 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3990 if (i2_code_number >= 0)
3992 /* recog_for_combine might have added CLOBBERs to newi2pat.
3993 Make sure NEWPAT does not depend on the clobbered regs. */
3994 if (GET_CODE (newi2pat) == PARALLEL)
3996 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3997 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3999 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4000 if (reg_overlap_mentioned_p (reg, newpat))
4002 undo_all ();
4003 return 0;
4008 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4010 if (insn_code_number >= 0)
4011 split_i2i3 = 1;
4015 /* If it still isn't recognized, fail and change things back the way they
4016 were. */
4017 if ((insn_code_number < 0
4018 /* Is the result a reasonable ASM_OPERANDS? */
4019 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4021 undo_all ();
4022 return 0;
4025 /* If we had to change another insn, make sure it is valid also. */
4026 if (undobuf.other_insn)
4028 CLEAR_HARD_REG_SET (newpat_used_regs);
4030 other_pat = PATTERN (undobuf.other_insn);
4031 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4032 &new_other_notes);
4034 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4036 undo_all ();
4037 return 0;
4041 /* Only allow this combination if insn_cost reports that the
4042 replacement instructions are cheaper than the originals. */
4043 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4045 undo_all ();
4046 return 0;
4049 if (MAY_HAVE_DEBUG_BIND_INSNS)
4051 struct undo *undo;
4053 for (undo = undobuf.undos; undo; undo = undo->next)
4054 if (undo->kind == UNDO_MODE)
4056 rtx reg = *undo->where.r;
4057 machine_mode new_mode = GET_MODE (reg);
4058 machine_mode old_mode = undo->old_contents.m;
4060 /* Temporarily revert mode back. */
4061 adjust_reg_mode (reg, old_mode);
4063 if (reg == i2dest && i2scratch)
4065 /* If we used i2dest as a scratch register with a
4066 different mode, substitute it for the original
4067 i2src while its original mode is temporarily
4068 restored, and then clear i2scratch so that we don't
4069 do it again later. */
4070 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4071 this_basic_block);
4072 i2scratch = false;
4073 /* Put back the new mode. */
4074 adjust_reg_mode (reg, new_mode);
4076 else
4078 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4079 rtx_insn *first, *last;
4081 if (reg == i2dest)
4083 first = i2;
4084 last = last_combined_insn;
4086 else
4088 first = i3;
4089 last = undobuf.other_insn;
4090 gcc_assert (last);
4091 if (DF_INSN_LUID (last)
4092 < DF_INSN_LUID (last_combined_insn))
4093 last = last_combined_insn;
4096 /* We're dealing with a reg that changed mode but not
4097 meaning, so we want to turn it into a subreg for
4098 the new mode. However, because of REG sharing and
4099 because its mode had already changed, we have to do
4100 it in two steps. First, replace any debug uses of
4101 reg, with its original mode temporarily restored,
4102 with this copy we have created; then, replace the
4103 copy with the SUBREG of the original shared reg,
4104 once again changed to the new mode. */
4105 propagate_for_debug (first, last, reg, tempreg,
4106 this_basic_block);
4107 adjust_reg_mode (reg, new_mode);
4108 propagate_for_debug (first, last, tempreg,
4109 lowpart_subreg (old_mode, reg, new_mode),
4110 this_basic_block);
4115 /* If we will be able to accept this, we have made a
4116 change to the destination of I3. This requires us to
4117 do a few adjustments. */
4119 if (changed_i3_dest)
4121 PATTERN (i3) = newpat;
4122 adjust_for_new_dest (i3);
4125 /* We now know that we can do this combination. Merge the insns and
4126 update the status of registers and LOG_LINKS. */
4128 if (undobuf.other_insn)
4130 rtx note, next;
4132 PATTERN (undobuf.other_insn) = other_pat;
4134 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4135 ensure that they are still valid. Then add any non-duplicate
4136 notes added by recog_for_combine. */
4137 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4139 next = XEXP (note, 1);
4141 if ((REG_NOTE_KIND (note) == REG_DEAD
4142 && !reg_referenced_p (XEXP (note, 0),
4143 PATTERN (undobuf.other_insn)))
4144 || (REG_NOTE_KIND (note) == REG_UNUSED
4145 && !reg_set_p (XEXP (note, 0),
4146 PATTERN (undobuf.other_insn)))
4147 /* Simply drop REG_EQUAL/REG_EQUIV notes since they may no longer be valid
4148 for other_insn. It may be possible to record that CC
4149 register is changed and only discard those notes, but
4150 in practice it's an unnecessary complication and doesn't
4151 give any meaningful improvement.
4153 See PR78559. */
4154 || REG_NOTE_KIND (note) == REG_EQUAL
4155 || REG_NOTE_KIND (note) == REG_EQUIV)
4156 remove_note (undobuf.other_insn, note);
4159 distribute_notes (new_other_notes, undobuf.other_insn,
4160 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4161 NULL_RTX);
4164 if (swap_i2i3)
4166 /* I3 now uses what used to be its destination and which is now
4167 I2's destination. This requires us to do a few adjustments. */
4168 PATTERN (i3) = newpat;
4169 adjust_for_new_dest (i3);
4172 if (swap_i2i3 || split_i2i3)
4174 /* We might need a LOG_LINK from I3 to I2. But then we used to
4175 have one, so we still will.
4177 However, some later insn might be using I2's dest and have
4178 a LOG_LINK pointing at I3. We should change it to point at
4179 I2 instead. */
4181 /* newi2pat is usually a SET here; however, recog_for_combine might
4182 have added some clobbers. */
4183 rtx x = newi2pat;
4184 if (GET_CODE (x) == PARALLEL)
4185 x = XVECEXP (newi2pat, 0, 0);
4187 if (REG_P (SET_DEST (x))
4188 || (GET_CODE (SET_DEST (x)) == SUBREG
4189 && REG_P (SUBREG_REG (SET_DEST (x)))))
4191 unsigned int regno = reg_or_subregno (SET_DEST (x));
4193 bool done = false;
4194 for (rtx_insn *insn = NEXT_INSN (i3);
4195 !done
4196 && insn
4197 && NONDEBUG_INSN_P (insn)
4198 && BLOCK_FOR_INSN (insn) == this_basic_block;
4199 insn = NEXT_INSN (insn))
4201 struct insn_link *link;
4202 FOR_EACH_LOG_LINK (link, insn)
4203 if (link->insn == i3 && link->regno == regno)
4205 link->insn = i2;
4206 done = true;
4207 break;
4214 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4215 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4216 rtx midnotes = 0;
4217 int from_luid;
4218 /* Compute which registers we expect to eliminate. newi2pat may be setting
4219 either i3dest or i2dest, so we must check it. */
4220 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4221 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4222 || !i2dest_killed
4223 ? 0 : i2dest);
4224 /* For i1, we need to compute both local elimination and global
4225 elimination information with respect to newi2pat because i1dest
4226 may be the same as i3dest, in which case newi2pat may be setting
4227 i1dest. Global information is used when distributing REG_DEAD
4228 note for i2 and i3, in which case it does matter if newi2pat sets
4229 i1dest or not.
4231 Local information is used when distributing REG_DEAD note for i1,
4232 in which case it doesn't matter if newi2pat sets i1dest or not.
4233 See PR62151, if we have four insns combination:
4234 i0: r0 <- i0src
4235 i1: r1 <- i1src (using r0)
4236 REG_DEAD (r0)
4237 i2: r0 <- i2src (using r1)
4238 i3: r3 <- i3src (using r0)
4239 ix: using r0
4240 From i1's point of view, r0 is eliminated, no matter if it is set
4241 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4242 should be discarded.
4244 Note local information only affects cases in forms like "I1->I2->I3",
4245 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4246 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4247 i0dest anyway. */
4248 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4249 || !i1dest_killed
4250 ? 0 : i1dest);
4251 rtx elim_i1 = (local_elim_i1 == 0
4252 || (newi2pat && reg_set_p (i1dest, newi2pat))
4253 ? 0 : i1dest);
4254 /* Same case as i1. */
4255 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4256 ? 0 : i0dest);
4257 rtx elim_i0 = (local_elim_i0 == 0
4258 || (newi2pat && reg_set_p (i0dest, newi2pat))
4259 ? 0 : i0dest);
4261 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4262 clear them. */
4263 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4264 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4265 if (i1)
4266 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4267 if (i0)
4268 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4270 /* Ensure that we do not have something that should not be shared but
4271 occurs multiple times in the new insns. Check this by first
4272 resetting all the `used' flags and then copying anything that is shared. */
4274 reset_used_flags (i3notes);
4275 reset_used_flags (i2notes);
4276 reset_used_flags (i1notes);
4277 reset_used_flags (i0notes);
4278 reset_used_flags (newpat);
4279 reset_used_flags (newi2pat);
4280 if (undobuf.other_insn)
4281 reset_used_flags (PATTERN (undobuf.other_insn));
4283 i3notes = copy_rtx_if_shared (i3notes);
4284 i2notes = copy_rtx_if_shared (i2notes);
4285 i1notes = copy_rtx_if_shared (i1notes);
4286 i0notes = copy_rtx_if_shared (i0notes);
4287 newpat = copy_rtx_if_shared (newpat);
4288 newi2pat = copy_rtx_if_shared (newi2pat);
4289 if (undobuf.other_insn)
4290 reset_used_flags (PATTERN (undobuf.other_insn));
4292 INSN_CODE (i3) = insn_code_number;
4293 PATTERN (i3) = newpat;
4295 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4297 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4298 link = XEXP (link, 1))
4300 if (substed_i2)
4302 /* I2SRC must still be meaningful at this point. Some
4303 splitting operations can invalidate I2SRC, but those
4304 operations do not apply to calls. */
4305 gcc_assert (i2src);
4306 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4307 i2dest, i2src);
4309 if (substed_i1)
4310 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4311 i1dest, i1src);
4312 if (substed_i0)
4313 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4314 i0dest, i0src);
4318 if (undobuf.other_insn)
4319 INSN_CODE (undobuf.other_insn) = other_code_number;
4321 /* We had one special case above where I2 had more than one set and
4322 we replaced a destination of one of those sets with the destination
4323 of I3. In that case, we have to update LOG_LINKS of insns later
4324 in this basic block. Note that this (expensive) case is rare.
4326 Also, in this case, we must pretend that all REG_NOTEs for I2
4327 actually came from I3, so that REG_UNUSED notes from I2 will be
4328 properly handled. */
4330 if (i3_subst_into_i2)
4332 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4333 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4334 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4335 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4336 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4337 && ! find_reg_note (i2, REG_UNUSED,
4338 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4339 for (temp_insn = NEXT_INSN (i2);
4340 temp_insn
4341 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4342 || BB_HEAD (this_basic_block) != temp_insn);
4343 temp_insn = NEXT_INSN (temp_insn))
4344 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4345 FOR_EACH_LOG_LINK (link, temp_insn)
4346 if (link->insn == i2)
4347 link->insn = i3;
4349 if (i3notes)
4351 rtx link = i3notes;
4352 while (XEXP (link, 1))
4353 link = XEXP (link, 1);
4354 XEXP (link, 1) = i2notes;
4356 else
4357 i3notes = i2notes;
4358 i2notes = 0;
4361 LOG_LINKS (i3) = NULL;
4362 REG_NOTES (i3) = 0;
4363 LOG_LINKS (i2) = NULL;
4364 REG_NOTES (i2) = 0;
4366 if (newi2pat)
4368 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4369 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4370 this_basic_block);
4371 INSN_CODE (i2) = i2_code_number;
4372 PATTERN (i2) = newi2pat;
4374 else
4376 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4377 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4378 this_basic_block);
4379 SET_INSN_DELETED (i2);
4382 if (i1)
4384 LOG_LINKS (i1) = NULL;
4385 REG_NOTES (i1) = 0;
4386 if (MAY_HAVE_DEBUG_BIND_INSNS)
4387 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4388 this_basic_block);
4389 SET_INSN_DELETED (i1);
4392 if (i0)
4394 LOG_LINKS (i0) = NULL;
4395 REG_NOTES (i0) = 0;
4396 if (MAY_HAVE_DEBUG_BIND_INSNS)
4397 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4398 this_basic_block);
4399 SET_INSN_DELETED (i0);
4402 /* Get death notes for everything that is now used in either I3 or
4403 I2 and used to die in a previous insn. If we built two new
4404 patterns, move from I1 to I2 then I2 to I3 so that we get the
4405 proper movement on registers that I2 modifies. */
4407 if (i0)
4408 from_luid = DF_INSN_LUID (i0);
4409 else if (i1)
4410 from_luid = DF_INSN_LUID (i1);
4411 else
4412 from_luid = DF_INSN_LUID (i2);
4413 if (newi2pat)
4414 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4415 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4417 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4418 if (i3notes)
4419 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4420 elim_i2, elim_i1, elim_i0);
4421 if (i2notes)
4422 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4423 elim_i2, elim_i1, elim_i0);
4424 if (i1notes)
4425 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4426 elim_i2, local_elim_i1, local_elim_i0);
4427 if (i0notes)
4428 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4429 elim_i2, elim_i1, local_elim_i0);
4430 if (midnotes)
4431 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4432 elim_i2, elim_i1, elim_i0);
4434 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4435 know these are REG_UNUSED and want them to go to the desired insn,
4436 so we always pass it as i3. */
4438 if (newi2pat && new_i2_notes)
4439 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4440 NULL_RTX);
4442 if (new_i3_notes)
4443 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4444 NULL_RTX);
4446 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4447 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4448 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4449 in that case, it might delete I2. Similarly for I2 and I1.
4450 Show an additional death due to the REG_DEAD note we make here. If
4451 we discard it in distribute_notes, we will decrement it again. */
4453 if (i3dest_killed)
4455 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4456 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4457 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4458 elim_i1, elim_i0);
4459 else
4460 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4461 elim_i2, elim_i1, elim_i0);
4464 if (i2dest_in_i2src)
4466 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4467 if (newi2pat && reg_set_p (i2dest, newi2pat))
4468 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4469 NULL_RTX, NULL_RTX);
4470 else
4471 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4472 NULL_RTX, NULL_RTX, NULL_RTX);
4475 if (i1dest_in_i1src)
4477 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4478 if (newi2pat && reg_set_p (i1dest, newi2pat))
4479 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4480 NULL_RTX, NULL_RTX);
4481 else
4482 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4483 NULL_RTX, NULL_RTX, NULL_RTX);
4486 if (i0dest_in_i0src)
4488 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4489 if (newi2pat && reg_set_p (i0dest, newi2pat))
4490 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4491 NULL_RTX, NULL_RTX);
4492 else
4493 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4494 NULL_RTX, NULL_RTX, NULL_RTX);
4497 distribute_links (i3links);
4498 distribute_links (i2links);
4499 distribute_links (i1links);
4500 distribute_links (i0links);
4502 if (REG_P (i2dest))
4504 struct insn_link *link;
4505 rtx_insn *i2_insn = 0;
4506 rtx i2_val = 0, set;
4508 /* The insn that used to set this register doesn't exist, and
4509 this life of the register may not exist either. See if one of
4510 I3's links points to an insn that sets I2DEST. If it does,
4511 that is now the last known value for I2DEST. If we don't update
4512 this and I2 set the register to a value that depended on its old
4513 contents, we will get confused. If this insn is used, things
4514 will be set correctly in combine_instructions. */
4515 FOR_EACH_LOG_LINK (link, i3)
4516 if ((set = single_set (link->insn)) != 0
4517 && rtx_equal_p (i2dest, SET_DEST (set)))
4518 i2_insn = link->insn, i2_val = SET_SRC (set);
4520 record_value_for_reg (i2dest, i2_insn, i2_val);
4522 /* If the reg formerly set in I2 died only once and that was in I3,
4523 zero its use count so it won't make `reload' do any work. */
4524 if (! added_sets_2
4525 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4526 && ! i2dest_in_i2src
4527 && REGNO (i2dest) < reg_n_sets_max)
4528 INC_REG_N_SETS (REGNO (i2dest), -1);
4531 if (i1 && REG_P (i1dest))
4533 struct insn_link *link;
4534 rtx_insn *i1_insn = 0;
4535 rtx i1_val = 0, set;
4537 FOR_EACH_LOG_LINK (link, i3)
4538 if ((set = single_set (link->insn)) != 0
4539 && rtx_equal_p (i1dest, SET_DEST (set)))
4540 i1_insn = link->insn, i1_val = SET_SRC (set);
4542 record_value_for_reg (i1dest, i1_insn, i1_val);
4544 if (! added_sets_1
4545 && ! i1dest_in_i1src
4546 && REGNO (i1dest) < reg_n_sets_max)
4547 INC_REG_N_SETS (REGNO (i1dest), -1);
4550 if (i0 && REG_P (i0dest))
4552 struct insn_link *link;
4553 rtx_insn *i0_insn = 0;
4554 rtx i0_val = 0, set;
4556 FOR_EACH_LOG_LINK (link, i3)
4557 if ((set = single_set (link->insn)) != 0
4558 && rtx_equal_p (i0dest, SET_DEST (set)))
4559 i0_insn = link->insn, i0_val = SET_SRC (set);
4561 record_value_for_reg (i0dest, i0_insn, i0_val);
4563 if (! added_sets_0
4564 && ! i0dest_in_i0src
4565 && REGNO (i0dest) < reg_n_sets_max)
4566 INC_REG_N_SETS (REGNO (i0dest), -1);
4569 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4570 been made to this insn. The order is important, because newi2pat
4571 can affect nonzero_bits of newpat. */
4572 if (newi2pat)
4573 note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4574 note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4577 if (undobuf.other_insn != NULL_RTX)
4579 if (dump_file)
4581 fprintf (dump_file, "modifying other_insn ");
4582 dump_insn_slim (dump_file, undobuf.other_insn);
4584 df_insn_rescan (undobuf.other_insn);
4587 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4589 if (dump_file)
4591 fprintf (dump_file, "modifying insn i0 ");
4592 dump_insn_slim (dump_file, i0);
4594 df_insn_rescan (i0);
4597 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4599 if (dump_file)
4601 fprintf (dump_file, "modifying insn i1 ");
4602 dump_insn_slim (dump_file, i1);
4604 df_insn_rescan (i1);
4607 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4609 if (dump_file)
4611 fprintf (dump_file, "modifying insn i2 ");
4612 dump_insn_slim (dump_file, i2);
4614 df_insn_rescan (i2);
4617 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4619 if (dump_file)
4621 fprintf (dump_file, "modifying insn i3 ");
4622 dump_insn_slim (dump_file, i3);
4624 df_insn_rescan (i3);
4627 /* Set new_direct_jump_p if a new return or simple jump instruction
4628 has been created. Adjust the CFG accordingly. */
4629 if (returnjump_p (i3) || any_uncondjump_p (i3))
4631 *new_direct_jump_p = 1;
4632 mark_jump_label (PATTERN (i3), i3, 0);
4633 update_cfg_for_uncondjump (i3);
4636 if (undobuf.other_insn != NULL_RTX
4637 && (returnjump_p (undobuf.other_insn)
4638 || any_uncondjump_p (undobuf.other_insn)))
4640 *new_direct_jump_p = 1;
4641 update_cfg_for_uncondjump (undobuf.other_insn);
4644 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4645 && XEXP (PATTERN (i3), 0) == const1_rtx)
4647 basic_block bb = BLOCK_FOR_INSN (i3);
4648 gcc_assert (bb);
4649 remove_edge (split_block (bb, i3));
4650 emit_barrier_after_bb (bb);
4651 *new_direct_jump_p = 1;
4654 if (undobuf.other_insn
4655 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4656 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4658 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4659 gcc_assert (bb);
4660 remove_edge (split_block (bb, undobuf.other_insn));
4661 emit_barrier_after_bb (bb);
4662 *new_direct_jump_p = 1;
4665 /* A noop might also require cleaning up the CFG, if it comes from the
4666 simplification of a jump. */
4667 if (JUMP_P (i3)
4668 && GET_CODE (newpat) == SET
4669 && SET_SRC (newpat) == pc_rtx
4670 && SET_DEST (newpat) == pc_rtx)
4672 *new_direct_jump_p = 1;
4673 update_cfg_for_uncondjump (i3);
4676 if (undobuf.other_insn != NULL_RTX
4677 && JUMP_P (undobuf.other_insn)
4678 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4679 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4680 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4682 *new_direct_jump_p = 1;
4683 update_cfg_for_uncondjump (undobuf.other_insn);
4686 combine_successes++;
4687 undo_commit ();
4689 rtx_insn *ret = newi2pat ? i2 : i3;
4690 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4691 ret = added_links_insn;
4692 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4693 ret = added_notes_insn;
4695 return ret;
4698 /* Get a marker for undoing to the current state. */
4700 static void *
4701 get_undo_marker (void)
4703 return undobuf.undos;
4706 /* Undo the modifications up to the marker. */
4708 static void
4709 undo_to_marker (void *marker)
4711 struct undo *undo, *next;
4713 for (undo = undobuf.undos; undo != marker; undo = next)
4715 gcc_assert (undo);
4717 next = undo->next;
4718 switch (undo->kind)
4720 case UNDO_RTX:
4721 *undo->where.r = undo->old_contents.r;
4722 break;
4723 case UNDO_INT:
4724 *undo->where.i = undo->old_contents.i;
4725 break;
4726 case UNDO_MODE:
4727 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4728 break;
4729 case UNDO_LINKS:
4730 *undo->where.l = undo->old_contents.l;
4731 break;
4732 default:
4733 gcc_unreachable ();
4736 undo->next = undobuf.frees;
4737 undobuf.frees = undo;
4740 undobuf.undos = (struct undo *) marker;
4743 /* Undo all the modifications recorded in undobuf. */
4745 static void
4746 undo_all (void)
4748 undo_to_marker (0);
4751 /* We've committed to accepting the changes we made. Move all
4752 of the undos to the free list. */
4754 static void
4755 undo_commit (void)
4757 struct undo *undo, *next;
4759 for (undo = undobuf.undos; undo; undo = next)
4761 next = undo->next;
4762 undo->next = undobuf.frees;
4763 undobuf.frees = undo;
4765 undobuf.undos = 0;
4768 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4769 where we have an arithmetic expression and return that point. LOC will
4770 be inside INSN.
4772 try_combine will call this function to see if an insn can be split into
4773 two insns. */
4775 static rtx *
4776 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4778 rtx x = *loc;
4779 enum rtx_code code = GET_CODE (x);
4780 rtx *split;
4781 unsigned HOST_WIDE_INT len = 0;
4782 HOST_WIDE_INT pos = 0;
4783 int unsignedp = 0;
4784 rtx inner = NULL_RTX;
4785 scalar_int_mode mode, inner_mode;
4787 /* First special-case some codes. */
4788 switch (code)
4790 case SUBREG:
4791 #ifdef INSN_SCHEDULING
4792 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4793 point. */
4794 if (MEM_P (SUBREG_REG (x)))
4795 return loc;
4796 #endif
4797 return find_split_point (&SUBREG_REG (x), insn, false);
4799 case MEM:
4800 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4801 using LO_SUM and HIGH. */
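/* E.g. (mem (symbol_ref "x")) becomes
   (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))
   and the HIGH part is returned as the split point.  */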
4802 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4803 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4805 machine_mode address_mode = get_address_mode (x);
4807 SUBST (XEXP (x, 0),
4808 gen_rtx_LO_SUM (address_mode,
4809 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4810 XEXP (x, 0)));
4811 return &XEXP (XEXP (x, 0), 0);
4814 /* If we have a PLUS whose second operand is a constant and the
4815 address is not valid, perhaps we can split it up using
4816 the machine-specific way to split large constants. We use
4817 the first pseudo-reg (one of the virtual regs) as a placeholder;
4818 it will not remain in the result. */
4819 if (GET_CODE (XEXP (x, 0)) == PLUS
4820 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4821 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4822 MEM_ADDR_SPACE (x)))
4824 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4825 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4826 subst_insn);
4828 /* This should have produced two insns, each of which sets our
4829 placeholder. If the source of the second is a valid address,
4830 we can put both sources together and make a split point
4831 in the middle. */
4833 if (seq
4834 && NEXT_INSN (seq) != NULL_RTX
4835 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4836 && NONJUMP_INSN_P (seq)
4837 && GET_CODE (PATTERN (seq)) == SET
4838 && SET_DEST (PATTERN (seq)) == reg
4839 && ! reg_mentioned_p (reg,
4840 SET_SRC (PATTERN (seq)))
4841 && NONJUMP_INSN_P (NEXT_INSN (seq))
4842 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4843 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4844 && memory_address_addr_space_p
4845 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4846 MEM_ADDR_SPACE (x)))
4848 rtx src1 = SET_SRC (PATTERN (seq));
4849 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4851 /* Replace the placeholder in SRC2 with SRC1. If we can
4852 find where in SRC2 it was placed, that can become our
4853 split point and we can replace this address with SRC2.
4854 Just try two obvious places. */
4856 src2 = replace_rtx (src2, reg, src1);
4857 split = 0;
4858 if (XEXP (src2, 0) == src1)
4859 split = &XEXP (src2, 0);
4860 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4861 && XEXP (XEXP (src2, 0), 0) == src1)
4862 split = &XEXP (XEXP (src2, 0), 0);
4864 if (split)
4866 SUBST (XEXP (x, 0), src2);
4867 return split;
4871 /* If that didn't work and we have a nested plus, like:
4872 ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
4873 is valid address, try to split (REG1 * CONST1). */
4874 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
4875 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
4876 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
4877 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
4878 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
4879 0), 0)))))
4881 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
4882 XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
4883 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4884 MEM_ADDR_SPACE (x)))
4886 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
4887 return &XEXP (XEXP (XEXP (x, 0), 0), 0);
4889 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
4891 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
4892 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
4893 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
4894 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
4895 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
4896 0), 1)))))
4898 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
4899 XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
4900 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4901 MEM_ADDR_SPACE (x)))
4903 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
4904 return &XEXP (XEXP (XEXP (x, 0), 0), 1);
4906 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
4909 /* If that didn't work, perhaps the first operand is complex and
4910 needs to be computed separately, so make a split point there.
4911 This will occur on machines that just support REG + CONST
4912 and have a constant moved through some previous computation. */
4913 if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4914 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4915 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4916 return &XEXP (XEXP (x, 0), 0);
4919 /* If we have a PLUS whose first operand is complex, try computing it
4920 separately by making a split there. */
4921 if (GET_CODE (XEXP (x, 0)) == PLUS
4922 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4923 MEM_ADDR_SPACE (x))
4924 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4925 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4926 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4927 return &XEXP (XEXP (x, 0), 0);
4928 break;
4930 case SET:
4931 /* See if we can split SET_SRC as it stands. */
4932 split = find_split_point (&SET_SRC (x), insn, true);
4933 if (split && split != &SET_SRC (x))
4934 return split;
4936 /* See if we can split SET_DEST as it stands. */
4937 split = find_split_point (&SET_DEST (x), insn, false);
4938 if (split && split != &SET_DEST (x))
4939 return split;
4941 /* See if this is a bitfield assignment with everything constant. If
4942 so, this is an IOR of an AND, so split it into that. */
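/* Sketch (field width and value invented, bit numbering as for
   !BITS_BIG_ENDIAN):

     (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
          (const_int 0x5a))

   has its SET_SRC rebuilt as
   (ior:SI (and:SI (reg:SI 100) (const_int -256)) (const_int 0x5a)),
   or as a plain IOR when the inserted value is all ones.  */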
4943 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4944 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4945 &inner_mode)
4946 && HWI_COMPUTABLE_MODE_P (inner_mode)
4947 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4948 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4949 && CONST_INT_P (SET_SRC (x))
4950 && ((INTVAL (XEXP (SET_DEST (x), 1))
4951 + INTVAL (XEXP (SET_DEST (x), 2)))
4952 <= GET_MODE_PRECISION (inner_mode))
4953 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4955 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4956 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4957 rtx dest = XEXP (SET_DEST (x), 0);
4958 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
4959 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
4960 rtx or_mask;
4962 if (BITS_BIG_ENDIAN)
4963 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
4965 or_mask = gen_int_mode (src << pos, inner_mode);
4966 if (src == mask)
4967 SUBST (SET_SRC (x),
4968 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
4969 else
4971 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
4972 SUBST (SET_SRC (x),
4973 simplify_gen_binary (IOR, inner_mode,
4974 simplify_gen_binary (AND, inner_mode,
4975 dest, negmask),
4976 or_mask));
4979 SUBST (SET_DEST (x), dest);
4981 split = find_split_point (&SET_SRC (x), insn, true);
4982 if (split && split != &SET_SRC (x))
4983 return split;
4986 /* Otherwise, see if this is an operation that we can split into two.
4987 If so, try to split that. */
4988 code = GET_CODE (SET_SRC (x));
4990 switch (code)
4992 case AND:
4993 /* If we are AND'ing with a large constant that is only a single
4994 bit and the result is only being used in a context where we
4995 need to know if it is zero or nonzero, replace it with a bit
4996 extraction. This will avoid the large constant, which might
4997 have taken more than one insn to make. If the constant were
4998 not a valid argument to the AND but took only one insn to make,
4999 this is no worse, but if it took more than one insn, it will
5000 be better. */
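/* Illustration (assuming such a target): if (reg 100) is set to
   (and (reg 101) (const_int 0x1000)) and its only use is a comparison
   against zero, the AND is replaced below by a one-bit extraction of
   bit 12 of (reg 101), avoiding the large constant.  */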
5002 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5003 && REG_P (XEXP (SET_SRC (x), 0))
5004 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5005 && REG_P (SET_DEST (x))
5006 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5007 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5008 && XEXP (*split, 0) == SET_DEST (x)
5009 && XEXP (*split, 1) == const0_rtx)
5011 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5012 XEXP (SET_SRC (x), 0),
5013 pos, NULL_RTX, 1, 1, 0, 0);
5014 if (extraction != 0)
5016 SUBST (SET_SRC (x), extraction);
5017 return find_split_point (loc, insn, false);
5020 break;
5022 case NE:
5023 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5024 is known to be on, this can be converted into a NEG of a shift. */
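/* E.g. with STORE_FLAG_VALUE == -1, (ne:SI (reg:SI 100) (const_int 0))
   where only bit 3 of (reg:SI 100) can be nonzero becomes
   (neg:SI (lshiftrt:SI (reg:SI 100) (const_int 3))).  */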
5025 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5026 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5027 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5028 GET_MODE (XEXP (SET_SRC (x),
5029 0))))) >= 1))
5031 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5032 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5033 SUBST (SET_SRC (x),
5034 gen_rtx_NEG (mode,
5035 gen_rtx_LSHIFTRT (mode,
5036 XEXP (SET_SRC (x), 0),
5037 pos_rtx)));
5039 split = find_split_point (&SET_SRC (x), insn, true);
5040 if (split && split != &SET_SRC (x))
5041 return split;
5043 break;
5045 case SIGN_EXTEND:
5046 inner = XEXP (SET_SRC (x), 0);
5048 /* We can't optimize if either mode is a partial integer
5049 mode as we don't know how many bits are significant
5050 in those modes. */
5051 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5052 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5053 break;
5055 pos = 0;
5056 len = GET_MODE_PRECISION (inner_mode);
5057 unsignedp = 0;
5058 break;
5060 case SIGN_EXTRACT:
5061 case ZERO_EXTRACT:
5062 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5063 &inner_mode)
5064 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5065 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5067 inner = XEXP (SET_SRC (x), 0);
5068 len = INTVAL (XEXP (SET_SRC (x), 1));
5069 pos = INTVAL (XEXP (SET_SRC (x), 2));
5071 if (BITS_BIG_ENDIAN)
5072 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5073 unsignedp = (code == ZERO_EXTRACT);
5075 break;
5077 default:
5078 break;
5081 if (len
5082 && known_subrange_p (pos, len,
5083 0, GET_MODE_PRECISION (GET_MODE (inner)))
5084 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5086 /* For unsigned, we have a choice of a shift followed by an
5087 AND or two shifts. Use two shifts for field sizes where the
5088 constant might be too large. We assume here that we can
5089 always at least get 8-bit constants in an AND insn, which is
5090 true for every current RISC. */
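/* E.g. an 8-bit unsigned field at bit 4 of (reg:SI 100) becomes
   (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255));
   wider fields instead use an ASHIFT followed by a right shift so the
   mask constant never gets unwieldy.  */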
5092 if (unsignedp && len <= 8)
5094 unsigned HOST_WIDE_INT mask
5095 = (HOST_WIDE_INT_1U << len) - 1;
5096 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5097 SUBST (SET_SRC (x),
5098 gen_rtx_AND (mode,
5099 gen_rtx_LSHIFTRT
5100 (mode, gen_lowpart (mode, inner), pos_rtx),
5101 gen_int_mode (mask, mode)));
5103 split = find_split_point (&SET_SRC (x), insn, true);
5104 if (split && split != &SET_SRC (x))
5105 return split;
5107 else
5109 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5110 int right_bits = GET_MODE_PRECISION (mode) - len;
5111 SUBST (SET_SRC (x),
5112 gen_rtx_fmt_ee
5113 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5114 gen_rtx_ASHIFT (mode,
5115 gen_lowpart (mode, inner),
5116 gen_int_shift_amount (mode, left_bits)),
5117 gen_int_shift_amount (mode, right_bits)));
5119 split = find_split_point (&SET_SRC (x), insn, true);
5120 if (split && split != &SET_SRC (x))
5121 return split;
5125 /* See if this is a simple operation with a constant as the second
5126 operand. It might be that this constant is out of range and hence
5127 could be used as a split point. */
5128 if (BINARY_P (SET_SRC (x))
5129 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5130 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5131 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5132 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5133 return &XEXP (SET_SRC (x), 1);
5135 /* Finally, see if this is a simple operation with its first operand
5136 not in a register. The operation might require this operand in a
5137 register, so return it as a split point. We can always do this
5138 because if the first operand were another operation, we would have
5139 already found it as a split point. */
5140 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5141 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5142 return &XEXP (SET_SRC (x), 0);
5144 return 0;
5146 case AND:
5147 case IOR:
5148 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5149 it is better to write this as (not (ior A B)) so we can split it.
5150 Similarly for IOR. */
5151 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5153 SUBST (*loc,
5154 gen_rtx_NOT (GET_MODE (x),
5155 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5156 GET_MODE (x),
5157 XEXP (XEXP (x, 0), 0),
5158 XEXP (XEXP (x, 1), 0))));
5159 return find_split_point (loc, insn, set_src);
5162 /* Many RISC machines have a large set of logical insns. If the
5163 second operand is a NOT, put it first so we will try to split the
5164 other operand first. */
5165 if (GET_CODE (XEXP (x, 1)) == NOT)
5167 rtx tem = XEXP (x, 0);
5168 SUBST (XEXP (x, 0), XEXP (x, 1));
5169 SUBST (XEXP (x, 1), tem);
5171 break;
5173 case PLUS:
5174 case MINUS:
5175 /* Canonicalization can produce (minus A (mult B C)), where C is a
5176 constant. It may be better to try splitting (plus (mult B -C) A)
5177 instead if this isn't a multiply by a power of two. */
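/* For example (constant chosen for illustration only): (minus A (mult B 3))
   is rewritten as (plus (mult B -3) A), which has the same value but exposes
   a multiply-accumulate shape for the splitter below.  */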
5178 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5179 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5180 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5182 machine_mode mode = GET_MODE (x);
5183 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5184 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5185 SUBST (*loc, gen_rtx_PLUS (mode,
5186 gen_rtx_MULT (mode,
5187 XEXP (XEXP (x, 1), 0),
5188 gen_int_mode (other_int,
5189 mode)),
5190 XEXP (x, 0)));
5191 return find_split_point (loc, insn, set_src);
5194 /* Split at a multiply-accumulate instruction. However if this is
5195 the SET_SRC, we likely do not have such an instruction and it's
5196 worthless to try this split. */
5197 if (!set_src
5198 && (GET_CODE (XEXP (x, 0)) == MULT
5199 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5200 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5201 return loc;
5203 default:
5204 break;
5207 /* Otherwise, select our actions depending on our rtx class. */
5208 switch (GET_RTX_CLASS (code))
5210 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5211 case RTX_TERNARY:
5212 split = find_split_point (&XEXP (x, 2), insn, false);
5213 if (split)
5214 return split;
5215 /* fall through */
5216 case RTX_BIN_ARITH:
5217 case RTX_COMM_ARITH:
5218 case RTX_COMPARE:
5219 case RTX_COMM_COMPARE:
5220 split = find_split_point (&XEXP (x, 1), insn, false);
5221 if (split)
5222 return split;
5223 /* fall through */
5224 case RTX_UNARY:
5225 /* Some machines have (and (shift ...) ...) insns. If X is not
5226 an AND, but XEXP (X, 0) is, use it as our split point. */
5227 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5228 return &XEXP (x, 0);
5230 split = find_split_point (&XEXP (x, 0), insn, false);
5231 if (split)
5232 return split;
5233 return loc;
5235 default:
5236 /* Otherwise, we don't have a split point. */
5237 return 0;
5241 /* Throughout X, replace FROM with TO, and return the result.
5242 The result is TO if X is FROM;
5243 otherwise the result is X, but its contents may have been modified.
5244 If they were modified, a record was made in undobuf so that
5245 undo_all will (among other things) return X to its original state.
5247 If the number of changes necessary is too large to record for undoing,
5248 the excess changes are not made, so the result is invalid.
5249 The changes already made can still be undone.
5250 undobuf.num_undo is incremented for such changes, so by testing that
5251 the caller can tell whether the result is valid.
5253 `n_occurrences' is incremented each time FROM is replaced.
5255 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5257 IN_COND is nonzero if we are at the top level of a condition.
5259 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5260 by copying if `n_occurrences' is nonzero. */
5262 static rtx
5263 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5265 enum rtx_code code = GET_CODE (x);
5266 machine_mode op0_mode = VOIDmode;
5267 const char *fmt;
5268 int len, i;
5269 rtx new_rtx;
5271 /* Two expressions are equal if they are identical copies of a shared
5272 RTX or if they are both registers with the same register number
5273 and mode. */
5275 #define COMBINE_RTX_EQUAL_P(X,Y) \
5276 ((X) == (Y) \
5277 || (REG_P (X) && REG_P (Y) \
5278 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5280 /* Do not substitute into clobbers of regs -- this will never result in
5281 valid RTL. */
5282 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5283 return x;
5285 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5287 n_occurrences++;
5288 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5291 /* If X and FROM are the same register but different modes, they
5292 will not have been seen as equal above. However, the log links code
5293 will make a LOG_LINKS entry for that case. If we do nothing, we
5294 will try to rerecognize our original insn and, when it succeeds,
5295 we will delete the feeding insn, which is incorrect.
5297 So force this insn not to match in this (rare) case. */
5298 if (! in_dest && code == REG && REG_P (from)
5299 && reg_overlap_mentioned_p (x, from))
5300 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5302 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5303 of which may contain things that can be combined. */
5304 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5305 return x;
5307 /* It is possible to have a subexpression appear twice in the insn.
5308 Suppose that FROM is a register that appears within TO.
5309 Then, after that subexpression has been scanned once by `subst',
5310 the second time it is scanned, TO may be found. If we were
5311 to scan TO here, we would find FROM within it and create a
5312 self-referent rtl structure which is completely wrong. */
5313 if (COMBINE_RTX_EQUAL_P (x, to))
5314 return to;
5316 /* Parallel asm_operands need special attention because all of the
5317 inputs are shared across the arms. Furthermore, unsharing the
5318 rtl results in recognition failures. Failure to handle this case
5319 specially can result in circular rtl.
5321 Solve this by doing a normal pass across the first entry of the
5322 parallel, and only processing the SET_DESTs of the subsequent
5323 entries. Ug. */
5325 if (code == PARALLEL
5326 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5327 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5329 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5331 /* If this substitution failed, this whole thing fails. */
5332 if (GET_CODE (new_rtx) == CLOBBER
5333 && XEXP (new_rtx, 0) == const0_rtx)
5334 return new_rtx;
5336 SUBST (XVECEXP (x, 0, 0), new_rtx);
5338 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5340 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5342 if (!REG_P (dest) && GET_CODE (dest) != PC)
5344 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5346 /* If this substitution failed, this whole thing fails. */
5347 if (GET_CODE (new_rtx) == CLOBBER
5348 && XEXP (new_rtx, 0) == const0_rtx)
5349 return new_rtx;
5351 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5355 else
5357 len = GET_RTX_LENGTH (code);
5358 fmt = GET_RTX_FORMAT (code);
5360 /* We don't need to process a SET_DEST that is a register or PC, so
5361 set up to skip this common case. All other cases where we want
5362 to suppress replacing something inside a SET_SRC are handled via
5363 the IN_DEST operand. */
5364 if (code == SET
5365 && (REG_P (SET_DEST (x))
5366 || GET_CODE (SET_DEST (x)) == PC))
5367 fmt = "ie";
5369 /* Trying to simplify the operands of a widening MULT is not likely
5370 to create RTL matching a machine insn. */
5371 if (code == MULT
5372 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5373 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5374 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5375 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5376 && REG_P (XEXP (XEXP (x, 0), 0))
5377 && REG_P (XEXP (XEXP (x, 1), 0))
5378 && from == to)
5379 return x;
5382 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5383 constant. */
5384 if (fmt[0] == 'e')
5385 op0_mode = GET_MODE (XEXP (x, 0));
5387 for (i = 0; i < len; i++)
5389 if (fmt[i] == 'E')
5391 int j;
5392 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5394 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5396 new_rtx = (unique_copy && n_occurrences
5397 ? copy_rtx (to) : to);
5398 n_occurrences++;
5400 else
5402 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5403 unique_copy);
5405 /* If this substitution failed, this whole thing
5406 fails. */
5407 if (GET_CODE (new_rtx) == CLOBBER
5408 && XEXP (new_rtx, 0) == const0_rtx)
5409 return new_rtx;
5412 SUBST (XVECEXP (x, i, j), new_rtx);
5415 else if (fmt[i] == 'e')
5417 /* If this is a register being set, ignore it. */
5418 new_rtx = XEXP (x, i);
5419 if (in_dest
5420 && i == 0
5421 && (((code == SUBREG || code == ZERO_EXTRACT)
5422 && REG_P (new_rtx))
5423 || code == STRICT_LOW_PART))
5426 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5428 /* In general, don't install a subreg involving two
5429 modes not tieable. It can worsen register
5430 allocation, and can even make invalid reload
5431 insns, since the reg inside may need to be copied
5432 from in the outside mode, and that may be invalid
5433 if it is an fp reg copied in integer mode.
5435 We allow an exception to this: It is valid if
5436 it is inside another SUBREG and the mode of that
5437 SUBREG and the mode of the inside of TO is
5438 tieable. */
5440 if (GET_CODE (to) == SUBREG
5441 && !targetm.modes_tieable_p (GET_MODE (to),
5442 GET_MODE (SUBREG_REG (to)))
5443 && ! (code == SUBREG
5444 && (targetm.modes_tieable_p
5445 (GET_MODE (x), GET_MODE (SUBREG_REG (to))))))
5446 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5448 if (code == SUBREG
5449 && REG_P (to)
5450 && REGNO (to) < FIRST_PSEUDO_REGISTER
5451 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5452 SUBREG_BYTE (x),
5453 GET_MODE (x)) < 0)
5454 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5456 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5457 n_occurrences++;
5459 else
5460 /* If we are in a SET_DEST, suppress most cases unless we
5461 have gone inside a MEM, in which case we want to
5462 simplify the address. We assume here that things that
5463 are actually part of the destination have their inner
5464 parts in the first expression. This is true for SUBREG,
5465 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5466 things aside from REG and MEM that should appear in a
5467 SET_DEST. */
5468 new_rtx = subst (XEXP (x, i), from, to,
5469 (((in_dest
5470 && (code == SUBREG || code == STRICT_LOW_PART
5471 || code == ZERO_EXTRACT))
5472 || code == SET)
5473 && i == 0),
5474 code == IF_THEN_ELSE && i == 0,
5475 unique_copy);
5477 /* If we found that we will have to reject this combination,
5478 indicate that by returning the CLOBBER ourselves, rather than
5479 an expression containing it. This will speed things up as
5480 well as prevent accidents where two CLOBBERs are considered
5481 to be equal, thus producing an incorrect simplification. */
5483 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5484 return new_rtx;
5486 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5488 machine_mode mode = GET_MODE (x);
5490 x = simplify_subreg (GET_MODE (x), new_rtx,
5491 GET_MODE (SUBREG_REG (x)),
5492 SUBREG_BYTE (x));
5493 if (! x)
5494 x = gen_rtx_CLOBBER (mode, const0_rtx);
5496 else if (CONST_SCALAR_INT_P (new_rtx)
5497 && (GET_CODE (x) == ZERO_EXTEND
5498 || GET_CODE (x) == SIGN_EXTEND
5499 || GET_CODE (x) == FLOAT
5500 || GET_CODE (x) == UNSIGNED_FLOAT))
5502 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5503 new_rtx,
5504 GET_MODE (XEXP (x, 0)));
5505 if (!x)
5506 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5508 else
5509 SUBST (XEXP (x, i), new_rtx);
5514 /* Check if we are loading something from the constant pool via float
5515 extension; in this case we would undo compress_float_constant
5516 optimization and degenerate constant load to an immediate value. */
5517 if (GET_CODE (x) == FLOAT_EXTEND
5518 && MEM_P (XEXP (x, 0))
5519 && MEM_READONLY_P (XEXP (x, 0)))
5521 rtx tmp = avoid_constant_pool_reference (x);
5522 if (x != tmp)
5523 return x;
5526 /* Try to simplify X. If the simplification changed the code, it is likely
5527 that further simplification will help, so loop, but limit the number
5528 of repetitions that will be performed. */
5530 for (i = 0; i < 4; i++)
5532 /* If X is sufficiently simple, don't bother trying to do anything
5533 with it. */
5534 if (code != CONST_INT && code != REG && code != CLOBBER)
5535 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5537 if (GET_CODE (x) == code)
5538 break;
5540 code = GET_CODE (x);
5542 /* We no longer know the original mode of operand 0 since we
5543 have changed the form of X.  */
5544 op0_mode = VOIDmode;
5547 return x;
5550 /* If X is a commutative operation whose operands are not in the canonical
5551 order, use substitutions to swap them. */
5553 static void
5554 maybe_swap_commutative_operands (rtx x)
5556 if (COMMUTATIVE_ARITH_P (x)
5557 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5559 rtx temp = XEXP (x, 0);
5560 SUBST (XEXP (x, 0), XEXP (x, 1));
5561 SUBST (XEXP (x, 1), temp);
5565 /* Simplify X, a piece of RTL. We just operate on the expression at the
5566 outer level; call `subst' to simplify recursively. Return the new
5567 expression.
5569 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5570 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5571 of a condition. */
5573 static rtx
5574 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5575 int in_cond)
5577 enum rtx_code code = GET_CODE (x);
5578 machine_mode mode = GET_MODE (x);
5579 scalar_int_mode int_mode;
5580 rtx temp;
5581 int i;
5583 /* If this is a commutative operation, put a constant last and a complex
5584 expression first. We don't need to do this for comparisons here. */
5585 maybe_swap_commutative_operands (x);
5587 /* Try to fold this expression in case we have constants that weren't
5588 present before. */
5589 temp = 0;
5590 switch (GET_RTX_CLASS (code))
5592 case RTX_UNARY:
5593 if (op0_mode == VOIDmode)
5594 op0_mode = GET_MODE (XEXP (x, 0));
5595 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5596 break;
5597 case RTX_COMPARE:
5598 case RTX_COMM_COMPARE:
5600 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5601 if (cmp_mode == VOIDmode)
5603 cmp_mode = GET_MODE (XEXP (x, 1));
5604 if (cmp_mode == VOIDmode)
5605 cmp_mode = op0_mode;
5607 temp = simplify_relational_operation (code, mode, cmp_mode,
5608 XEXP (x, 0), XEXP (x, 1));
5610 break;
5611 case RTX_COMM_ARITH:
5612 case RTX_BIN_ARITH:
5613 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5614 break;
5615 case RTX_BITFIELD_OPS:
5616 case RTX_TERNARY:
5617 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5618 XEXP (x, 1), XEXP (x, 2));
5619 break;
5620 default:
5621 break;
5624 if (temp)
5626 x = temp;
5627 code = GET_CODE (temp);
5628 op0_mode = VOIDmode;
5629 mode = GET_MODE (temp);
5632 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5633 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5634 things. Check for cases where both arms are testing the same
5635 condition.
5637 Don't do anything if all operands are very simple. */
5639 if ((BINARY_P (x)
5640 && ((!OBJECT_P (XEXP (x, 0))
5641 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5642 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5643 || (!OBJECT_P (XEXP (x, 1))
5644 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5645 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5646 || (UNARY_P (x)
5647 && (!OBJECT_P (XEXP (x, 0))
5648 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5649 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5651 rtx cond, true_rtx, false_rtx;
5653 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5654 if (cond != 0
5655 /* If everything is a comparison, what we have is highly unlikely
5656 to be simpler, so don't use it. */
5657 && ! (COMPARISON_P (x)
5658 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5659 /* Similarly, if we end up with one of the expressions the same
5660 as the original, it is certainly not simpler. */
5661 && ! rtx_equal_p (x, true_rtx)
5662 && ! rtx_equal_p (x, false_rtx))
5664 rtx cop1 = const0_rtx;
5665 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5667 if (cond_code == NE && COMPARISON_P (cond))
5668 return x;
5670 /* Simplify the alternative arms; this may collapse the true and
5671 false arms to store-flag values. Be careful to use copy_rtx
5672 here since true_rtx or false_rtx might share RTL with x as a
5673 result of the if_then_else_cond call above. */
5674 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5675 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5677 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5678 is unlikely to be simpler. */
5679 if (general_operand (true_rtx, VOIDmode)
5680 && general_operand (false_rtx, VOIDmode))
5682 enum rtx_code reversed;
5684 /* Restarting if we generate a store-flag expression will cause
5685 us to loop. Just drop through in this case. */
5687 /* If the result values are STORE_FLAG_VALUE and zero, we can
5688 just make the comparison operation. */
5689 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5690 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5691 cond, cop1);
5692 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5693 && ((reversed = reversed_comparison_code_parts
5694 (cond_code, cond, cop1, NULL))
5695 != UNKNOWN))
5696 x = simplify_gen_relational (reversed, mode, VOIDmode,
5697 cond, cop1);
5699 /* Likewise, we can make the negate of a comparison operation
5700 if the result values are - STORE_FLAG_VALUE and zero. */
5701 else if (CONST_INT_P (true_rtx)
5702 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5703 && false_rtx == const0_rtx)
5704 x = simplify_gen_unary (NEG, mode,
5705 simplify_gen_relational (cond_code,
5706 mode, VOIDmode,
5707 cond, cop1),
5708 mode);
5709 else if (CONST_INT_P (false_rtx)
5710 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5711 && true_rtx == const0_rtx
5712 && ((reversed = reversed_comparison_code_parts
5713 (cond_code, cond, cop1, NULL))
5714 != UNKNOWN))
5715 x = simplify_gen_unary (NEG, mode,
5716 simplify_gen_relational (reversed,
5717 mode, VOIDmode,
5718 cond, cop1),
5719 mode);
5721 code = GET_CODE (x);
5722 op0_mode = VOIDmode;
5727 /* First see if we can apply the inverse distributive law. */
5728 if (code == PLUS || code == MINUS
5729 || code == AND || code == IOR || code == XOR)
5731 x = apply_distributive_law (x);
5732 code = GET_CODE (x);
5733 op0_mode = VOIDmode;
5736 /* If CODE is an associative operation not otherwise handled, see if we
5737 can associate some operands. This can win if they are constants or
5738 if they are logically related (i.e. (a & b) & a). */
5739 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5740 || code == AND || code == IOR || code == XOR
5741 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5742 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5743 || (flag_associative_math && FLOAT_MODE_P (mode))))
5745 if (GET_CODE (XEXP (x, 0)) == code)
5747 rtx other = XEXP (XEXP (x, 0), 0);
5748 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5749 rtx inner_op1 = XEXP (x, 1);
5750 rtx inner;
5752 /* Make sure we pass the constant operand if any as the second
5753 one if this is a commutative operation. */
5754 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5755 std::swap (inner_op0, inner_op1);
5756 inner = simplify_binary_operation (code == MINUS ? PLUS
5757 : code == DIV ? MULT
5758 : code,
5759 mode, inner_op0, inner_op1);
5761 /* For commutative operations, try the other pair if that one
5762 didn't simplify. */
5763 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5765 other = XEXP (XEXP (x, 0), 1);
5766 inner = simplify_binary_operation (code, mode,
5767 XEXP (XEXP (x, 0), 0),
5768 XEXP (x, 1));
5771 if (inner)
5772 return simplify_gen_binary (code, mode, other, inner);
5776 /* A little bit of algebraic simplification here. */
5777 switch (code)
5779 case MEM:
5780 /* Ensure that our address has any ASHIFTs converted to MULT in case
5781 address-recognizing predicates are called later. */
5782 temp = make_compound_operation (XEXP (x, 0), MEM);
5783 SUBST (XEXP (x, 0), temp);
5784 break;
5786 case SUBREG:
5787 if (op0_mode == VOIDmode)
5788 op0_mode = GET_MODE (SUBREG_REG (x));
5790 /* See if this can be moved to simplify_subreg. */
5791 if (CONSTANT_P (SUBREG_REG (x))
5792 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5793 /* Don't call gen_lowpart if the inner mode
5794 is VOIDmode and we cannot simplify it, as SUBREG without
5795 inner mode is invalid. */
5796 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5797 || gen_lowpart_common (mode, SUBREG_REG (x))))
5798 return gen_lowpart (mode, SUBREG_REG (x));
5800 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5801 break;
5803 rtx temp;
5804 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5805 SUBREG_BYTE (x));
5806 if (temp)
5807 return temp;
5809 /* If op is known to have all lower bits zero, the result is zero. */
5810 scalar_int_mode int_mode, int_op0_mode;
5811 if (!in_dest
5812 && is_a <scalar_int_mode> (mode, &int_mode)
5813 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5814 && (GET_MODE_PRECISION (int_mode)
5815 < GET_MODE_PRECISION (int_op0_mode))
5816 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
5817 SUBREG_BYTE (x))
5818 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5819 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
5820 & GET_MODE_MASK (int_mode)) == 0)
5821 && !side_effects_p (SUBREG_REG (x)))
5822 return CONST0_RTX (int_mode);
5825 /* Don't change the mode of the MEM if that would change the meaning
5826 of the address. */
5827 if (MEM_P (SUBREG_REG (x))
5828 && (MEM_VOLATILE_P (SUBREG_REG (x))
5829 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5830 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5831 return gen_rtx_CLOBBER (mode, const0_rtx);
5833 /* Note that we cannot do any narrowing for non-constants since
5834 we might have been counting on using the fact that some bits were
5835 zero. We now do this in the SET. */
5837 break;
5839 case NEG:
5840 temp = expand_compound_operation (XEXP (x, 0));
5842 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5843 replaced by (lshiftrt X C). This will convert
5844 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
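/* For instance, in an 8-bit mode with C == 7 (values illustrative only):
   (ashiftrt X 7) is either 0 or -1, so its negation is 0 or 1, which is
   exactly (lshiftrt X 7).  */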
5846 if (GET_CODE (temp) == ASHIFTRT
5847 && CONST_INT_P (XEXP (temp, 1))
5848 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
5849 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5850 INTVAL (XEXP (temp, 1)));
5852 /* If X has only a single bit that might be nonzero, say, bit I, convert
5853 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5854 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5855 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5856 or a SUBREG of one since we'd be making the expression more
5857 complex if it was just a register. */
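/* A worked example (illustrative values): in an 8-bit mode where only bit 2
   of X might be nonzero, X is 0 or 4 and (neg X) is 0 or -4;
   (ashiftrt (ashift X 5) 5) gives the same two values, because the left
   shift puts bit 2 in the sign bit and the arithmetic right shift then
   sign-extends it back down.  */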
5859 if (!REG_P (temp)
5860 && ! (GET_CODE (temp) == SUBREG
5861 && REG_P (SUBREG_REG (temp)))
5862 && is_a <scalar_int_mode> (mode, &int_mode)
5863 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5865 rtx temp1 = simplify_shift_const
5866 (NULL_RTX, ASHIFTRT, int_mode,
5867 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5868 GET_MODE_PRECISION (int_mode) - 1 - i),
5869 GET_MODE_PRECISION (int_mode) - 1 - i);
5871 /* If all we did was surround TEMP with the two shifts, we
5872 haven't improved anything, so don't use it. Otherwise,
5873 we are better off with TEMP1. */
5874 if (GET_CODE (temp1) != ASHIFTRT
5875 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5876 || XEXP (XEXP (temp1, 0), 0) != temp)
5877 return temp1;
5879 break;
5881 case TRUNCATE:
5882 /* We can't handle truncation to a partial integer mode here
5883 because we don't know the real bitsize of the partial
5884 integer mode. */
5885 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5886 break;
5888 if (HWI_COMPUTABLE_MODE_P (mode))
5889 SUBST (XEXP (x, 0),
5890 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5891 GET_MODE_MASK (mode), 0));
5893 /* We can truncate a constant value and return it. */
5895 poly_int64 c;
5896 if (poly_int_rtx_p (XEXP (x, 0), &c))
5897 return gen_int_mode (c, mode);
5900 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5901 whose value is a comparison can be replaced with a subreg if
5902 STORE_FLAG_VALUE permits. */
5903 if (HWI_COMPUTABLE_MODE_P (mode)
5904 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5905 && (temp = get_last_value (XEXP (x, 0)))
5906 && COMPARISON_P (temp))
5907 return gen_lowpart (mode, XEXP (x, 0));
5908 break;
5910 case CONST:
5911 /* (const (const X)) can become (const X). Do it this way rather than
5912 returning the inner CONST since CONST can be shared with a
5913 REG_EQUAL note. */
5914 if (GET_CODE (XEXP (x, 0)) == CONST)
5915 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5916 break;
5918 case LO_SUM:
5919 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5920 can add in an offset. find_split_point will split this address up
5921 again if it doesn't match. */
5922 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5923 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5924 return XEXP (x, 1);
5925 break;
5927 case PLUS:
5928 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5929 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5930 bit-field and can be replaced by either a sign_extend or a
5931 sign_extract. The `and' may be a zero_extend and the two
5932 <c>, -<c> constants may be reversed. */
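/* A concrete instance (constants illustrative only): with pow2 == 256 and
   c == 128, (plus (xor (and X 255) 128) -128) sign-extends the low byte of
   X, since ((v & 255) ^ 128) - 128 maps 0..127 to themselves and 128..255
   to -128..-1.  */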
5933 if (GET_CODE (XEXP (x, 0)) == XOR
5934 && is_a <scalar_int_mode> (mode, &int_mode)
5935 && CONST_INT_P (XEXP (x, 1))
5936 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5937 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5938 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5939 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5940 && HWI_COMPUTABLE_MODE_P (int_mode)
5941 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5942 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5943 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5944 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5945 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5946 && known_eq ((GET_MODE_PRECISION
5947 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
5948 (unsigned int) i + 1))))
5949 return simplify_shift_const
5950 (NULL_RTX, ASHIFTRT, int_mode,
5951 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5952 XEXP (XEXP (XEXP (x, 0), 0), 0),
5953 GET_MODE_PRECISION (int_mode) - (i + 1)),
5954 GET_MODE_PRECISION (int_mode) - (i + 1));
5956 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5957 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5958 the bitsize of the mode - 1. This allows simplification of
5959 "a = (b & 8) == 0;" */
5960 if (XEXP (x, 1) == constm1_rtx
5961 && !REG_P (XEXP (x, 0))
5962 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5963 && REG_P (SUBREG_REG (XEXP (x, 0))))
5964 && is_a <scalar_int_mode> (mode, &int_mode)
5965 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
5966 return simplify_shift_const
5967 (NULL_RTX, ASHIFTRT, int_mode,
5968 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5969 gen_rtx_XOR (int_mode, XEXP (x, 0),
5970 const1_rtx),
5971 GET_MODE_PRECISION (int_mode) - 1),
5972 GET_MODE_PRECISION (int_mode) - 1);
5974 /* If we are adding two things that have no bits in common, convert
5975 the addition into an IOR. This will often be further simplified,
5976 for example in cases like ((a & 1) + (a & 2)), which can
5977 become a & 3. */
5979 if (HWI_COMPUTABLE_MODE_P (mode)
5980 && (nonzero_bits (XEXP (x, 0), mode)
5981 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5983 /* Try to simplify the expression further. */
5984 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5985 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5987 /* If we could, great. If not, do not go ahead with the IOR
5988 replacement, since PLUS appears in many special purpose
5989 address arithmetic instructions. */
5990 if (GET_CODE (temp) != CLOBBER
5991 && (GET_CODE (temp) != IOR
5992 || ((XEXP (temp, 0) != XEXP (x, 0)
5993 || XEXP (temp, 1) != XEXP (x, 1))
5994 && (XEXP (temp, 0) != XEXP (x, 1)
5995 || XEXP (temp, 1) != XEXP (x, 0)))))
5996 return temp;
5999 /* Canonicalize x + x into x << 1. */
6000 if (GET_MODE_CLASS (mode) == MODE_INT
6001 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6002 && !side_effects_p (XEXP (x, 0)))
6003 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6005 break;
6007 case MINUS:
6008 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6009 (and <foo> (const_int pow2-1)) */
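/* For example (pow2 == 8, chosen for illustration): (minus X (and X -8))
   keeps only the low three bits of X, i.e. it equals (and X 7).  */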
6010 if (is_a <scalar_int_mode> (mode, &int_mode)
6011 && GET_CODE (XEXP (x, 1)) == AND
6012 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6013 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6014 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6015 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6016 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6017 break;
6019 case MULT:
6020 /* If we have (mult (plus A B) C), apply the distributive law and then
6021 the inverse distributive law to see if things simplify. This
6022 occurs mostly in addresses, often when unrolling loops. */
6024 if (GET_CODE (XEXP (x, 0)) == PLUS)
6026 rtx result = distribute_and_simplify_rtx (x, 0);
6027 if (result)
6028 return result;
6031 /* Try to simplify a*(b/c) as (a*b)/c. */
6032 if (FLOAT_MODE_P (mode) && flag_associative_math
6033 && GET_CODE (XEXP (x, 0)) == DIV)
6035 rtx tem = simplify_binary_operation (MULT, mode,
6036 XEXP (XEXP (x, 0), 0),
6037 XEXP (x, 1));
6038 if (tem)
6039 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6041 break;
6043 case UDIV:
6044 /* If this is a divide by a power of two, treat it as a shift if
6045 its first operand is a shift. */
6046 if (is_a <scalar_int_mode> (mode, &int_mode)
6047 && CONST_INT_P (XEXP (x, 1))
6048 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6049 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6050 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6051 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6052 || GET_CODE (XEXP (x, 0)) == ROTATE
6053 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6054 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6055 XEXP (x, 0), i);
6056 break;
6058 case EQ: case NE:
6059 case GT: case GTU: case GE: case GEU:
6060 case LT: case LTU: case LE: case LEU:
6061 case UNEQ: case LTGT:
6062 case UNGT: case UNGE:
6063 case UNLT: case UNLE:
6064 case UNORDERED: case ORDERED:
6065 /* If the first operand is a condition code, we can't do anything
6066 with it. */
6067 if (GET_CODE (XEXP (x, 0)) == COMPARE
6068 || GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC)
6070 rtx op0 = XEXP (x, 0);
6071 rtx op1 = XEXP (x, 1);
6072 enum rtx_code new_code;
6074 if (GET_CODE (op0) == COMPARE)
6075 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6077 /* Simplify our comparison, if possible. */
6078 new_code = simplify_comparison (code, &op0, &op1);
6080 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6081 if only the low-order bit is possibly nonzero in X (such as when
6082 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6083 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6084 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6085 (plus X 1).
6087 Remove any ZERO_EXTRACT we made when thinking this was a
6088 comparison. It may now be simpler to use, e.g., an AND. If a
6089 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6090 the call to make_compound_operation in the SET case.
6092 Don't apply these optimizations if the caller would
6093 prefer a comparison rather than a value.
6094 E.g., for the condition in an IF_THEN_ELSE most targets need
6095 an explicit comparison. */
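/* A worked example (assuming STORE_FLAG_VALUE == 1): if only the low bit of
   X can be nonzero, X is already 0 or 1, so (ne X 0) is just X and (eq X 0)
   is (xor X 1); if X is known to be 0 or -1, (ne X 0) is (neg X) and
   (eq X 0) is (plus X 1).  */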
6097 if (in_cond)
6100 else if (STORE_FLAG_VALUE == 1
6101 && new_code == NE
6102 && is_int_mode (mode, &int_mode)
6103 && op1 == const0_rtx
6104 && int_mode == GET_MODE (op0)
6105 && nonzero_bits (op0, int_mode) == 1)
6106 return gen_lowpart (int_mode,
6107 expand_compound_operation (op0));
6109 else if (STORE_FLAG_VALUE == 1
6110 && new_code == NE
6111 && is_int_mode (mode, &int_mode)
6112 && op1 == const0_rtx
6113 && int_mode == GET_MODE (op0)
6114 && (num_sign_bit_copies (op0, int_mode)
6115 == GET_MODE_PRECISION (int_mode)))
6117 op0 = expand_compound_operation (op0);
6118 return simplify_gen_unary (NEG, int_mode,
6119 gen_lowpart (int_mode, op0),
6120 int_mode);
6123 else if (STORE_FLAG_VALUE == 1
6124 && new_code == EQ
6125 && is_int_mode (mode, &int_mode)
6126 && op1 == const0_rtx
6127 && int_mode == GET_MODE (op0)
6128 && nonzero_bits (op0, int_mode) == 1)
6130 op0 = expand_compound_operation (op0);
6131 return simplify_gen_binary (XOR, int_mode,
6132 gen_lowpart (int_mode, op0),
6133 const1_rtx);
6136 else if (STORE_FLAG_VALUE == 1
6137 && new_code == EQ
6138 && is_int_mode (mode, &int_mode)
6139 && op1 == const0_rtx
6140 && int_mode == GET_MODE (op0)
6141 && (num_sign_bit_copies (op0, int_mode)
6142 == GET_MODE_PRECISION (int_mode)))
6144 op0 = expand_compound_operation (op0);
6145 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6148 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6149 those above. */
6150 if (in_cond)
6153 else if (STORE_FLAG_VALUE == -1
6154 && new_code == NE
6155 && is_int_mode (mode, &int_mode)
6156 && op1 == const0_rtx
6157 && int_mode == GET_MODE (op0)
6158 && (num_sign_bit_copies (op0, int_mode)
6159 == GET_MODE_PRECISION (int_mode)))
6160 return gen_lowpart (int_mode, expand_compound_operation (op0));
6162 else if (STORE_FLAG_VALUE == -1
6163 && new_code == NE
6164 && is_int_mode (mode, &int_mode)
6165 && op1 == const0_rtx
6166 && int_mode == GET_MODE (op0)
6167 && nonzero_bits (op0, int_mode) == 1)
6169 op0 = expand_compound_operation (op0);
6170 return simplify_gen_unary (NEG, int_mode,
6171 gen_lowpart (int_mode, op0),
6172 int_mode);
6175 else if (STORE_FLAG_VALUE == -1
6176 && new_code == EQ
6177 && is_int_mode (mode, &int_mode)
6178 && op1 == const0_rtx
6179 && int_mode == GET_MODE (op0)
6180 && (num_sign_bit_copies (op0, int_mode)
6181 == GET_MODE_PRECISION (int_mode)))
6183 op0 = expand_compound_operation (op0);
6184 return simplify_gen_unary (NOT, int_mode,
6185 gen_lowpart (int_mode, op0),
6186 int_mode);
6189 /* If X is 0/1, (eq X 0) is X-1. */
6190 else if (STORE_FLAG_VALUE == -1
6191 && new_code == EQ
6192 && is_int_mode (mode, &int_mode)
6193 && op1 == const0_rtx
6194 && int_mode == GET_MODE (op0)
6195 && nonzero_bits (op0, int_mode) == 1)
6197 op0 = expand_compound_operation (op0);
6198 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6201 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6202 one bit that might be nonzero, we can convert (ne x 0) to
6203 (ashift x c) where C puts the bit in the sign bit. Remove any
6204 AND with STORE_FLAG_VALUE when we are done, since we are only
6205 going to test the sign bit. */
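/* Illustration (8-bit mode, values chosen arbitrarily): if the only possibly
   nonzero bit of X is bit 2, then (ashift X 5) has its sign bit set exactly
   when X is nonzero, which is all a sign-bit STORE_FLAG_VALUE target needs
   to test.  */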
6206 if (new_code == NE
6207 && is_int_mode (mode, &int_mode)
6208 && HWI_COMPUTABLE_MODE_P (int_mode)
6209 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6210 && op1 == const0_rtx
6211 && int_mode == GET_MODE (op0)
6212 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6214 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6215 expand_compound_operation (op0),
6216 GET_MODE_PRECISION (int_mode) - 1 - i);
6217 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6218 return XEXP (x, 0);
6219 else
6220 return x;
6223 /* If the code changed, return a whole new comparison.
6224 We also need to avoid using SUBST in cases where
6225 simplify_comparison has widened a comparison with a CONST_INT,
6226 since in that case the wider CONST_INT may fail the sanity
6227 checks in do_SUBST. */
6228 if (new_code != code
6229 || (CONST_INT_P (op1)
6230 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6231 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6232 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6234 /* Otherwise, keep this operation, but maybe change its operands.
6235 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6236 SUBST (XEXP (x, 0), op0);
6237 SUBST (XEXP (x, 1), op1);
6239 break;
6241 case IF_THEN_ELSE:
6242 return simplify_if_then_else (x);
6244 case ZERO_EXTRACT:
6245 case SIGN_EXTRACT:
6246 case ZERO_EXTEND:
6247 case SIGN_EXTEND:
6248 /* If we are processing SET_DEST, we are done. */
6249 if (in_dest)
6250 return x;
6252 return expand_compound_operation (x);
6254 case SET:
6255 return simplify_set (x);
6257 case AND:
6258 case IOR:
6259 return simplify_logical (x);
6261 case ASHIFT:
6262 case LSHIFTRT:
6263 case ASHIFTRT:
6264 case ROTATE:
6265 case ROTATERT:
6266 /* If this is a shift by a constant amount, simplify it. */
6267 if (CONST_INT_P (XEXP (x, 1)))
6268 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6269 INTVAL (XEXP (x, 1)));
6271 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6272 SUBST (XEXP (x, 1),
6273 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6274 (HOST_WIDE_INT_1U
6275 << exact_log2 (GET_MODE_UNIT_BITSIZE
6276 (GET_MODE (x))))
6277 - 1,
6278 0));
6279 break;
6280 case VEC_SELECT:
6282 rtx trueop0 = XEXP (x, 0);
6283 mode = GET_MODE (trueop0);
6284 rtx trueop1 = XEXP (x, 1);
6285 /* If we select a low-part subreg, return that. */
6286 if (vec_series_lowpart_p (GET_MODE (x), mode, trueop1))
6288 rtx new_rtx = lowpart_subreg (GET_MODE (x), trueop0, mode);
6289 if (new_rtx != NULL_RTX)
6290 return new_rtx;
6294 default:
6295 break;
6298 return x;
6301 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6303 static rtx
6304 simplify_if_then_else (rtx x)
6306 machine_mode mode = GET_MODE (x);
6307 rtx cond = XEXP (x, 0);
6308 rtx true_rtx = XEXP (x, 1);
6309 rtx false_rtx = XEXP (x, 2);
6310 enum rtx_code true_code = GET_CODE (cond);
6311 int comparison_p = COMPARISON_P (cond);
6312 rtx temp;
6313 int i;
6314 enum rtx_code false_code;
6315 rtx reversed;
6316 scalar_int_mode int_mode, inner_mode;
6318 /* Simplify storing of the truth value. */
6319 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6320 return simplify_gen_relational (true_code, mode, VOIDmode,
6321 XEXP (cond, 0), XEXP (cond, 1));
6323 /* Also when the truth value has to be reversed. */
6324 if (comparison_p
6325 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6326 && (reversed = reversed_comparison (cond, mode)))
6327 return reversed;
6329 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6330 in it is being compared against certain values. Get the true and false
6331 comparisons and see if that says anything about the value of each arm. */
6333 if (comparison_p
6334 && ((false_code = reversed_comparison_code (cond, NULL))
6335 != UNKNOWN)
6336 && REG_P (XEXP (cond, 0)))
6338 HOST_WIDE_INT nzb;
6339 rtx from = XEXP (cond, 0);
6340 rtx true_val = XEXP (cond, 1);
6341 rtx false_val = true_val;
6342 int swapped = 0;
6344 /* If FALSE_CODE is EQ, swap the codes and arms. */
6346 if (false_code == EQ)
6348 swapped = 1, true_code = EQ, false_code = NE;
6349 std::swap (true_rtx, false_rtx);
6352 scalar_int_mode from_mode;
6353 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6355 /* If we are comparing against zero and the expression being
6356 tested has only a single bit that might be nonzero, that is
6357 its value when it is not equal to zero. Similarly if it is
6358 known to be -1 or 0. */
6359 if (true_code == EQ
6360 && true_val == const0_rtx
6361 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6363 false_code = EQ;
6364 false_val = gen_int_mode (nzb, from_mode);
6366 else if (true_code == EQ
6367 && true_val == const0_rtx
6368 && (num_sign_bit_copies (from, from_mode)
6369 == GET_MODE_PRECISION (from_mode)))
6371 false_code = EQ;
6372 false_val = constm1_rtx;
6376 /* Now simplify an arm if we know the value of the register in the
6377 branch and it is used in the arm. Be careful due to the potential
6378 of locally-shared RTL. */
6380 if (reg_mentioned_p (from, true_rtx))
6381 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6382 from, true_val),
6383 pc_rtx, pc_rtx, 0, 0, 0);
6384 if (reg_mentioned_p (from, false_rtx))
6385 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6386 from, false_val),
6387 pc_rtx, pc_rtx, 0, 0, 0);
6389 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6390 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6392 true_rtx = XEXP (x, 1);
6393 false_rtx = XEXP (x, 2);
6394 true_code = GET_CODE (cond);
6397 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6398 reversed, do so to avoid needing two sets of patterns for
6399 subtract-and-branch insns. Similarly if we have a constant in the true
6400 arm, the false arm is the same as the first operand of the comparison, or
6401 the false arm is more complicated than the true arm. */
6403 if (comparison_p
6404 && reversed_comparison_code (cond, NULL) != UNKNOWN
6405 && (true_rtx == pc_rtx
6406 || (CONSTANT_P (true_rtx)
6407 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6408 || true_rtx == const0_rtx
6409 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6410 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6411 && !OBJECT_P (false_rtx))
6412 || reg_mentioned_p (true_rtx, false_rtx)
6413 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6415 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6416 SUBST (XEXP (x, 1), false_rtx);
6417 SUBST (XEXP (x, 2), true_rtx);
6419 std::swap (true_rtx, false_rtx);
6420 cond = XEXP (x, 0);
6422 /* It is possible that the conditional has been simplified out. */
6423 true_code = GET_CODE (cond);
6424 comparison_p = COMPARISON_P (cond);
6427 /* If the two arms are identical, we don't need the comparison. */
6429 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6430 return true_rtx;
6432 /* Convert a == b ? b : a to "a". */
6433 if (true_code == EQ && ! side_effects_p (cond)
6434 && !HONOR_NANS (mode)
6435 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6436 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6437 return false_rtx;
6438 else if (true_code == NE && ! side_effects_p (cond)
6439 && !HONOR_NANS (mode)
6440 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6441 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6442 return true_rtx;
6444 /* Look for cases where we have (abs x) or (neg (abs X)). */
6446 if (GET_MODE_CLASS (mode) == MODE_INT
6447 && comparison_p
6448 && XEXP (cond, 1) == const0_rtx
6449 && GET_CODE (false_rtx) == NEG
6450 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6451 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6452 && ! side_effects_p (true_rtx))
6453 switch (true_code)
6455 case GT:
6456 case GE:
6457 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6458 case LT:
6459 case LE:
6460 return
6461 simplify_gen_unary (NEG, mode,
6462 simplify_gen_unary (ABS, mode, true_rtx, mode),
6463 mode);
6464 default:
6465 break;
6468 /* Look for MIN or MAX. */
6470 if ((! FLOAT_MODE_P (mode)
6471 || (flag_unsafe_math_optimizations
6472 && !HONOR_NANS (mode)
6473 && !HONOR_SIGNED_ZEROS (mode)))
6474 && comparison_p
6475 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6476 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6477 && ! side_effects_p (cond))
6478 switch (true_code)
6480 case GE:
6481 case GT:
6482 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6483 case LE:
6484 case LT:
6485 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6486 case GEU:
6487 case GTU:
6488 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6489 case LEU:
6490 case LTU:
6491 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6492 default:
6493 break;
6496 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6497 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6498 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6499 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6500 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6501 neither 1 nor -1, but it isn't worth checking for. */
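/* A small worked case (assuming STORE_FLAG_VALUE == 1, constant chosen for
   illustration): (if_then_else COND (plus Z 4) Z) becomes
   (plus Z (mult COND 4)), since COND is 1 when the condition holds and 0
   otherwise; with STORE_FLAG_VALUE == -1 the multiplier becomes -4.  */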
6503 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6504 && comparison_p
6505 && is_int_mode (mode, &int_mode)
6506 && ! side_effects_p (x))
6508 rtx t = make_compound_operation (true_rtx, SET);
6509 rtx f = make_compound_operation (false_rtx, SET);
6510 rtx cond_op0 = XEXP (cond, 0);
6511 rtx cond_op1 = XEXP (cond, 1);
6512 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6513 scalar_int_mode m = int_mode;
6514 rtx z = 0, c1 = NULL_RTX;
6516 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6517 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6518 || GET_CODE (t) == ASHIFT
6519 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6520 && rtx_equal_p (XEXP (t, 0), f))
6521 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6523 /* If an identity-zero op is commutative, check whether there
6524 would be a match if we swapped the operands. */
6525 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6526 || GET_CODE (t) == XOR)
6527 && rtx_equal_p (XEXP (t, 1), f))
6528 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6529 else if (GET_CODE (t) == SIGN_EXTEND
6530 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6531 && (GET_CODE (XEXP (t, 0)) == PLUS
6532 || GET_CODE (XEXP (t, 0)) == MINUS
6533 || GET_CODE (XEXP (t, 0)) == IOR
6534 || GET_CODE (XEXP (t, 0)) == XOR
6535 || GET_CODE (XEXP (t, 0)) == ASHIFT
6536 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6537 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6538 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6539 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6540 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6541 && (num_sign_bit_copies (f, GET_MODE (f))
6542 > (unsigned int)
6543 (GET_MODE_PRECISION (int_mode)
6544 - GET_MODE_PRECISION (inner_mode))))
6546 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6547 extend_op = SIGN_EXTEND;
6548 m = inner_mode;
6550 else if (GET_CODE (t) == SIGN_EXTEND
6551 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6552 && (GET_CODE (XEXP (t, 0)) == PLUS
6553 || GET_CODE (XEXP (t, 0)) == IOR
6554 || GET_CODE (XEXP (t, 0)) == XOR)
6555 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6556 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6557 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6558 && (num_sign_bit_copies (f, GET_MODE (f))
6559 > (unsigned int)
6560 (GET_MODE_PRECISION (int_mode)
6561 - GET_MODE_PRECISION (inner_mode))))
6563 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6564 extend_op = SIGN_EXTEND;
6565 m = inner_mode;
6567 else if (GET_CODE (t) == ZERO_EXTEND
6568 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6569 && (GET_CODE (XEXP (t, 0)) == PLUS
6570 || GET_CODE (XEXP (t, 0)) == MINUS
6571 || GET_CODE (XEXP (t, 0)) == IOR
6572 || GET_CODE (XEXP (t, 0)) == XOR
6573 || GET_CODE (XEXP (t, 0)) == ASHIFT
6574 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6575 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6576 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6577 && HWI_COMPUTABLE_MODE_P (int_mode)
6578 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6579 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6580 && ((nonzero_bits (f, GET_MODE (f))
6581 & ~GET_MODE_MASK (inner_mode))
6582 == 0))
6584 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6585 extend_op = ZERO_EXTEND;
6586 m = inner_mode;
6588 else if (GET_CODE (t) == ZERO_EXTEND
6589 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6590 && (GET_CODE (XEXP (t, 0)) == PLUS
6591 || GET_CODE (XEXP (t, 0)) == IOR
6592 || GET_CODE (XEXP (t, 0)) == XOR)
6593 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6594 && HWI_COMPUTABLE_MODE_P (int_mode)
6595 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6596 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6597 && ((nonzero_bits (f, GET_MODE (f))
6598 & ~GET_MODE_MASK (inner_mode))
6599 == 0))
6601 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6602 extend_op = ZERO_EXTEND;
6603 m = inner_mode;
6606 if (z)
6608 machine_mode cm = m;
6609 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6610 && GET_MODE (c1) != VOIDmode)
6611 cm = GET_MODE (c1);
6612 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6613 cond_op0, cond_op1),
6614 pc_rtx, pc_rtx, 0, 0, 0);
6615 temp = simplify_gen_binary (MULT, cm, temp,
6616 simplify_gen_binary (MULT, cm, c1,
6617 const_true_rtx));
6618 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6619 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6621 if (extend_op != UNKNOWN)
6622 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6624 return temp;
6628 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6629 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6630 negation of a single bit, we can convert this operation to a shift. We
6631 can actually do this more generally, but it doesn't seem worth it. */
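/* For instance (illustrative values): if A is known to be 0 or 1,
   (if_then_else (ne A 0) 8 0) is simply (ashift A 3); if A is known to be
   0 or -1, (if_then_else (ne A 0) -8 0) is likewise (ashift A 3), since
   -1 << 3 is -8.  */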
6633 if (true_code == NE
6634 && is_a <scalar_int_mode> (mode, &int_mode)
6635 && XEXP (cond, 1) == const0_rtx
6636 && false_rtx == const0_rtx
6637 && CONST_INT_P (true_rtx)
6638 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6639 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6640 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6641 == GET_MODE_PRECISION (int_mode))
6642 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6643 return
6644 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6645 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6647 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6648 non-zero bit in A is C1. */
6649 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6650 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6651 && is_a <scalar_int_mode> (mode, &int_mode)
6652 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6653 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6654 == nonzero_bits (XEXP (cond, 0), inner_mode)
6655 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6657 rtx val = XEXP (cond, 0);
6658 if (inner_mode == int_mode)
6659 return val;
6660 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6661 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6664 return x;
6667 /* Simplify X, a SET expression. Return the new expression. */
6669 static rtx
6670 simplify_set (rtx x)
6672 rtx src = SET_SRC (x);
6673 rtx dest = SET_DEST (x);
6674 machine_mode mode
6675 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6676 rtx_insn *other_insn;
6677 rtx *cc_use;
6678 scalar_int_mode int_mode;
6680 /* (set (pc) (return)) gets written as (return). */
6681 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6682 return src;
6684 /* Now that we know for sure which bits of SRC we are using, see if we can
6685 simplify the expression for the object knowing that we only need the
6686 low-order bits. */
6688 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6690 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6691 SUBST (SET_SRC (x), src);
6694 /* If the source is a COMPARE, look for the use of the comparison result
6695 and try to simplify it unless we already have used undobuf.other_insn. */
6696 if ((GET_MODE_CLASS (mode) == MODE_CC || GET_CODE (src) == COMPARE)
6697 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6698 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6699 && COMPARISON_P (*cc_use)
6700 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6702 enum rtx_code old_code = GET_CODE (*cc_use);
6703 enum rtx_code new_code;
6704 rtx op0, op1, tmp;
6705 int other_changed = 0;
6706 rtx inner_compare = NULL_RTX;
6707 machine_mode compare_mode = GET_MODE (dest);
6709 if (GET_CODE (src) == COMPARE)
6711 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6712 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6714 inner_compare = op0;
6715 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6718 else
6719 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6721 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6722 op0, op1);
6723 if (!tmp)
6724 new_code = old_code;
6725 else if (!CONSTANT_P (tmp))
6727 new_code = GET_CODE (tmp);
6728 op0 = XEXP (tmp, 0);
6729 op1 = XEXP (tmp, 1);
6731 else
6733 rtx pat = PATTERN (other_insn);
6734 undobuf.other_insn = other_insn;
6735 SUBST (*cc_use, tmp);
6737 /* Attempt to simplify CC user. */
6738 if (GET_CODE (pat) == SET)
6740 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6741 if (new_rtx != NULL_RTX)
6742 SUBST (SET_SRC (pat), new_rtx);
6745 /* Convert X into a no-op move. */
6746 SUBST (SET_DEST (x), pc_rtx);
6747 SUBST (SET_SRC (x), pc_rtx);
6748 return x;
6751 /* Simplify our comparison, if possible. */
6752 new_code = simplify_comparison (new_code, &op0, &op1);
6754 #ifdef SELECT_CC_MODE
6755 /* If this machine has CC modes other than CCmode, check to see if we
6756 need to use a different CC mode here. */
6757 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6758 compare_mode = GET_MODE (op0);
6759 else if (inner_compare
6760 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6761 && new_code == old_code
6762 && op0 == XEXP (inner_compare, 0)
6763 && op1 == XEXP (inner_compare, 1))
6764 compare_mode = GET_MODE (inner_compare);
6765 else
6766 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6768 /* If the mode changed, we have to change SET_DEST, the mode in the
6769 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6770 a hard register, just build new versions with the proper mode. If it
6771 is a pseudo, we lose unless it is the only time we set the pseudo, in
6772 which case we can safely change its mode. */
6773 if (compare_mode != GET_MODE (dest))
6775 if (can_change_dest_mode (dest, 0, compare_mode))
6777 unsigned int regno = REGNO (dest);
6778 rtx new_dest;
6780 if (regno < FIRST_PSEUDO_REGISTER)
6781 new_dest = gen_rtx_REG (compare_mode, regno);
6782 else
6784 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6785 new_dest = regno_reg_rtx[regno];
6788 SUBST (SET_DEST (x), new_dest);
6789 SUBST (XEXP (*cc_use, 0), new_dest);
6790 other_changed = 1;
6792 dest = new_dest;
6795 #endif /* SELECT_CC_MODE */
6797 /* If the code changed, we have to build a new comparison in
6798 undobuf.other_insn. */
6799 if (new_code != old_code)
6801 int other_changed_previously = other_changed;
6802 unsigned HOST_WIDE_INT mask;
6803 rtx old_cc_use = *cc_use;
6805 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6806 dest, const0_rtx));
6807 other_changed = 1;
6809 /* If the only change we made was to change an EQ into an NE or
6810 vice versa, OP0 has only one bit that might be nonzero, and OP1
6811 is zero, check if changing the user of the condition code will
6812 produce a valid insn. If it won't, we can keep the original code
6813 in that insn by surrounding our operation with an XOR. */
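/* Worked example (mask chosen for illustration): if OP0 can only be 0 or 8,
   then (eq OP0 0) and (ne (xor OP0 8) 0) are the same test, so XORing OP0
   with the mask lets the user insn keep its original EQ/NE code.  */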
6815 if (((old_code == NE && new_code == EQ)
6816 || (old_code == EQ && new_code == NE))
6817 && ! other_changed_previously && op1 == const0_rtx
6818 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6819 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6821 rtx pat = PATTERN (other_insn), note = 0;
6823 if ((recog_for_combine (&pat, other_insn, &note) < 0
6824 && ! check_asm_operands (pat)))
6826 *cc_use = old_cc_use;
6827 other_changed = 0;
6829 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6830 gen_int_mode (mask,
6831 GET_MODE (op0)));
6836 if (other_changed)
6837 undobuf.other_insn = other_insn;
6839 /* Don't generate a compare of a CC with 0, just use that CC. */
6840 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6842 SUBST (SET_SRC (x), op0);
6843 src = SET_SRC (x);
6845 /* Otherwise, if we didn't previously have the same COMPARE we
6846 want, create it from scratch. */
6847 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6848 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6850 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6851 src = SET_SRC (x);
6854 else
6856 /* Get SET_SRC in a form where we have placed back any
6857 compound expressions. Then do the checks below. */
6858 src = make_compound_operation (src, SET);
6859 SUBST (SET_SRC (x), src);
6862 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6863 and X being a REG or (subreg (reg)), we may be able to convert this to
6864 (set (subreg:m2 x) (op)).
6866 We can always do this if M1 is narrower than M2 because that means that
6867 we only care about the low bits of the result.
6869 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6870 perform a narrower operation than requested since the high-order bits will
6871 be undefined. On machines where it is defined, this transformation is safe
6872 as long as M1 and M2 have the same number of words. */
6874 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6875 && !OBJECT_P (SUBREG_REG (src))
6876 && (known_equal_after_align_up
6877 (GET_MODE_SIZE (GET_MODE (src)),
6878 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
6879 UNITS_PER_WORD))
6880 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6881 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6882 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6883 GET_MODE (SUBREG_REG (src)),
6884 GET_MODE (src)))
6885 && (REG_P (dest)
6886 || (GET_CODE (dest) == SUBREG
6887 && REG_P (SUBREG_REG (dest)))))
6889 SUBST (SET_DEST (x),
6890 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6891 dest));
6892 SUBST (SET_SRC (x), SUBREG_REG (src));
6894 src = SET_SRC (x), dest = SET_DEST (x);
6897 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6898 would require a paradoxical subreg. Replace the subreg with a
6899 zero_extend to avoid the reload that would otherwise be required.
6900 Don't do this unless we have a scalar integer mode, otherwise the
6901 transformation is incorrect. */
6903 enum rtx_code extend_op;
6904 if (paradoxical_subreg_p (src)
6905 && MEM_P (SUBREG_REG (src))
6906 && SCALAR_INT_MODE_P (GET_MODE (src))
6907 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6909 SUBST (SET_SRC (x),
6910 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6912 src = SET_SRC (x);
6915 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6916 are comparing an item known to be 0 or -1 against 0, use a logical
6917 operation instead. Check for one of the arms being an IOR of the other
6918 arm with some value. We compute three terms to be IOR'ed together. In
6919 practice, at most two will be nonzero. Then we do the IOR's. */
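     /* As an illustrative sketch (A, B, C and V are placeholder rtxes,
	with A known to be 0 or -1): (if_then_else (ne A (const_int 0)) B C)
	becomes (ior (and A B) (and (not A) C)); if B is (ior C V), the
	common C arm is factored out as TERM1 and only two of the three
	terms end up nonzero. */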
6921 if (GET_CODE (dest) != PC
6922 && GET_CODE (src) == IF_THEN_ELSE
6923 && is_int_mode (GET_MODE (src), &int_mode)
6924 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6925 && XEXP (XEXP (src, 0), 1) == const0_rtx
6926 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6927 && (!HAVE_conditional_move
6928 || ! can_conditionally_move_p (int_mode))
6929 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6930 == GET_MODE_PRECISION (int_mode))
6931 && ! side_effects_p (src))
6933 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6934 ? XEXP (src, 1) : XEXP (src, 2));
6935 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6936 ? XEXP (src, 2) : XEXP (src, 1));
6937 rtx term1 = const0_rtx, term2, term3;
6939 if (GET_CODE (true_rtx) == IOR
6940 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6941 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6942 else if (GET_CODE (true_rtx) == IOR
6943 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6944 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6945 else if (GET_CODE (false_rtx) == IOR
6946 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6947 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6948 else if (GET_CODE (false_rtx) == IOR
6949 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6950 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6952 term2 = simplify_gen_binary (AND, int_mode,
6953 XEXP (XEXP (src, 0), 0), true_rtx);
6954 term3 = simplify_gen_binary (AND, int_mode,
6955 simplify_gen_unary (NOT, int_mode,
6956 XEXP (XEXP (src, 0), 0),
6957 int_mode),
6958 false_rtx);
6960 SUBST (SET_SRC (x),
6961 simplify_gen_binary (IOR, int_mode,
6962 simplify_gen_binary (IOR, int_mode,
6963 term1, term2),
6964 term3));
6966 src = SET_SRC (x);
6969 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6970 whole thing fail. */
6971 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6972 return src;
6973 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6974 return dest;
6975 else
6976 /* Convert this into a field assignment operation, if possible. */
6977 return make_field_assignment (x);
6980 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6981 result. */
6983 static rtx
6984 simplify_logical (rtx x)
6986 rtx op0 = XEXP (x, 0);
6987 rtx op1 = XEXP (x, 1);
6988 scalar_int_mode mode;
6990 switch (GET_CODE (x))
6992 case AND:
6993 /* We can call simplify_and_const_int only if we don't lose
6994 any (sign) bits when converting INTVAL (op1) to
6995 "unsigned HOST_WIDE_INT". */
6996 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
6997 && CONST_INT_P (op1)
6998 && (HWI_COMPUTABLE_MODE_P (mode)
6999 || INTVAL (op1) > 0))
7001 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7002 if (GET_CODE (x) != AND)
7003 return x;
7005 op0 = XEXP (x, 0);
7006 op1 = XEXP (x, 1);
7009 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7010 apply the distributive law and then the inverse distributive
7011 law to see if things simplify. */
7012 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7014 rtx result = distribute_and_simplify_rtx (x, 0);
7015 if (result)
7016 return result;
7018 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7020 rtx result = distribute_and_simplify_rtx (x, 1);
7021 if (result)
7022 return result;
7024 break;
7026 case IOR:
7027 /* If we have (ior (and A B) C), apply the distributive law and then
7028 the inverse distributive law to see if things simplify. */
7030 if (GET_CODE (op0) == AND)
7032 rtx result = distribute_and_simplify_rtx (x, 0);
7033 if (result)
7034 return result;
7037 if (GET_CODE (op1) == AND)
7039 rtx result = distribute_and_simplify_rtx (x, 1);
7040 if (result)
7041 return result;
7043 break;
7045 default:
7046 gcc_unreachable ();
7049 return x;
7052 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7053 operations" because they can be replaced with two more basic operations.
7054 ZERO_EXTEND is also considered "compound" because it can be replaced with
7055 an AND operation, which is simpler, though only one operation.
7057 The function expand_compound_operation is called with an rtx expression
7058 and will convert it to the appropriate shifts and AND operations,
7059 simplifying at each stage.
7061 The function make_compound_operation is called to convert an expression
7062 consisting of shifts and ANDs into the equivalent compound expression.
7063 It is the inverse of this function, loosely speaking. */
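/* As a rough illustration (R is a placeholder register),
   (zero_extend:SI (reg:QI R)) is equivalent to
   (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)); that is the kind of
   form expand_compound_operation works towards and make_compound_operation
   folds back. */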
7065 static rtx
7066 expand_compound_operation (rtx x)
7068 unsigned HOST_WIDE_INT pos = 0, len;
7069 int unsignedp = 0;
7070 unsigned int modewidth;
7071 rtx tem;
7072 scalar_int_mode inner_mode;
7074 switch (GET_CODE (x))
7076 case ZERO_EXTEND:
7077 unsignedp = 1;
7078 /* FALLTHRU */
7079 case SIGN_EXTEND:
7080 /* We can't necessarily use a const_int for a multiword mode;
7081 it depends on implicitly extending the value.
7082 Since we don't know the right way to extend it,
7083 we can't tell whether the implicit way is right.
7085 Even for a mode that is no wider than a const_int,
7086 we can't win, because we need to sign extend one of its bits through
7087 the rest of it, and we don't know which bit. */
7088 if (CONST_INT_P (XEXP (x, 0)))
7089 return x;
7091 /* Reject modes that aren't scalar integers because turning vector
7092 or complex modes into shifts causes problems. */
7093 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7094 return x;
7096 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7097 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7098 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7099 reloaded. If not for that, MEMs would very rarely be safe.
7101 Reject modes bigger than a word, because we might not be able
7102 to reference a two-register group starting with an arbitrary register
7103 (and currently gen_lowpart might crash for a SUBREG). */
7105 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7106 return x;
7108 len = GET_MODE_PRECISION (inner_mode);
7109 /* If the inner object has VOIDmode (the only way this can happen
7110 is if it is an ASM_OPERANDS), we can't do anything since we don't
7111 know how much masking to do. */
7112 if (len == 0)
7113 return x;
7115 break;
7117 case ZERO_EXTRACT:
7118 unsignedp = 1;
7120 /* fall through */
7122 case SIGN_EXTRACT:
7123 /* If the operand is a CLOBBER, just return it. */
7124 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7125 return XEXP (x, 0);
7127 if (!CONST_INT_P (XEXP (x, 1))
7128 || !CONST_INT_P (XEXP (x, 2)))
7129 return x;
7131 /* Reject modes that aren't scalar integers because turning vector
7132 or complex modes into shifts causes problems. */
7133 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7134 return x;
7136 len = INTVAL (XEXP (x, 1));
7137 pos = INTVAL (XEXP (x, 2));
7139 /* This should stay within the object being extracted, fail otherwise. */
7140 if (len + pos > GET_MODE_PRECISION (inner_mode))
7141 return x;
7143 if (BITS_BIG_ENDIAN)
7144 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7146 break;
7148 default:
7149 return x;
7152 /* We've rejected non-scalar operations by now. */
7153 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7155 /* Convert sign extension to zero extension, if we know that the high
7156 bit is not set, as this is easier to optimize. It will be converted
7157 back to a cheaper alternative in make_extraction. */
7158 if (GET_CODE (x) == SIGN_EXTEND
7159 && HWI_COMPUTABLE_MODE_P (mode)
7160 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7161 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7162 == 0))
7164 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7165 rtx temp2 = expand_compound_operation (temp);
7167 /* Make sure this is a profitable operation. */
7168 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7169 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7170 return temp2;
7171 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7172 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7173 return temp;
7174 else
7175 return x;
7178 /* We can optimize some special cases of ZERO_EXTEND. */
7179 if (GET_CODE (x) == ZERO_EXTEND)
7181 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7182 know that the last value didn't have any inappropriate bits
7183 set. */
7184 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7185 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7186 && HWI_COMPUTABLE_MODE_P (mode)
7187 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7188 & ~GET_MODE_MASK (inner_mode)) == 0)
7189 return XEXP (XEXP (x, 0), 0);
7191 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7192 if (GET_CODE (XEXP (x, 0)) == SUBREG
7193 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7194 && subreg_lowpart_p (XEXP (x, 0))
7195 && HWI_COMPUTABLE_MODE_P (mode)
7196 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7197 & ~GET_MODE_MASK (inner_mode)) == 0)
7198 return SUBREG_REG (XEXP (x, 0));
7200 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7201 is a comparison and STORE_FLAG_VALUE permits. This is like
7202 the first case, but it works even when MODE is larger
7203 than HOST_WIDE_INT. */
7204 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7205 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7206 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7207 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7208 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7209 return XEXP (XEXP (x, 0), 0);
7211 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7212 if (GET_CODE (XEXP (x, 0)) == SUBREG
7213 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7214 && subreg_lowpart_p (XEXP (x, 0))
7215 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7216 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7217 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7218 return SUBREG_REG (XEXP (x, 0));
7222 /* If we reach here, we want to return a pair of shifts. The inner
7223 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7224 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7225 logical depending on the value of UNSIGNEDP.
7227 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7228 converted into an AND of a shift.
7230 We must check for the case where the left shift would have a negative
7231 count. This can happen in a case like (x >> 31) & 255 on machines
7232 that can't shift by a constant. On those machines, we would first
7233 combine the shift with the AND to produce a variable-position
7234 extraction. Then the constant of 31 would be substituted in
7235 to produce such a position. */
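  /* For instance, extracting LEN == 8 bits at POS == 3 from a 32-bit
     value X (a placeholder) gives
     (ashiftrt:SI (ashift:SI X (const_int 21)) (const_int 24))
     for a signed extract, with LSHIFTRT instead of ASHIFTRT in the
     unsigned case. */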
7237 modewidth = GET_MODE_PRECISION (mode);
7238 if (modewidth >= pos + len)
7240 tem = gen_lowpart (mode, XEXP (x, 0));
7241 if (!tem || GET_CODE (tem) == CLOBBER)
7242 return x;
7243 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7244 tem, modewidth - pos - len);
7245 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7246 mode, tem, modewidth - len);
7248 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7250 tem = simplify_shift_const (NULL_RTX, LSHIFTRT, inner_mode,
7251 XEXP (x, 0), pos);
7252 tem = gen_lowpart (mode, tem);
7253 if (!tem || GET_CODE (tem) == CLOBBER)
7254 return x;
7255 tem = simplify_and_const_int (NULL_RTX, mode, tem,
7256 (HOST_WIDE_INT_1U << len) - 1);
7258 else
7259 /* Any other cases we can't handle. */
7260 return x;
7262 /* If we couldn't do this for some reason, return the original
7263 expression. */
7264 if (GET_CODE (tem) == CLOBBER)
7265 return x;
7267 return tem;
7270 /* X is a SET which contains an assignment of one object into
7271 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7272 or certain SUBREGS). If possible, convert it into a series of
7273 logical operations.
7275 We half-heartedly support variable positions, but do not at all
7276 support variable lengths. */
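/* Roughly, (set (zero_extract:SI (reg:SI X) (const_int 8) (const_int 4))
   (reg:SI Y)), with X and Y placeholders, is rewritten so that the 8-bit
   field at bit 4 of X is first cleared with an AND against ~(0xff << 4)
   and then IOR'ed with (Y & 0xff) shifted left by 4. */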
7278 static const_rtx
7279 expand_field_assignment (const_rtx x)
7281 rtx inner;
7282 rtx pos; /* Always counts from low bit. */
7283 int len, inner_len;
7284 rtx mask, cleared, masked;
7285 scalar_int_mode compute_mode;
7287 /* Loop until we find something we can't simplify. */
7288 while (1)
7290 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7291 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7293 rtx x0 = XEXP (SET_DEST (x), 0);
7294 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7295 break;
7296 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7297 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7298 MAX_MODE_INT);
7300 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7301 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7303 inner = XEXP (SET_DEST (x), 0);
7304 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7305 break;
7307 len = INTVAL (XEXP (SET_DEST (x), 1));
7308 pos = XEXP (SET_DEST (x), 2);
7310 /* A constant position should stay within the width of INNER. */
7311 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7312 break;
7314 if (BITS_BIG_ENDIAN)
7316 if (CONST_INT_P (pos))
7317 pos = GEN_INT (inner_len - len - INTVAL (pos));
7318 else if (GET_CODE (pos) == MINUS
7319 && CONST_INT_P (XEXP (pos, 1))
7320 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7321 /* If position is ADJUST - X, new position is X. */
7322 pos = XEXP (pos, 0);
7323 else
7324 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7325 gen_int_mode (inner_len - len,
7326 GET_MODE (pos)),
7327 pos);
7331 /* If the destination is a subreg that overwrites the whole of the inner
7332 register, we can move the subreg to the source. */
7333 else if (GET_CODE (SET_DEST (x)) == SUBREG
7334 /* We need SUBREGs to compute nonzero_bits properly. */
7335 && nonzero_sign_valid
7336 && !read_modify_subreg_p (SET_DEST (x)))
7338 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7339 gen_lowpart
7340 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7341 SET_SRC (x)));
7342 continue;
7344 else
7345 break;
7347 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7348 inner = SUBREG_REG (inner);
7350 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7351 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7353 /* Don't do anything for vector or complex integral types. */
7354 if (! FLOAT_MODE_P (GET_MODE (inner)))
7355 break;
7357 /* Try to find an integral mode to pun with. */
7358 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7359 .exists (&compute_mode))
7360 break;
7362 inner = gen_lowpart (compute_mode, inner);
7365 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7366 if (len >= HOST_BITS_PER_WIDE_INT)
7367 break;
7369 /* Don't try to compute in too wide unsupported modes. */
7370 if (!targetm.scalar_mode_supported_p (compute_mode))
7371 break;
7373 /* Now compute the equivalent expression. Make a copy of INNER
7374 for the SET_DEST in case it is a MEM into which we will substitute;
7375 we don't want shared RTL in that case. */
7376 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7377 compute_mode);
7378 cleared = simplify_gen_binary (AND, compute_mode,
7379 simplify_gen_unary (NOT, compute_mode,
7380 simplify_gen_binary (ASHIFT,
7381 compute_mode,
7382 mask, pos),
7383 compute_mode),
7384 inner);
7385 masked = simplify_gen_binary (ASHIFT, compute_mode,
7386 simplify_gen_binary (
7387 AND, compute_mode,
7388 gen_lowpart (compute_mode, SET_SRC (x)),
7389 mask),
7390 pos);
7392 x = gen_rtx_SET (copy_rtx (inner),
7393 simplify_gen_binary (IOR, compute_mode,
7394 cleared, masked));
7397 return x;
7400 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7401 it is an RTX that represents the (variable) starting position; otherwise,
7402 POS is the (constant) starting bit position. Both are counted from the LSB.
7404 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7406 IN_DEST is nonzero if this is a reference in the destination of a SET.
7407 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7408 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7409 be used.
7411 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7412 ZERO_EXTRACT should be built even for bits starting at bit 0.
7414 MODE is the desired mode of the result (if IN_DEST == 0).
7416 The result is an RTX for the extraction or NULL_RTX if the target
7417 can't handle it. */
7419 static rtx
7420 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7421 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7422 int in_dest, int in_compare)
7424 /* This mode describes the size of the storage area
7425 to fetch the overall value from. Within that, we
7426 ignore the POS lowest bits, etc. */
7427 machine_mode is_mode = GET_MODE (inner);
7428 machine_mode inner_mode;
7429 scalar_int_mode wanted_inner_mode;
7430 scalar_int_mode wanted_inner_reg_mode = word_mode;
7431 scalar_int_mode pos_mode = word_mode;
7432 machine_mode extraction_mode = word_mode;
7433 rtx new_rtx = 0;
7434 rtx orig_pos_rtx = pos_rtx;
7435 HOST_WIDE_INT orig_pos;
7437 if (pos_rtx && CONST_INT_P (pos_rtx))
7438 pos = INTVAL (pos_rtx), pos_rtx = 0;
7440 if (GET_CODE (inner) == SUBREG
7441 && subreg_lowpart_p (inner)
7442 && (paradoxical_subreg_p (inner)
7443 /* If trying or potentially trying to extract
7444 bits outside of is_mode, don't look through
7445 non-paradoxical SUBREGs. See PR82192. */
7446 || (pos_rtx == NULL_RTX
7447 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7449 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7450 consider just the QI as the memory to extract from.
7451 The subreg adds or removes high bits; its mode is
7452 irrelevant to the meaning of this extraction,
7453 since POS and LEN count from the lsb. */
7454 if (MEM_P (SUBREG_REG (inner)))
7455 is_mode = GET_MODE (SUBREG_REG (inner));
7456 inner = SUBREG_REG (inner);
7458 else if (GET_CODE (inner) == ASHIFT
7459 && CONST_INT_P (XEXP (inner, 1))
7460 && pos_rtx == 0 && pos == 0
7461 && len > UINTVAL (XEXP (inner, 1)))
7463 /* We're extracting the least significant bits of an rtx
7464 (ashift X (const_int C)), where LEN > C. Extract the
7465 least significant (LEN - C) bits of X, giving an rtx
7466 whose mode is MODE, then shift it left C times. */
7467 new_rtx = make_extraction (mode, XEXP (inner, 0),
7468 0, 0, len - INTVAL (XEXP (inner, 1)),
7469 unsignedp, in_dest, in_compare);
7470 if (new_rtx != 0)
7471 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7473 else if (GET_CODE (inner) == MULT
7474 && CONST_INT_P (XEXP (inner, 1))
7475 && pos_rtx == 0 && pos == 0)
7477 /* We're extracting the least significant bits of an rtx
7478 (mult X (const_int 2^C)), where LEN > C. Extract the
7479 least significant (LEN - C) bits of X, giving an rtx
7480 whose mode is MODE, then multiply it by 2^C. */
7481 const HOST_WIDE_INT shift_amt = exact_log2 (INTVAL (XEXP (inner, 1)));
7482 if (IN_RANGE (shift_amt, 1, len - 1))
7484 new_rtx = make_extraction (mode, XEXP (inner, 0),
7485 0, 0, len - shift_amt,
7486 unsignedp, in_dest, in_compare);
7487 if (new_rtx)
7488 return gen_rtx_MULT (mode, new_rtx, XEXP (inner, 1));
7491 else if (GET_CODE (inner) == TRUNCATE
7492 /* If trying or potentially trying to extract
7493 bits outside of is_mode, don't look through
7494 TRUNCATE. See PR82192. */
7495 && pos_rtx == NULL_RTX
7496 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7497 inner = XEXP (inner, 0);
7499 inner_mode = GET_MODE (inner);
7501 /* See if this can be done without an extraction. We never can if the
7502 width of the field is not the same as that of some integer mode. For
7503 registers, we can only avoid the extraction if the position is at the
7504 low-order bit and this is either not in the destination or we have the
7505 appropriate STRICT_LOW_PART operation available.
7507 For MEM, we can avoid an extract if the field starts on an appropriate
7508 boundary and we can change the mode of the memory reference. */
7510 scalar_int_mode tmode;
7511 if (int_mode_for_size (len, 1).exists (&tmode)
7512 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7513 && !MEM_P (inner)
7514 && (pos == 0 || REG_P (inner))
7515 && (inner_mode == tmode
7516 || !REG_P (inner)
7517 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7518 || reg_truncated_to_mode (tmode, inner))
7519 && (! in_dest
7520 || (REG_P (inner)
7521 && have_insn_for (STRICT_LOW_PART, tmode))))
7522 || (MEM_P (inner) && pos_rtx == 0
7523 && (pos
7524 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7525 : BITS_PER_UNIT)) == 0
7526 /* We can't do this if we are widening INNER_MODE (it
7527 may not be aligned, for one thing). */
7528 && !paradoxical_subreg_p (tmode, inner_mode)
7529 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7530 && (inner_mode == tmode
7531 || (! mode_dependent_address_p (XEXP (inner, 0),
7532 MEM_ADDR_SPACE (inner))
7533 && ! MEM_VOLATILE_P (inner))))))
7535 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7536 field. If the original and current mode are the same, we need not
7537 adjust the offset. Otherwise, we do if bytes big endian.
7539 If INNER is not a MEM, get a piece consisting of just the field
7540 of interest (in this case POS % BITS_PER_WORD must be 0). */
7542 if (MEM_P (inner))
7544 poly_int64 offset;
7546 /* POS counts from lsb, but make OFFSET count in memory order. */
7547 if (BYTES_BIG_ENDIAN)
7548 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7549 - len - pos);
7550 else
7551 offset = pos / BITS_PER_UNIT;
7553 new_rtx = adjust_address_nv (inner, tmode, offset);
7555 else if (REG_P (inner))
7557 if (tmode != inner_mode)
7559 /* We can't call gen_lowpart in a DEST since we
7560 always want a SUBREG (see below) and it would sometimes
7561 return a new hard register. */
7562 if (pos || in_dest)
7564 poly_uint64 offset
7565 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7567 /* Avoid creating invalid subregs, for example when
7568 simplifying (x>>32)&255. */
7569 if (!validate_subreg (tmode, inner_mode, inner, offset))
7570 return NULL_RTX;
7572 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7574 else
7575 new_rtx = gen_lowpart (tmode, inner);
7577 else
7578 new_rtx = inner;
7580 else
7581 new_rtx = force_to_mode (inner, tmode,
7582 len >= HOST_BITS_PER_WIDE_INT
7583 ? HOST_WIDE_INT_M1U
7584 : (HOST_WIDE_INT_1U << len) - 1, 0);
7586 /* If this extraction is going into the destination of a SET,
7587 make a STRICT_LOW_PART unless we made a MEM. */
7589 if (in_dest)
7590 return (MEM_P (new_rtx) ? new_rtx
7591 : (GET_CODE (new_rtx) != SUBREG
7592 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7593 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7595 if (mode == tmode)
7596 return new_rtx;
7598 if (CONST_SCALAR_INT_P (new_rtx))
7599 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7600 mode, new_rtx, tmode);
7602 /* If we know that no extraneous bits are set, and that the high
7603 bit is not set, convert the extraction to the cheaper of
7604 sign and zero extension, which are equivalent in these cases. */
7605 if (flag_expensive_optimizations
7606 && (HWI_COMPUTABLE_MODE_P (tmode)
7607 && ((nonzero_bits (new_rtx, tmode)
7608 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7609 == 0)))
7611 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7612 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7614 /* Prefer ZERO_EXTENSION, since it gives more information to
7615 backends. */
7616 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7617 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7618 return temp;
7619 return temp1;
7622 /* Otherwise, sign- or zero-extend unless we already are in the
7623 proper mode. */
7625 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7626 mode, new_rtx));
7629 /* Unless this is a COMPARE or we have a funny memory reference,
7630 don't do anything with zero-extending field extracts starting at
7631 the low-order bit since they are simple AND operations. */
7632 if (pos_rtx == 0 && pos == 0 && ! in_dest
7633 && ! in_compare && unsignedp)
7634 return 0;
7636 /* If INNER is a MEM, reject this if we would be spanning bytes or
7637 if the position is not a constant and the length is not 1. In all
7638 other cases, we would only be going outside our object in cases when
7639 an original shift would have been undefined. */
7640 if (MEM_P (inner)
7641 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7642 || (pos_rtx != 0 && len != 1)))
7643 return 0;
7645 enum extraction_pattern pattern = (in_dest ? EP_insv
7646 : unsignedp ? EP_extzv : EP_extv);
7648 /* If INNER is not from memory, we want it to have the mode of a register
7649 extraction pattern's structure operand, or word_mode if there is no
7650 such pattern. The same applies to extraction_mode and pos_mode
7651 and their respective operands.
7653 For memory, assume that the desired extraction_mode and pos_mode
7654 are the same as for a register operation, since at present we don't
7655 have named patterns for aligned memory structures. */
7656 class extraction_insn insn;
7657 unsigned int inner_size;
7658 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7659 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7661 wanted_inner_reg_mode = insn.struct_mode.require ();
7662 pos_mode = insn.pos_mode;
7663 extraction_mode = insn.field_mode;
7666 /* Never narrow an object, since that might not be safe. */
7668 if (mode != VOIDmode
7669 && partial_subreg_p (extraction_mode, mode))
7670 extraction_mode = mode;
7672 /* Punt if len is too large for extraction_mode. */
7673 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7674 return NULL_RTX;
7676 if (!MEM_P (inner))
7677 wanted_inner_mode = wanted_inner_reg_mode;
7678 else
7680 /* Be careful not to go beyond the extracted object and maintain the
7681 natural alignment of the memory. */
7682 wanted_inner_mode = smallest_int_mode_for_size (len);
7683 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7684 > GET_MODE_BITSIZE (wanted_inner_mode))
7685 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7688 orig_pos = pos;
7690 if (BITS_BIG_ENDIAN)
7692 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7693 BITS_BIG_ENDIAN style. If position is constant, compute new
7694 position. Otherwise, build subtraction.
7695 Note that POS is relative to the mode of the original argument.
7696 If it's a MEM we need to recompute POS relative to that.
7697 However, if we're extracting from (or inserting into) a register,
7698 we want to recompute POS relative to wanted_inner_mode. */
7699 int width;
7700 if (!MEM_P (inner))
7701 width = GET_MODE_BITSIZE (wanted_inner_mode);
7702 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7703 return NULL_RTX;
7705 if (pos_rtx == 0)
7706 pos = width - len - pos;
7707 else
7708 pos_rtx
7709 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7710 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7711 pos_rtx);
7712 /* POS may be less than 0 now, but we check for that below.
7713 Note that it can only be less than 0 if !MEM_P (inner). */
7716 /* If INNER has a wider mode, and this is a constant extraction, try to
7717 make it smaller and adjust the byte to point to the byte containing
7718 the value. */
7719 if (wanted_inner_mode != VOIDmode
7720 && inner_mode != wanted_inner_mode
7721 && ! pos_rtx
7722 && partial_subreg_p (wanted_inner_mode, is_mode)
7723 && MEM_P (inner)
7724 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7725 && ! MEM_VOLATILE_P (inner))
7727 poly_int64 offset = 0;
7729 /* The computations below will be correct if the machine is big
7730 endian in both bits and bytes or little endian in bits and bytes.
7731 If it is mixed, we must adjust. */
7733 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7734 adjust OFFSET to compensate. */
7735 if (BYTES_BIG_ENDIAN
7736 && paradoxical_subreg_p (is_mode, inner_mode))
7737 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7739 /* We can now move to the desired byte. */
7740 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7741 * GET_MODE_SIZE (wanted_inner_mode);
7742 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7744 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7745 && is_mode != wanted_inner_mode)
7746 offset = (GET_MODE_SIZE (is_mode)
7747 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7749 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7752 /* If INNER is not memory, get it into the proper mode. If we are changing
7753 its mode, POS must be a constant and smaller than the size of the new
7754 mode. */
7755 else if (!MEM_P (inner))
7757 /* On the LHS, don't create paradoxical subregs implicitly truncating
7758 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7759 if (in_dest
7760 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7761 wanted_inner_mode))
7762 return NULL_RTX;
7764 if (GET_MODE (inner) != wanted_inner_mode
7765 && (pos_rtx != 0
7766 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7767 return NULL_RTX;
7769 if (orig_pos < 0)
7770 return NULL_RTX;
7772 inner = force_to_mode (inner, wanted_inner_mode,
7773 pos_rtx
7774 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7775 ? HOST_WIDE_INT_M1U
7776 : (((HOST_WIDE_INT_1U << len) - 1)
7777 << orig_pos), 0);
7781 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7782 have to zero extend. Otherwise, we can just use a SUBREG.
7784 We dealt with constant rtxes earlier, so pos_rtx cannot
7785 have VOIDmode at this point. */
7786 if (pos_rtx != 0
7787 && (GET_MODE_SIZE (pos_mode)
7788 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7790 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7791 GET_MODE (pos_rtx));
7793 /* If we know that no extraneous bits are set, and that the high
7794 bit is not set, convert the extraction to the cheaper one, either
7795 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7796 cases. */
7797 if (flag_expensive_optimizations
7798 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7799 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7800 & ~(((unsigned HOST_WIDE_INT)
7801 GET_MODE_MASK (GET_MODE (pos_rtx)))
7802 >> 1))
7803 == 0)))
7805 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7806 GET_MODE (pos_rtx));
7808 /* Prefer ZERO_EXTENSION, since it gives more information to
7809 backends. */
7810 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7811 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7812 temp = temp1;
7814 pos_rtx = temp;
7817 /* Make POS_RTX unless we already have it and it is correct. If we don't
7818 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7819 be a CONST_INT. */
7820 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7821 pos_rtx = orig_pos_rtx;
7823 else if (pos_rtx == 0)
7824 pos_rtx = GEN_INT (pos);
7826 /* Make the required operation. See if we can use existing rtx. */
7827 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7828 extraction_mode, inner, GEN_INT (len), pos_rtx);
7829 if (! in_dest)
7830 new_rtx = gen_lowpart (mode, new_rtx);
7832 return new_rtx;
7835 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7836 can be commuted with any other operations in X. Return X without
7837 that shift if so. */
7839 static rtx
7840 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7842 enum rtx_code code = GET_CODE (x);
7843 rtx tem;
7845 switch (code)
7847 case ASHIFT:
7848 /* This is the shift itself. If it is wide enough, we will return
7849 either the value being shifted if the shift count is equal to
7850 COUNT or a shift for the difference. */
7851 if (CONST_INT_P (XEXP (x, 1))
7852 && INTVAL (XEXP (x, 1)) >= count)
7853 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7854 INTVAL (XEXP (x, 1)) - count);
7855 break;
7857 case NEG: case NOT:
7858 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7859 return simplify_gen_unary (code, mode, tem, mode);
7861 break;
7863 case PLUS: case IOR: case XOR: case AND:
7864 /* If we can safely shift this constant and we find the inner shift,
7865 make a new operation. */
7866 if (CONST_INT_P (XEXP (x, 1))
7867 && (UINTVAL (XEXP (x, 1))
7868 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7869 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7871 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7872 return simplify_gen_binary (code, mode, tem,
7873 gen_int_mode (val, mode));
7875 break;
7877 default:
7878 break;
7881 return 0;
7884 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7885 level of the expression and MODE is its mode. IN_CODE is as for
7886 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7887 that should be used when recursing on operands of *X_PTR.
7889 There are two possible actions:
7891 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7892 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7894 - Return a new rtx, which the caller returns directly. */
7896 static rtx
7897 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7898 enum rtx_code in_code,
7899 enum rtx_code *next_code_ptr)
7901 rtx x = *x_ptr;
7902 enum rtx_code next_code = *next_code_ptr;
7903 enum rtx_code code = GET_CODE (x);
7904 int mode_width = GET_MODE_PRECISION (mode);
7905 rtx rhs, lhs;
7906 rtx new_rtx = 0;
7907 int i;
7908 rtx tem;
7909 scalar_int_mode inner_mode;
7910 bool equality_comparison = false;
7912 if (in_code == EQ)
7914 equality_comparison = true;
7915 in_code = COMPARE;
7918 /* Process depending on the code of this operation. If NEW is set
7919 nonzero, it will be returned. */
7921 switch (code)
7923 case ASHIFT:
7924 /* Convert shifts by constants into multiplications if inside
7925 an address. */
7926 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7927 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7928 && INTVAL (XEXP (x, 1)) >= 0)
7930 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7931 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7933 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7934 if (GET_CODE (new_rtx) == NEG)
7936 new_rtx = XEXP (new_rtx, 0);
7937 multval = -multval;
7939 multval = trunc_int_for_mode (multval, mode);
7940 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7942 break;
7944 case PLUS:
7945 lhs = XEXP (x, 0);
7946 rhs = XEXP (x, 1);
7947 lhs = make_compound_operation (lhs, next_code);
7948 rhs = make_compound_operation (rhs, next_code);
7949 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7951 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7952 XEXP (lhs, 1));
7953 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7955 else if (GET_CODE (lhs) == MULT
7956 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7958 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7959 simplify_gen_unary (NEG, mode,
7960 XEXP (lhs, 1),
7961 mode));
7962 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7964 else
7966 SUBST (XEXP (x, 0), lhs);
7967 SUBST (XEXP (x, 1), rhs);
7969 maybe_swap_commutative_operands (x);
7970 return x;
7972 case MINUS:
7973 lhs = XEXP (x, 0);
7974 rhs = XEXP (x, 1);
7975 lhs = make_compound_operation (lhs, next_code);
7976 rhs = make_compound_operation (rhs, next_code);
7977 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7979 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7980 XEXP (rhs, 1));
7981 return simplify_gen_binary (PLUS, mode, tem, lhs);
7983 else if (GET_CODE (rhs) == MULT
7984 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7986 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7987 simplify_gen_unary (NEG, mode,
7988 XEXP (rhs, 1),
7989 mode));
7990 return simplify_gen_binary (PLUS, mode, tem, lhs);
7992 else
7994 SUBST (XEXP (x, 0), lhs);
7995 SUBST (XEXP (x, 1), rhs);
7996 return x;
7999 case AND:
8000 /* If the second operand is not a constant, we can't do anything
8001 with it. */
8002 if (!CONST_INT_P (XEXP (x, 1)))
8003 break;
8005 /* If the constant is a power of two minus one and the first operand
8006 is a logical right shift, make an extraction. */
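      /* E.g. (and:SI (lshiftrt:SI X (const_int 3)) (const_int 255)),
	 with X a placeholder, selects 8 bits starting at bit 3 and so
	 typically becomes (zero_extract:SI X (const_int 8) (const_int 3)). */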
8007 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8008 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8010 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8011 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8012 i, 1, 0, in_code == COMPARE);
8015 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8016 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8017 && subreg_lowpart_p (XEXP (x, 0))
8018 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8019 &inner_mode)
8020 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8021 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8023 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8024 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8025 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8026 XEXP (inner_x0, 1),
8027 i, 1, 0, in_code == COMPARE);
8029 /* If we narrowed the mode when dropping the subreg, then we lose. */
8030 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8031 new_rtx = NULL;
8033 /* If that didn't give anything, see if the AND simplifies on
8034 its own. */
8035 if (!new_rtx && i >= 0)
8037 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8038 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8039 0, in_code == COMPARE);
8042 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8043 else if ((GET_CODE (XEXP (x, 0)) == XOR
8044 || GET_CODE (XEXP (x, 0)) == IOR)
8045 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8046 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8047 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8049 /* Apply the distributive law, and then try to make extractions. */
8050 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8051 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8052 XEXP (x, 1)),
8053 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8054 XEXP (x, 1)));
8055 new_rtx = make_compound_operation (new_rtx, in_code);
8058 /* If we have (and (rotate X C) M) and C is larger than the number
8059 of bits in M, this is an extraction. */
8061 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8062 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8063 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8064 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8066 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8067 new_rtx = make_extraction (mode, new_rtx,
8068 (GET_MODE_PRECISION (mode)
8069 - INTVAL (XEXP (XEXP (x, 0), 1))),
8070 NULL_RTX, i, 1, 0, in_code == COMPARE);
8073 /* On machines without logical shifts, if the operand of the AND is
8074 a logical shift and our mask turns off all the propagated sign
8075 bits, we can replace the logical shift with an arithmetic shift. */
8076 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8077 && !have_insn_for (LSHIFTRT, mode)
8078 && have_insn_for (ASHIFTRT, mode)
8079 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8080 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8081 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8082 && mode_width <= HOST_BITS_PER_WIDE_INT)
8084 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8086 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8087 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8088 SUBST (XEXP (x, 0),
8089 gen_rtx_ASHIFTRT (mode,
8090 make_compound_operation (XEXP (XEXP (x, 0), 0),
8093 next_code),
8094 XEXP (XEXP (x, 0), 1)));
8097 /* If the constant is one less than a power of two, this might be
8098 representable by an extraction even if no shift is present.
8099 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8100 we are in a COMPARE. */
8101 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8102 new_rtx = make_extraction (mode,
8103 make_compound_operation (XEXP (x, 0),
8104 next_code),
8105 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8107 /* If we are in a comparison and this is an AND with a power of two,
8108 convert this into the appropriate bit extract. */
8109 else if (in_code == COMPARE
8110 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8111 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8112 new_rtx = make_extraction (mode,
8113 make_compound_operation (XEXP (x, 0),
8114 next_code),
8115 i, NULL_RTX, 1, 1, 0, 1);
8117 /* If the first operand is a paradoxical subreg of a register or memory and
8118 the constant (limited to the smaller mode) has only zero bits where
8119 the sub expression has known zero bits, this can be expressed as
8120 a zero_extend. */
8121 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8123 rtx sub;
8125 sub = XEXP (XEXP (x, 0), 0);
8126 machine_mode sub_mode = GET_MODE (sub);
8127 int sub_width;
8128 if ((REG_P (sub) || MEM_P (sub))
8129 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8130 && sub_width < mode_width)
8132 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8133 unsigned HOST_WIDE_INT mask;
8135 /* The original AND constant with all the known zero bits set. */
8136 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8137 if ((mask & mode_mask) == mode_mask)
8139 new_rtx = make_compound_operation (sub, next_code);
8140 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8141 1, 0, in_code == COMPARE);
8146 break;
8148 case LSHIFTRT:
8149 /* If the sign bit is known to be zero, replace this with an
8150 arithmetic shift. */
8151 if (have_insn_for (ASHIFTRT, mode)
8152 && ! have_insn_for (LSHIFTRT, mode)
8153 && mode_width <= HOST_BITS_PER_WIDE_INT
8154 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8156 new_rtx = gen_rtx_ASHIFTRT (mode,
8157 make_compound_operation (XEXP (x, 0),
8158 next_code),
8159 XEXP (x, 1));
8160 break;
8163 /* fall through */
8165 case ASHIFTRT:
8166 lhs = XEXP (x, 0);
8167 rhs = XEXP (x, 1);
8169 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8170 this is a SIGN_EXTRACT. */
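      /* E.g. (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)),
	 with X a placeholder, extracts the low 8 bits of X with sign
	 extension and on most targets comes back as
	 (sign_extend:SI (subreg:QI X 0)). */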
8171 if (CONST_INT_P (rhs)
8172 && GET_CODE (lhs) == ASHIFT
8173 && CONST_INT_P (XEXP (lhs, 1))
8174 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8175 && INTVAL (XEXP (lhs, 1)) >= 0
8176 && INTVAL (rhs) < mode_width)
8178 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8179 new_rtx = make_extraction (mode, new_rtx,
8180 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8181 NULL_RTX, mode_width - INTVAL (rhs),
8182 code == LSHIFTRT, 0, in_code == COMPARE);
8183 break;
8186 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8187 If so, try to merge the shifts into a SIGN_EXTEND. We could
8188 also do this for some cases of SIGN_EXTRACT, but it doesn't
8189 seem worth the effort; the case checked for occurs on Alpha. */
8191 if (!OBJECT_P (lhs)
8192 && ! (GET_CODE (lhs) == SUBREG
8193 && (OBJECT_P (SUBREG_REG (lhs))))
8194 && CONST_INT_P (rhs)
8195 && INTVAL (rhs) >= 0
8196 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8197 && INTVAL (rhs) < mode_width
8198 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8199 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8200 next_code),
8201 0, NULL_RTX, mode_width - INTVAL (rhs),
8202 code == LSHIFTRT, 0, in_code == COMPARE);
8204 break;
8206 case SUBREG:
8207 /* Call ourselves recursively on the inner expression. If we are
8208 narrowing the object and it has a different RTL code from
8209 what it originally did, do this SUBREG as a force_to_mode. */
8211 rtx inner = SUBREG_REG (x), simplified;
8212 enum rtx_code subreg_code = in_code;
8214 /* If the SUBREG is masking of a logical right shift,
8215 make an extraction. */
8216 if (GET_CODE (inner) == LSHIFTRT
8217 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8218 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8219 && CONST_INT_P (XEXP (inner, 1))
8220 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8221 && subreg_lowpart_p (x))
8223 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8224 int width = GET_MODE_PRECISION (inner_mode)
8225 - INTVAL (XEXP (inner, 1));
8226 if (width > mode_width)
8227 width = mode_width;
8228 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8229 width, 1, 0, in_code == COMPARE);
8230 break;
8233 /* If in_code is COMPARE, it isn't always safe to pass it through
8234 to the recursive make_compound_operation call. */
8235 if (subreg_code == COMPARE
8236 && (!subreg_lowpart_p (x)
8237 || GET_CODE (inner) == SUBREG
8238 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8239 is (const_int 0), rather than
8240 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8241 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8242 for non-equality comparisons against 0 is not equivalent
8243 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8244 || (GET_CODE (inner) == AND
8245 && CONST_INT_P (XEXP (inner, 1))
8246 && partial_subreg_p (x)
8247 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8248 >= GET_MODE_BITSIZE (mode) - 1)))
8249 subreg_code = SET;
8251 tem = make_compound_operation (inner, subreg_code);
8253 simplified
8254 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8255 if (simplified)
8256 tem = simplified;
8258 if (GET_CODE (tem) != GET_CODE (inner)
8259 && partial_subreg_p (x)
8260 && subreg_lowpart_p (x))
8262 rtx newer
8263 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8265 /* If we have something other than a SUBREG, we might have
8266 done an expansion, so rerun ourselves. */
8267 if (GET_CODE (newer) != SUBREG)
8268 newer = make_compound_operation (newer, in_code);
8270 /* force_to_mode can expand compounds. If it just re-expanded
8271 the compound, use gen_lowpart to convert to the desired
8272 mode. */
8273 if (rtx_equal_p (newer, x)
8274 /* Likewise if it re-expanded the compound only partially.
8275 This happens for SUBREG of ZERO_EXTRACT if they extract
8276 the same number of bits. */
8277 || (GET_CODE (newer) == SUBREG
8278 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8279 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8280 && GET_CODE (inner) == AND
8281 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8282 return gen_lowpart (GET_MODE (x), tem);
8284 return newer;
8287 if (simplified)
8288 return tem;
8290 break;
8292 default:
8293 break;
8296 if (new_rtx)
8297 *x_ptr = gen_lowpart (mode, new_rtx);
8298 *next_code_ptr = next_code;
8299 return NULL_RTX;
8302 /* Look at the expression rooted at X. Look for expressions
8303 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8304 Form these expressions.
8306 Return the new rtx, usually just X.
8308 Also, for machines like the VAX that don't have logical shift insns,
8309 try to convert logical to arithmetic shift operations in cases where
8310 they are equivalent. This undoes the canonicalizations to logical
8311 shifts done elsewhere.
8313 We try, as much as possible, to re-use rtl expressions to save memory.
8315 IN_CODE says what kind of expression we are processing. Normally, it is
8316 SET. In a memory address it is MEM. When processing the arguments of
8317 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8318 precisely it is an equality comparison against zero. */
rtx
8321 make_compound_operation (rtx x, enum rtx_code in_code)
8323 enum rtx_code code = GET_CODE (x);
8324 const char *fmt;
8325 int i, j;
8326 enum rtx_code next_code;
8327 rtx new_rtx, tem;
8329 /* Select the code to be used in recursive calls. Once we are inside an
8330 address, we stay there. If we have a comparison, set to COMPARE,
8331 but once inside, go back to our default of SET. */
8333 next_code = (code == MEM ? MEM
8334 : ((code == COMPARE || COMPARISON_P (x))
8335 && XEXP (x, 1) == const0_rtx) ? COMPARE
8336 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8338 scalar_int_mode mode;
8339 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8341 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8342 &next_code);
8343 if (new_rtx)
8344 return new_rtx;
8345 code = GET_CODE (x);
8348 /* Now recursively process each operand of this operation. We need to
8349 handle ZERO_EXTEND specially so that we don't lose track of the
8350 inner mode. */
8351 if (code == ZERO_EXTEND)
8353 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8354 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8355 new_rtx, GET_MODE (XEXP (x, 0)));
8356 if (tem)
8357 return tem;
8358 SUBST (XEXP (x, 0), new_rtx);
8359 return x;
8362 fmt = GET_RTX_FORMAT (code);
8363 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8364 if (fmt[i] == 'e')
8366 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8367 SUBST (XEXP (x, i), new_rtx);
8369 else if (fmt[i] == 'E')
8370 for (j = 0; j < XVECLEN (x, i); j++)
8372 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8373 SUBST (XVECEXP (x, i, j), new_rtx);
8376 maybe_swap_commutative_operands (x);
8377 return x;
8380 /* Given M see if it is a value that would select a field of bits
8381 within an item, but not the entire word. Return -1 if not.
8382 Otherwise, return the starting position of the field, where 0 is the
8383 low-order bit.
8385 *PLEN is set to the length of the field. */
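/* For example, M == 0x0f0 yields position 4 with *PLEN == 4, whereas a
   mask with a hole in it such as 0x0a (binary 1010) returns -1 because
   the selected bits are not contiguous. */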
8387 static int
8388 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8390 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8391 int pos = m ? ctz_hwi (m) : -1;
8392 int len = 0;
8394 if (pos >= 0)
8395 /* Now shift off the low-order zero bits and see if we have a
8396 power of two minus 1. */
8397 len = exact_log2 ((m >> pos) + 1);
8399 if (len <= 0)
8400 pos = -1;
8402 *plen = len;
8403 return pos;
8406 /* If X refers to a register that equals REG in value, replace these
8407 references with REG. */
8408 static rtx
8409 canon_reg_for_combine (rtx x, rtx reg)
8411 rtx op0, op1, op2;
8412 const char *fmt;
8413 int i;
8414 bool copied;
8416 enum rtx_code code = GET_CODE (x);
8417 switch (GET_RTX_CLASS (code))
8419 case RTX_UNARY:
8420 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8421 if (op0 != XEXP (x, 0))
8422 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8423 GET_MODE (reg));
8424 break;
8426 case RTX_BIN_ARITH:
8427 case RTX_COMM_ARITH:
8428 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8429 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8430 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8431 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8432 break;
8434 case RTX_COMPARE:
8435 case RTX_COMM_COMPARE:
8436 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8437 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8438 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8439 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8440 GET_MODE (op0), op0, op1);
8441 break;
8443 case RTX_TERNARY:
8444 case RTX_BITFIELD_OPS:
8445 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8446 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8447 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8448 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8449 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8450 GET_MODE (op0), op0, op1, op2);
8451 /* FALLTHRU */
8453 case RTX_OBJ:
8454 if (REG_P (x))
8456 if (rtx_equal_p (get_last_value (reg), x)
8457 || rtx_equal_p (reg, get_last_value (x)))
8458 return reg;
8459 else
8460 break;
8463 /* fall through */
8465 default:
8466 fmt = GET_RTX_FORMAT (code);
8467 copied = false;
8468 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8469 if (fmt[i] == 'e')
8471 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8472 if (op != XEXP (x, i))
8474 if (!copied)
8476 copied = true;
8477 x = copy_rtx (x);
8479 XEXP (x, i) = op;
8482 else if (fmt[i] == 'E')
8484 int j;
8485 for (j = 0; j < XVECLEN (x, i); j++)
8487 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8488 if (op != XVECEXP (x, i, j))
8490 if (!copied)
8492 copied = true;
8493 x = copy_rtx (x);
8495 XVECEXP (x, i, j) = op;
8500 break;
8503 return x;
8506 /* Return X converted to MODE. If the value is already truncated to
8507 MODE we can just return a subreg even though in the general case we
8508 would need an explicit truncation. */
8510 static rtx
8511 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8513 if (!CONST_INT_P (x)
8514 && partial_subreg_p (mode, GET_MODE (x))
8515 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8516 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8518 /* Bit-cast X into an integer mode. */
8519 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8520 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8521 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8522 x, GET_MODE (x));
8525 return gen_lowpart (mode, x);
8528 /* See if X can be simplified knowing that we will only refer to it in
8529 MODE and will only refer to those bits that are nonzero in MASK.
8530 If other bits are being computed or if masking operations are done
8531 that select a superset of the bits in MASK, they can sometimes be
8532 ignored.
8534 Return a possibly simplified expression, but always convert X to
8535 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8537 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8538 are all off in X. This is used when X will be complemented, by either
8539 NOT, NEG, or XOR. */
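/* Roughly, forcing (and:SI X (const_int 0xff00ff)) to SImode with MASK
   0xff first ANDs the constant with MASK; because the result is exactly
   MASK, the now-redundant AND can be dropped and X itself is used, since
   the caller only looks at the low 8 bits. */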
8541 static rtx
8542 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8543 int just_select)
8545 enum rtx_code code = GET_CODE (x);
8546 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8547 machine_mode op_mode;
8548 unsigned HOST_WIDE_INT nonzero;
8550 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8551 code below will do the wrong thing since the mode of such an
8552 expression is VOIDmode.
8554 Also do nothing if X is a CLOBBER; this can happen if X was
8555 the return value from a call to gen_lowpart. */
8556 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8557 return x;
8559 /* We want to perform the operation in its present mode unless we know
8560 that the operation is valid in MODE, in which case we do the operation
8561 in MODE. */
8562 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8563 && have_insn_for (code, mode))
8564 ? mode : GET_MODE (x));
8566 /* It is not valid to do a right-shift in a narrower mode
8567 than the one it came in with. */
8568 if ((code == LSHIFTRT || code == ASHIFTRT)
8569 && partial_subreg_p (mode, GET_MODE (x)))
8570 op_mode = GET_MODE (x);
8572 /* Truncate MASK to fit OP_MODE. */
8573 if (op_mode)
8574 mask &= GET_MODE_MASK (op_mode);
8576 /* Determine what bits of X are guaranteed to be (non)zero. */
8577 nonzero = nonzero_bits (x, mode);
8579 /* If none of the bits in X are needed, return a zero. */
8580 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8581 x = const0_rtx;
8583 /* If X is a CONST_INT, return a new one. Do this here since the
8584 test below will fail. */
8585 if (CONST_INT_P (x))
8587 if (SCALAR_INT_MODE_P (mode))
8588 return gen_int_mode (INTVAL (x) & mask, mode);
8589 else
8591 x = GEN_INT (INTVAL (x) & mask);
8592 return gen_lowpart_common (mode, x);
8596 /* If X is narrower than MODE and we want all the bits in X's mode, just
8597 get X in the proper mode. */
8598 if (paradoxical_subreg_p (mode, GET_MODE (x))
8599 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8600 return gen_lowpart (mode, x);
8602 /* We can ignore the effect of a SUBREG if it narrows the mode or
8603 if the constant masks to zero all the bits the mode doesn't have. */
8604 if (GET_CODE (x) == SUBREG
8605 && subreg_lowpart_p (x)
8606 && (partial_subreg_p (x)
8607 || (mask
8608 & GET_MODE_MASK (GET_MODE (x))
8609 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8610 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8612 scalar_int_mode int_mode, xmode;
8613 if (is_a <scalar_int_mode> (mode, &int_mode)
8614 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8615 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8616 integer too. */
8617 return force_int_to_mode (x, int_mode, xmode,
8618 as_a <scalar_int_mode> (op_mode),
8619 mask, just_select);
8621 return gen_lowpart_or_truncate (mode, x);
8624 /* Subroutine of force_to_mode that handles cases in which both X and
8625 the result are scalar integers. MODE is the mode of the result,
8626 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8627 is preferred for simplified versions of X. The other arguments
8628 are as for force_to_mode. */
8630 static rtx
8631 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8632 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8633 int just_select)
8635 enum rtx_code code = GET_CODE (x);
8636 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8637 unsigned HOST_WIDE_INT fuller_mask;
8638 rtx op0, op1, temp;
8639 poly_int64 const_op0;
8641 /* When we have an arithmetic operation, or a shift whose count we
8642 do not know, we need to assume that all bits up to the highest-order
8643 bit in MASK will be needed. This is how we form such a mask. */
8644 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8645 fuller_mask = HOST_WIDE_INT_M1U;
8646 else
8647 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8648 - 1);
8650 switch (code)
8652 case CLOBBER:
8653 /* If X is a (clobber (const_int)), return it since we know we are
8654 generating something that won't match. */
8655 return x;
8657 case SIGN_EXTEND:
8658 case ZERO_EXTEND:
8659 case ZERO_EXTRACT:
8660 case SIGN_EXTRACT:
8661 x = expand_compound_operation (x);
8662 if (GET_CODE (x) != code)
8663 return force_to_mode (x, mode, mask, next_select);
8664 break;
8666 case TRUNCATE:
8667 /* Similarly for a truncate. */
8668 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8670 case AND:
8671 /* If this is an AND with a constant, convert it into an AND
8672 whose constant is the AND of that constant with MASK. If it
8673 remains an AND of MASK, delete it since it is redundant. */
8675 if (CONST_INT_P (XEXP (x, 1)))
8677 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8678 mask & INTVAL (XEXP (x, 1)));
8679 xmode = op_mode;
8681 /* If X is still an AND, see if it is an AND with a mask that
8682 is just some low-order bits. If so, and it is MASK, we don't
8683 need it. */
8685 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8686 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8687 x = XEXP (x, 0);
8689 /* If it remains an AND, try making another AND with the bits
8690 in the mode mask that aren't in MASK turned on. If the
8691 constant in the AND is wide enough, this might make a
8692 cheaper constant. */
8694 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8695 && GET_MODE_MASK (xmode) != mask
8696 && HWI_COMPUTABLE_MODE_P (xmode))
8698 unsigned HOST_WIDE_INT cval
8699 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8700 rtx y;
8702 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8703 gen_int_mode (cval, xmode));
8704 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8705 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8706 x = y;
8709 break;
8712 goto binop;
8714 case PLUS:
8715 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8716 low-order bits (as in an alignment operation) and FOO is already
8717 aligned to that boundary, mask C1 to that boundary as well.
8718 This may eliminate that PLUS and, later, the AND. */
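 /* Illustrative instance (hypothetical values): with MASK -8 and FOO known
 to be 8-byte aligned, (and (plus FOO (const_int 12)) (const_int -8))
 masks C1 down to 8, giving (and (plus FOO (const_int 8)) (const_int -8));
 with C1 = 4 the PLUS disappears entirely, and the AND can then be seen
 to be redundant.  */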
8721 unsigned int width = GET_MODE_PRECISION (mode);
8722 unsigned HOST_WIDE_INT smask = mask;
8724 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8725 number, sign extend it. */
8727 if (width < HOST_BITS_PER_WIDE_INT
8728 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8729 smask |= HOST_WIDE_INT_M1U << width;
8731 if (CONST_INT_P (XEXP (x, 1))
8732 && pow2p_hwi (- smask)
8733 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8734 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8735 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8736 (INTVAL (XEXP (x, 1)) & smask)),
8737 mode, smask, next_select);
8740 /* fall through */
8742 case MULT:
8743 /* Substituting into the operands of a widening MULT is not likely to
8744 create RTL matching a machine insn. */
8745 if (code == MULT
8746 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8747 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8748 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8749 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8750 && REG_P (XEXP (XEXP (x, 0), 0))
8751 && REG_P (XEXP (XEXP (x, 1), 0)))
8752 return gen_lowpart_or_truncate (mode, x);
8754 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8755 most significant bit in MASK since carries from those bits will
8756 affect the bits we are interested in. */
8757 mask = fuller_mask;
8758 goto binop;
8760 case MINUS:
8761 /* If X is (minus C Y) where C's least set bit is larger than any bit
8762 in the mask, then we may replace with (neg Y). */
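 /* For instance (illustrative values): with MASK 7,
 (minus (const_int 16) Y) can become (neg Y), since 16 is 0 modulo 8
 and only the low three bits are needed.  */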
8763 if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8764 && known_alignment (poly_uint64 (const_op0)) > mask)
8766 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8767 return force_to_mode (x, mode, mask, next_select);
8770 /* Similarly, if C contains every bit in the fuller_mask, then we may
8771 replace with (not Y). */
8772 if (CONST_INT_P (XEXP (x, 0))
8773 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8775 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8776 return force_to_mode (x, mode, mask, next_select);
8779 mask = fuller_mask;
8780 goto binop;
8782 case IOR:
8783 case XOR:
8784 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8785 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8786 operation which may be a bitfield extraction. Ensure that the
8787 constant we form is not wider than the mode of X. */
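 /* A sketch of the transformation (hypothetical constants): under an 8-bit
 MASK, (ior (lshiftrt FOO (const_int 8)) (const_int 0x12)) can be rewritten
 as (lshiftrt (ior FOO (const_int 0x1200)) (const_int 8)), moving the IOR
 inside the shift so the outer AND may become a bit-field extraction.  */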
8789 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8790 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8791 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8792 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8793 && CONST_INT_P (XEXP (x, 1))
8794 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8795 + floor_log2 (INTVAL (XEXP (x, 1))))
8796 < GET_MODE_PRECISION (xmode))
8797 && (UINTVAL (XEXP (x, 1))
8798 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8800 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8801 << INTVAL (XEXP (XEXP (x, 0), 1)),
8802 xmode);
8803 temp = simplify_gen_binary (GET_CODE (x), xmode,
8804 XEXP (XEXP (x, 0), 0), temp);
8805 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8806 XEXP (XEXP (x, 0), 1));
8807 return force_to_mode (x, mode, mask, next_select);
8810 binop:
8811 /* For most binary operations, just propagate into the operation and
8812 change the mode if we have an operation of that mode. */
8814 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8815 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8817 /* If we ended up truncating both operands, truncate the result of the
8818 operation instead. */
8819 if (GET_CODE (op0) == TRUNCATE
8820 && GET_CODE (op1) == TRUNCATE)
8822 op0 = XEXP (op0, 0);
8823 op1 = XEXP (op1, 0);
8826 op0 = gen_lowpart_or_truncate (op_mode, op0);
8827 op1 = gen_lowpart_or_truncate (op_mode, op1);
8829 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8831 x = simplify_gen_binary (code, op_mode, op0, op1);
8832 xmode = op_mode;
8834 break;
8836 case ASHIFT:
8837 /* For left shifts, do the same, but just for the first operand.
8838 However, we cannot do anything with shifts where we cannot
8839 guarantee that the counts are smaller than the size of the mode
8840 because such a count will have a different meaning in a
8841 wider mode. */
8843 if (! (CONST_INT_P (XEXP (x, 1))
8844 && INTVAL (XEXP (x, 1)) >= 0
8845 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8846 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8847 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8848 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8849 break;
8851 /* If the shift count is a constant and we can do arithmetic in
8852 the mode of the shift, refine which bits we need. Otherwise, use the
8853 conservative form of the mask. */
8854 if (CONST_INT_P (XEXP (x, 1))
8855 && INTVAL (XEXP (x, 1)) >= 0
8856 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8857 && HWI_COMPUTABLE_MODE_P (op_mode))
8858 mask >>= INTVAL (XEXP (x, 1));
8859 else
8860 mask = fuller_mask;
8862 op0 = gen_lowpart_or_truncate (op_mode,
8863 force_to_mode (XEXP (x, 0), mode,
8864 mask, next_select));
8866 if (op_mode != xmode || op0 != XEXP (x, 0))
8868 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8869 xmode = op_mode;
8871 break;
8873 case LSHIFTRT:
8874 /* Here we can only do something if the shift count is a constant,
8875 this shift constant is valid for the host, and we can do arithmetic
8876 in OP_MODE. */
8878 if (CONST_INT_P (XEXP (x, 1))
8879 && INTVAL (XEXP (x, 1)) >= 0
8880 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8881 && HWI_COMPUTABLE_MODE_P (op_mode))
8883 rtx inner = XEXP (x, 0);
8884 unsigned HOST_WIDE_INT inner_mask;
8886 /* Select the mask of the bits we need for the shift operand. */
8887 inner_mask = mask << INTVAL (XEXP (x, 1));
8889 /* We can only change the mode of the shift if we can do arithmetic
8890 in the mode of the shift and INNER_MASK is no wider than the
8891 width of X's mode. */
8892 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8893 op_mode = xmode;
8895 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8897 if (xmode != op_mode || inner != XEXP (x, 0))
8899 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8900 xmode = op_mode;
8904 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8905 shift and AND produces only copies of the sign bit (C2 is one less
8906 than a power of two), we can do this with just a shift. */
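 /* Worked example (assumed operand properties): in SImode, if FOO is known
 to fit in 8 signed bits (25 sign-bit copies), then under MASK 0x7f
 (lshiftrt FOO (const_int 24)) can become (lshiftrt FOO (const_int 25));
 both yield seven copies of FOO's sign bit.  */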
8908 if (GET_CODE (x) == LSHIFTRT
8909 && CONST_INT_P (XEXP (x, 1))
8910 /* The shift puts one of the sign bit copies in the least significant
8911 bit. */
8912 && ((INTVAL (XEXP (x, 1))
8913 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8914 >= GET_MODE_PRECISION (xmode))
8915 && pow2p_hwi (mask + 1)
8916 /* Number of bits left after the shift must be more than the mask
8917 needs. */
8918 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8919 <= GET_MODE_PRECISION (xmode))
8920 /* Must be more sign bit copies than the mask needs. */
8921 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8922 >= exact_log2 (mask + 1)))
8924 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
8925 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8926 gen_int_shift_amount (xmode, nbits));
8928 goto shiftrt;
8930 case ASHIFTRT:
8931 /* If we are just looking for the sign bit, we don't need this shift at
8932 all, even if it has a variable count. */
8933 if (val_signbit_p (xmode, mask))
8934 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8936 /* If this is a shift by a constant, get a mask that contains those bits
8937 that are not copies of the sign bit. We then have two cases: If
8938 MASK only includes those bits, this can be a logical shift, which may
8939 allow simplifications. If MASK is a single-bit field not within
8940 those bits, we are requesting a copy of the sign bit and hence can
8941 shift the sign bit to the appropriate location. */
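 /* For example (illustrative values): in SImode with MASK 0x10, only bit 4
 of (ashiftrt FOO (const_int 30)) is wanted, and that bit is a copy of
 FOO's sign bit, so (lshiftrt FOO (const_int 27)) places the sign bit at
 bit 4 directly.  */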
8943 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8944 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8946 unsigned HOST_WIDE_INT nonzero;
8947 int i;
8949 /* If the considered data is wider than HOST_WIDE_INT, we can't
8950 represent a mask for all its bits in a single scalar.
8951 But we only care about the lower bits, so calculate these. */
8953 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8955 nonzero = HOST_WIDE_INT_M1U;
8957 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8958 is the number of bits a full-width mask would have set.
8959 We need only shift if these are fewer than nonzero can
8960 hold. If not, we must keep all bits set in nonzero. */
8962 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8963 < HOST_BITS_PER_WIDE_INT)
8964 nonzero >>= INTVAL (XEXP (x, 1))
8965 + HOST_BITS_PER_WIDE_INT
8966 - GET_MODE_PRECISION (xmode);
8968 else
8970 nonzero = GET_MODE_MASK (xmode);
8971 nonzero >>= INTVAL (XEXP (x, 1));
8974 if ((mask & ~nonzero) == 0)
8976 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8977 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8978 if (GET_CODE (x) != ASHIFTRT)
8979 return force_to_mode (x, mode, mask, next_select);
8982 else if ((i = exact_log2 (mask)) >= 0)
8984 x = simplify_shift_const
8985 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
8986 GET_MODE_PRECISION (xmode) - 1 - i);
8988 if (GET_CODE (x) != ASHIFTRT)
8989 return force_to_mode (x, mode, mask, next_select);
8993 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8994 even if the shift count isn't a constant. */
8995 if (mask == 1)
8996 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
8998 shiftrt:
9000 /* If this is a zero- or sign-extension operation that just affects bits
9001 we don't care about, remove it. Be sure the call above returned
9002 something that is still a shift. */
9004 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9005 && CONST_INT_P (XEXP (x, 1))
9006 && INTVAL (XEXP (x, 1)) >= 0
9007 && (INTVAL (XEXP (x, 1))
9008 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9009 && GET_CODE (XEXP (x, 0)) == ASHIFT
9010 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9011 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9012 next_select);
9014 break;
9016 case ROTATE:
9017 case ROTATERT:
9018 /* If the shift count is constant and we can do computations
9019 in the mode of X, compute where the bits we care about are.
9020 Otherwise, we can't do anything. Don't change the mode of
9021 the shift or propagate MODE into the shift, though. */
9022 if (CONST_INT_P (XEXP (x, 1))
9023 && INTVAL (XEXP (x, 1)) >= 0)
9025 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9026 xmode, gen_int_mode (mask, xmode),
9027 XEXP (x, 1));
9028 if (temp && CONST_INT_P (temp))
9029 x = simplify_gen_binary (code, xmode,
9030 force_to_mode (XEXP (x, 0), xmode,
9031 INTVAL (temp), next_select),
9032 XEXP (x, 1));
9034 break;
9036 case NEG:
9037 /* If we just want the low-order bit, the NEG isn't needed since it
9038 won't change the low-order bit. */
9039 if (mask == 1)
9040 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9042 /* We need any bits less significant than the most significant bit in
9043 MASK since carries from those bits will affect the bits we are
9044 interested in. */
9045 mask = fuller_mask;
9046 goto unop;
9048 case NOT:
9049 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9050 same as the XOR case above. Ensure that the constant we form is not
9051 wider than the mode of X. */
9053 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9054 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9055 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9056 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9057 < GET_MODE_PRECISION (xmode))
9058 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9060 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9061 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9062 x = simplify_gen_binary (LSHIFTRT, xmode,
9063 temp, XEXP (XEXP (x, 0), 1));
9065 return force_to_mode (x, mode, mask, next_select);
9068 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9069 use the full mask inside the NOT. */
9070 mask = fuller_mask;
9072 unop:
9073 op0 = gen_lowpart_or_truncate (op_mode,
9074 force_to_mode (XEXP (x, 0), mode, mask,
9075 next_select));
9076 if (op_mode != xmode || op0 != XEXP (x, 0))
9078 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9079 xmode = op_mode;
9081 break;
9083 case NE:
9084 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9085 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9086 which is equal to STORE_FLAG_VALUE. */
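 /* E.g. (illustrative): on a target where STORE_FLAG_VALUE is 1, if FOO is
 known to be 0 or 1 in this mode, then (ne FOO (const_int 0)) under MASK 1
 is simply FOO.  */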
9087 if ((mask & ~STORE_FLAG_VALUE) == 0
9088 && XEXP (x, 1) == const0_rtx
9089 && GET_MODE (XEXP (x, 0)) == mode
9090 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9091 && (nonzero_bits (XEXP (x, 0), mode)
9092 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9093 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9095 break;
9097 case IF_THEN_ELSE:
9098 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9099 written in a narrower mode. We play it safe and do not do so. */
9101 op0 = gen_lowpart_or_truncate (xmode,
9102 force_to_mode (XEXP (x, 1), mode,
9103 mask, next_select));
9104 op1 = gen_lowpart_or_truncate (xmode,
9105 force_to_mode (XEXP (x, 2), mode,
9106 mask, next_select));
9107 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9108 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9109 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9110 op0, op1);
9111 break;
9113 default:
9114 break;
9117 /* Ensure we return a value of the proper mode. */
9118 return gen_lowpart_or_truncate (mode, x);
9121 /* Return nonzero if X is an expression that has one of two values depending on
9122 whether some other value is zero or nonzero. In that case, we return the
9123 value that is being tested, *PTRUE is set to the value if the rtx being
9124 returned has a nonzero value, and *PFALSE is set to the other alternative.
9126 If we return zero, we set *PTRUE and *PFALSE to X. */
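 /* A minimal illustration: for X = (ne A (const_int 0)) this returns A with
 *PTRUE = const_true_rtx and *PFALSE = const0_rtx; for a constant X it
 returns zero and sets both to X.  */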
9128 static rtx
9129 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9131 machine_mode mode = GET_MODE (x);
9132 enum rtx_code code = GET_CODE (x);
9133 rtx cond0, cond1, true0, true1, false0, false1;
9134 unsigned HOST_WIDE_INT nz;
9135 scalar_int_mode int_mode;
9137 /* If we are comparing a value against zero, we are done. */
9138 if ((code == NE || code == EQ)
9139 && XEXP (x, 1) == const0_rtx)
9141 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9142 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9143 return XEXP (x, 0);
9146 /* If this is a unary operation whose operand has one of two values, apply
9147 our opcode to compute those values. */
9148 else if (UNARY_P (x)
9149 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9151 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9152 *pfalse = simplify_gen_unary (code, mode, false0,
9153 GET_MODE (XEXP (x, 0)));
9154 return cond0;
9157 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9158 make can't possibly match and would suppress other optimizations. */
9159 else if (code == COMPARE)
9162 /* If this is a binary operation, see if either side has only one of two
9163 values. If either one does or if both do and they are conditional on
9164 the same value, compute the new true and false values. */
9165 else if (BINARY_P (x))
9167 rtx op0 = XEXP (x, 0);
9168 rtx op1 = XEXP (x, 1);
9169 cond0 = if_then_else_cond (op0, &true0, &false0);
9170 cond1 = if_then_else_cond (op1, &true1, &false1);
9172 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9173 && (REG_P (op0) || REG_P (op1)))
9175 /* Try to enable a simplification by undoing work done by
9176 if_then_else_cond if it converted a REG into something more
9177 complex. */
9178 if (REG_P (op0))
9180 cond0 = 0;
9181 true0 = false0 = op0;
9183 else
9185 cond1 = 0;
9186 true1 = false1 = op1;
9190 if ((cond0 != 0 || cond1 != 0)
9191 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9193 /* If if_then_else_cond returned zero, then true/false are the
9194 same rtl. We must copy one of them to prevent invalid rtl
9195 sharing. */
9196 if (cond0 == 0)
9197 true0 = copy_rtx (true0);
9198 else if (cond1 == 0)
9199 true1 = copy_rtx (true1);
9201 if (COMPARISON_P (x))
9203 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9204 true0, true1);
9205 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9206 false0, false1);
9208 else
9210 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9211 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9214 return cond0 ? cond0 : cond1;
9217 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9218 operands is zero when the other is nonzero, and vice-versa,
9219 and STORE_FLAG_VALUE is 1 or -1. */
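 /* For instance (with STORE_FLAG_VALUE == 1 and hypothetical operands):
 (plus (mult (eq A B) C) (mult (ne A B) D)) has value C when A == B and
 D otherwise, so the condition (eq A B) is returned with *PTRUE = C and
 *PFALSE = D.  */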
9221 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9222 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9223 || code == UMAX)
9224 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9226 rtx op0 = XEXP (XEXP (x, 0), 1);
9227 rtx op1 = XEXP (XEXP (x, 1), 1);
9229 cond0 = XEXP (XEXP (x, 0), 0);
9230 cond1 = XEXP (XEXP (x, 1), 0);
9232 if (COMPARISON_P (cond0)
9233 && COMPARISON_P (cond1)
9234 && SCALAR_INT_MODE_P (mode)
9235 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9236 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9237 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9238 || ((swap_condition (GET_CODE (cond0))
9239 == reversed_comparison_code (cond1, NULL))
9240 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9241 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9242 && ! side_effects_p (x))
9244 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9245 *pfalse = simplify_gen_binary (MULT, mode,
9246 (code == MINUS
9247 ? simplify_gen_unary (NEG, mode,
9248 op1, mode)
9249 : op1),
9250 const_true_rtx);
9251 return cond0;
9255 /* Similarly for MULT, AND and UMIN, except that for these the result
9256 is always zero. */
9257 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9258 && (code == MULT || code == AND || code == UMIN)
9259 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9261 cond0 = XEXP (XEXP (x, 0), 0);
9262 cond1 = XEXP (XEXP (x, 1), 0);
9264 if (COMPARISON_P (cond0)
9265 && COMPARISON_P (cond1)
9266 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9267 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9268 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9269 || ((swap_condition (GET_CODE (cond0))
9270 == reversed_comparison_code (cond1, NULL))
9271 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9272 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9273 && ! side_effects_p (x))
9275 *ptrue = *pfalse = const0_rtx;
9276 return cond0;
9281 else if (code == IF_THEN_ELSE)
9283 /* If we have IF_THEN_ELSE already, extract the condition and
9284 canonicalize it if it is NE or EQ. */
9285 cond0 = XEXP (x, 0);
9286 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9287 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9288 return XEXP (cond0, 0);
9289 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9291 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9292 return XEXP (cond0, 0);
9294 else
9295 return cond0;
9298 /* If X is a SUBREG, we can narrow both the true and false values
9299 of the inner expression, if there is a condition. */
9300 else if (code == SUBREG
9301 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9302 &false0)) != 0)
9304 true0 = simplify_gen_subreg (mode, true0,
9305 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9306 false0 = simplify_gen_subreg (mode, false0,
9307 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9308 if (true0 && false0)
9310 *ptrue = true0;
9311 *pfalse = false0;
9312 return cond0;
9316 /* If X is a constant, this isn't special and will cause confusions
9317 if we treat it as such. Likewise if it is equivalent to a constant. */
9318 else if (CONSTANT_P (x)
9319 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9322 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9323 will be least confusing to the rest of the compiler. */
9324 else if (mode == BImode)
9326 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9327 return x;
9330 /* If X is known to be either 0 or -1, those are the true and
9331 false values when testing X. */
9332 else if (x == constm1_rtx || x == const0_rtx
9333 || (is_a <scalar_int_mode> (mode, &int_mode)
9334 && (num_sign_bit_copies (x, int_mode)
9335 == GET_MODE_PRECISION (int_mode))))
9337 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9338 return x;
9341 /* Likewise for 0 or a single bit. */
9342 else if (HWI_COMPUTABLE_MODE_P (mode)
9343 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9345 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9346 return x;
9349 /* Otherwise fail; show no condition with true and false values the same. */
9350 *ptrue = *pfalse = x;
9351 return 0;
9354 /* Return the value of expression X given the fact that condition COND
9355 is known to be true when applied to REG as its first operand and VAL
9356 as its second. X is known to not be shared and so can be modified in
9357 place.
9359 We only handle the simplest cases, and specifically those cases that
9360 arise with IF_THEN_ELSE expressions. */
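 /* Two simple illustrations (hypothetical operands): if (lt REG VAL) is
 known true, then (smax REG VAL) reduces to VAL; and under a known
 (eq REG (const_int 0)), each occurrence of REG inside X is replaced by
 (const_int 0) for the caller to simplify further.  */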
9362 static rtx
9363 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9365 enum rtx_code code = GET_CODE (x);
9366 const char *fmt;
9367 int i, j;
9369 if (side_effects_p (x))
9370 return x;
9372 /* If either operand of the condition is a floating point value,
9373 then we have to avoid collapsing an EQ comparison. */
9374 if (cond == EQ
9375 && rtx_equal_p (x, reg)
9376 && ! FLOAT_MODE_P (GET_MODE (x))
9377 && ! FLOAT_MODE_P (GET_MODE (val)))
9378 return val;
9380 if (cond == UNEQ && rtx_equal_p (x, reg))
9381 return val;
9383 /* If X is (abs REG) and we know something about REG's relationship
9384 with zero, we may be able to simplify this. */
9386 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9387 switch (cond)
9389 case GE: case GT: case EQ:
9390 return XEXP (x, 0);
9391 case LT: case LE:
9392 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9393 XEXP (x, 0),
9394 GET_MODE (XEXP (x, 0)));
9395 default:
9396 break;
9399 /* The only other cases we handle are MIN, MAX, and comparisons if the
9400 operands are the same as REG and VAL. */
9402 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9404 if (rtx_equal_p (XEXP (x, 0), val))
9406 std::swap (val, reg);
9407 cond = swap_condition (cond);
9410 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9412 if (COMPARISON_P (x))
9414 if (comparison_dominates_p (cond, code))
9415 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9417 code = reversed_comparison_code (x, NULL);
9418 if (code != UNKNOWN
9419 && comparison_dominates_p (cond, code))
9420 return CONST0_RTX (GET_MODE (x));
9421 else
9422 return x;
9424 else if (code == SMAX || code == SMIN
9425 || code == UMIN || code == UMAX)
9427 int unsignedp = (code == UMIN || code == UMAX);
9429 /* Do not reverse the condition when it is NE or EQ.
9430 This is because we cannot conclude anything about
9431 the value of 'SMAX (x, y)' when x is not equal to y,
9432 but we can when x equals y. */
9433 if ((code == SMAX || code == UMAX)
9434 && ! (cond == EQ || cond == NE))
9435 cond = reverse_condition (cond);
9437 switch (cond)
9439 case GE: case GT:
9440 return unsignedp ? x : XEXP (x, 1);
9441 case LE: case LT:
9442 return unsignedp ? x : XEXP (x, 0);
9443 case GEU: case GTU:
9444 return unsignedp ? XEXP (x, 1) : x;
9445 case LEU: case LTU:
9446 return unsignedp ? XEXP (x, 0) : x;
9447 default:
9448 break;
9453 else if (code == SUBREG)
9455 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9456 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9458 if (SUBREG_REG (x) != r)
9460 /* We must simplify subreg here, before we lose track of the
9461 original inner_mode. */
9462 new_rtx = simplify_subreg (GET_MODE (x), r,
9463 inner_mode, SUBREG_BYTE (x));
9464 if (new_rtx)
9465 return new_rtx;
9466 else
9467 SUBST (SUBREG_REG (x), r);
9470 return x;
9472 /* We don't have to handle SIGN_EXTEND here, because even in the
9473 case of replacing something with a modeless CONST_INT, a
9474 CONST_INT is already (supposed to be) a valid sign extension for
9475 its narrower mode, which implies it's already properly
9476 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9477 story is different. */
9478 else if (code == ZERO_EXTEND)
9480 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9481 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9483 if (XEXP (x, 0) != r)
9485 /* We must simplify the zero_extend here, before we lose
9486 track of the original inner_mode. */
9487 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9488 r, inner_mode);
9489 if (new_rtx)
9490 return new_rtx;
9491 else
9492 SUBST (XEXP (x, 0), r);
9495 return x;
9498 fmt = GET_RTX_FORMAT (code);
9499 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9501 if (fmt[i] == 'e')
9502 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9503 else if (fmt[i] == 'E')
9504 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9505 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9506 cond, reg, val));
9509 return x;
9512 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9513 assignment as a field assignment. */
9515 static int
9516 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9518 if (widen_x && GET_MODE (x) != GET_MODE (y))
9520 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9521 return 0;
9522 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9523 return 0;
9524 x = adjust_address_nv (x, GET_MODE (y),
9525 byte_lowpart_offset (GET_MODE (y),
9526 GET_MODE (x)));
9529 if (x == y || rtx_equal_p (x, y))
9530 return 1;
9532 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9533 return 0;
9535 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9536 Note that all SUBREGs of MEM are paradoxical; otherwise they
9537 would have been rewritten. */
9538 if (MEM_P (x) && GET_CODE (y) == SUBREG
9539 && MEM_P (SUBREG_REG (y))
9540 && rtx_equal_p (SUBREG_REG (y),
9541 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9542 return 1;
9544 if (MEM_P (y) && GET_CODE (x) == SUBREG
9545 && MEM_P (SUBREG_REG (x))
9546 && rtx_equal_p (SUBREG_REG (x),
9547 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9548 return 1;
9550 /* We used to see if get_last_value of X and Y were the same but that's
9551 not correct. In one direction, we'll cause the assignment to have
9552 the wrong destination and in the other direction, we'll import a register into this
9553 insn that might already have been dead. So fail if none of the
9554 above cases are true. */
9555 return 0;
9558 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9559 Return that assignment if so.
9561 We only handle the most common cases. */
9563 static rtx
9564 make_field_assignment (rtx x)
9566 rtx dest = SET_DEST (x);
9567 rtx src = SET_SRC (x);
9568 rtx assign;
9569 rtx rhs, lhs;
9570 HOST_WIDE_INT c1;
9571 HOST_WIDE_INT pos;
9572 unsigned HOST_WIDE_INT len;
9573 rtx other;
9575 /* All the rules in this function are specific to scalar integers. */
9576 scalar_int_mode mode;
9577 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9578 return x;
9580 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9581 a clear of a one-bit field. We will have changed it to
9582 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9583 for a SUBREG. */
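 /* E.g. (illustrative): (set DEST (and (rotate (const_int -2) POS) DEST))
 becomes (set (zero_extract DEST (const_int 1) POS) (const_int 0)),
 clearing just the selected bit.  */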
9585 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9586 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9587 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9588 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9590 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9591 1, 1, 1, 0);
9592 if (assign != 0)
9593 return gen_rtx_SET (assign, const0_rtx);
9594 return x;
9597 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9598 && subreg_lowpart_p (XEXP (src, 0))
9599 && partial_subreg_p (XEXP (src, 0))
9600 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9601 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9602 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9603 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9605 assign = make_extraction (VOIDmode, dest, 0,
9606 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9607 1, 1, 1, 0);
9608 if (assign != 0)
9609 return gen_rtx_SET (assign, const0_rtx);
9610 return x;
9613 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9614 one-bit field. */
9615 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9616 && XEXP (XEXP (src, 0), 0) == const1_rtx
9617 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9619 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9620 1, 1, 1, 0);
9621 if (assign != 0)
9622 return gen_rtx_SET (assign, const1_rtx);
9623 return x;
9626 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9627 SRC is an AND with all bits of that field set, then we can discard
9628 the AND. */
9629 if (GET_CODE (dest) == ZERO_EXTRACT
9630 && CONST_INT_P (XEXP (dest, 1))
9631 && GET_CODE (src) == AND
9632 && CONST_INT_P (XEXP (src, 1)))
9634 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9635 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9636 unsigned HOST_WIDE_INT ze_mask;
9638 if (width >= HOST_BITS_PER_WIDE_INT)
9639 ze_mask = -1;
9640 else
9641 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9643 /* Complete overlap. We can remove the source AND. */
9644 if ((and_mask & ze_mask) == ze_mask)
9645 return gen_rtx_SET (dest, XEXP (src, 0));
9647 /* Partial overlap. We can reduce the source AND. */
9648 if ((and_mask & ze_mask) != and_mask)
9650 src = gen_rtx_AND (mode, XEXP (src, 0),
9651 gen_int_mode (and_mask & ze_mask, mode));
9652 return gen_rtx_SET (dest, src);
9656 /* The other case we handle is assignments into a constant-position
9657 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9658 a mask that has all one bits except for a group of zero bits and
9659 OTHER is known to have zeros where C1 has ones, this is such an
9660 assignment. Compute the position and length from C1. Shift OTHER
9661 to the appropriate position, force it to the required mode, and
9662 make the extraction. Check for the AND in both operands. */
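 /* As a concrete sketch (hypothetical constants): in SImode,
 (ior (and DEST (const_int 0xffff00ff)) OTHER), with OTHER known to be
 nonzero only in bits 8..15, is an assignment to an 8-bit field at
 position 8, and becomes roughly
 (set (zero_extract DEST (const_int 8) (const_int 8))
 (lshiftrt OTHER (const_int 8))).  */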
9664 /* One or more SUBREGs might obscure the constant-position field
9665 assignment. The first one we are likely to encounter is an outer
9666 narrowing SUBREG, which we can just strip for the purposes of
9667 identifying the constant-field assignment. */
9668 scalar_int_mode src_mode = mode;
9669 if (GET_CODE (src) == SUBREG
9670 && subreg_lowpart_p (src)
9671 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9672 src = SUBREG_REG (src);
9674 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9675 return x;
9677 rhs = expand_compound_operation (XEXP (src, 0));
9678 lhs = expand_compound_operation (XEXP (src, 1));
9680 if (GET_CODE (rhs) == AND
9681 && CONST_INT_P (XEXP (rhs, 1))
9682 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9683 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9684 /* The second SUBREG that might get in the way is a paradoxical
9685 SUBREG around the first operand of the AND. We want to
9686 pretend the operand is as wide as the destination here. We
9687 do this by adjusting the MEM to wider mode for the sole
9688 purpose of the call to rtx_equal_for_field_assignment_p. Also
9689 note this trick only works for MEMs. */
9690 else if (GET_CODE (rhs) == AND
9691 && paradoxical_subreg_p (XEXP (rhs, 0))
9692 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9693 && CONST_INT_P (XEXP (rhs, 1))
9694 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9695 dest, true))
9696 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9697 else if (GET_CODE (lhs) == AND
9698 && CONST_INT_P (XEXP (lhs, 1))
9699 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9700 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9701 /* The second SUBREG that might get in the way is a paradoxical
9702 SUBREG around the first operand of the AND. We want to
9703 pretend the operand is as wide as the destination here. We
9704 do this by adjusting the MEM to wider mode for the sole
9705 purpose of the call to rtx_equal_for_field_assignment_p. Also
9706 note this trick only works for MEMs. */
9707 else if (GET_CODE (lhs) == AND
9708 && paradoxical_subreg_p (XEXP (lhs, 0))
9709 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9710 && CONST_INT_P (XEXP (lhs, 1))
9711 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9712 dest, true))
9713 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9714 else
9715 return x;
9717 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9718 if (pos < 0
9719 || pos + len > GET_MODE_PRECISION (mode)
9720 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9721 || (c1 & nonzero_bits (other, mode)) != 0)
9722 return x;
9724 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9725 if (assign == 0)
9726 return x;
9728 /* The mode to use for the source is the mode of the assignment, or of
9729 what is inside a possible STRICT_LOW_PART. */
9730 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9731 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9733 /* Shift OTHER right POS places and make it the source, restricting it
9734 to the proper length and mode. */
9736 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9737 src_mode, other, pos),
9738 dest);
9739 src = force_to_mode (src, new_mode,
9740 len >= HOST_BITS_PER_WIDE_INT
9741 ? HOST_WIDE_INT_M1U
9742 : (HOST_WIDE_INT_1U << len) - 1,
9743 0);
9745 /* If SRC is masked by an AND that does not make a difference in
9746 the value being stored, strip it. */
9747 if (GET_CODE (assign) == ZERO_EXTRACT
9748 && CONST_INT_P (XEXP (assign, 1))
9749 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9750 && GET_CODE (src) == AND
9751 && CONST_INT_P (XEXP (src, 1))
9752 && UINTVAL (XEXP (src, 1))
9753 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9754 src = XEXP (src, 0);
9756 return gen_rtx_SET (assign, src);
9759 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9760 if so. */
9762 static rtx
9763 apply_distributive_law (rtx x)
9765 enum rtx_code code = GET_CODE (x);
9766 enum rtx_code inner_code;
9767 rtx lhs, rhs, other;
9768 rtx tem;
9770 /* Distributivity is not true for floating point as it can change the
9771 value. So we don't do it unless -funsafe-math-optimizations. */
9772 if (FLOAT_MODE_P (GET_MODE (x))
9773 && ! flag_unsafe_math_optimizations)
9774 return x;
9776 /* The outer operation can only be one of the following: */
9777 if (code != IOR && code != AND && code != XOR
9778 && code != PLUS && code != MINUS)
9779 return x;
9781 lhs = XEXP (x, 0);
9782 rhs = XEXP (x, 1);
9784 /* If either operand is a primitive we can't do anything, so get out
9785 fast. */
9786 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9787 return x;
9789 lhs = expand_compound_operation (lhs);
9790 rhs = expand_compound_operation (rhs);
9791 inner_code = GET_CODE (lhs);
9792 if (inner_code != GET_CODE (rhs))
9793 return x;
9795 /* See if the inner and outer operations distribute. */
9796 switch (inner_code)
9798 case LSHIFTRT:
9799 case ASHIFTRT:
9800 case AND:
9801 case IOR:
9802 /* These all distribute except over PLUS. */
9803 if (code == PLUS || code == MINUS)
9804 return x;
9805 break;
9807 case MULT:
9808 if (code != PLUS && code != MINUS)
9809 return x;
9810 break;
9812 case ASHIFT:
9813 /* This is also a multiply, so it distributes over everything. */
9814 break;
9816 /* This used to handle SUBREG, but this turned out to be counter-
9817 productive, since (subreg (op ...)) usually is not handled by
9818 insn patterns, and this "optimization" therefore transformed
9819 recognizable patterns into unrecognizable ones. Therefore the
9820 SUBREG case was removed from here.
9822 It is possible that distributing SUBREG over arithmetic operations
9823 leads to an intermediate result than can then be optimized further,
9824 e.g. by moving the outer SUBREG to the other side of a SET as done
9825 in simplify_set. This seems to have been the original intent of
9826 handling SUBREGs here.
9828 However, with current GCC this does not appear to actually happen,
9829 at least on major platforms. If some case is found where removing
9830 the SUBREG case here prevents follow-on optimizations, distributing
9831 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9833 default:
9834 return x;
9837 /* Set LHS and RHS to the inner operands (A and B in the example
9838 above) and set OTHER to the common operand (C in the example).
9839 There is only one way to do this unless the inner operation is
9840 commutative. */
9841 if (COMMUTATIVE_ARITH_P (lhs)
9842 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9843 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9844 else if (COMMUTATIVE_ARITH_P (lhs)
9845 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9846 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9847 else if (COMMUTATIVE_ARITH_P (lhs)
9848 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9849 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9850 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9851 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9852 else
9853 return x;
9855 /* Form the new inner operation, seeing if it simplifies first. */
9856 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9858 /* There is one exception to the general way of distributing:
9859 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9860 if (code == XOR && inner_code == IOR)
9862 inner_code = AND;
9863 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9866 /* We may be able to continue distributing the result, so call
9867 ourselves recursively on the inner operation before forming the
9868 outer operation, which we return. */
9869 return simplify_gen_binary (inner_code, GET_MODE (x),
9870 apply_distributive_law (tem), other);
9873 /* See if X is of the form (* (+ A B) C), and if so convert to
9874 (+ (* A C) (* B C)) and try to simplify.
9876 Most of the time, this results in no change. However, if some of
9877 the operands are the same or inverses of each other, simplifications
9878 will result.
9880 For example, (and (ior A B) (not B)) can occur as the result of
9881 expanding a bit field assignment. When we apply the distributive
9882 law to this, we get (ior (and A (not B)) (and B (not B))),
9883 which then simplifies to (and A (not B)).
9885 Note that no checks happen on the validity of applying the inverse
9886 distributive law. This is pointless since we can do it in the
9887 few places where this routine is called.
9889 N is the index of the term that is decomposed (the arithmetic operation,
9890 i.e. (+ A B) in the first example above). !N is the index of the term that
9891 is distributed, i.e. of C in the first example above. */
9892 static rtx
9893 distribute_and_simplify_rtx (rtx x, int n)
9895 machine_mode mode;
9896 enum rtx_code outer_code, inner_code;
9897 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9899 /* Distributivity is not true for floating point as it can change the
9900 value. So we don't do it unless -funsafe-math-optimizations. */
9901 if (FLOAT_MODE_P (GET_MODE (x))
9902 && ! flag_unsafe_math_optimizations)
9903 return NULL_RTX;
9905 decomposed = XEXP (x, n);
9906 if (!ARITHMETIC_P (decomposed))
9907 return NULL_RTX;
9909 mode = GET_MODE (x);
9910 outer_code = GET_CODE (x);
9911 distributed = XEXP (x, !n);
9913 inner_code = GET_CODE (decomposed);
9914 inner_op0 = XEXP (decomposed, 0);
9915 inner_op1 = XEXP (decomposed, 1);
9917 /* Special case (and (xor B C) (not A)), which is equivalent to
9918 (xor (ior A B) (ior A C)) */
9919 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9921 distributed = XEXP (distributed, 0);
9922 outer_code = IOR;
9925 if (n == 0)
9927 /* Distribute the second term. */
9928 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9929 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9931 else
9933 /* Distribute the first term. */
9934 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9935 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9938 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9939 new_op0, new_op1));
9940 if (GET_CODE (tmp) != outer_code
9941 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9942 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9943 return tmp;
9945 return NULL_RTX;
9948 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9949 in MODE. Return an equivalent form, if different from (and VAROP
9950 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9952 static rtx
9953 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9954 unsigned HOST_WIDE_INT constop)
9956 unsigned HOST_WIDE_INT nonzero;
9957 unsigned HOST_WIDE_INT orig_constop;
9958 rtx orig_varop;
9959 int i;
9961 orig_varop = varop;
9962 orig_constop = constop;
9963 if (GET_CODE (varop) == CLOBBER)
9964 return NULL_RTX;
9966 /* Simplify VAROP knowing that we will be only looking at some of the
9967 bits in it.
9969 Note by passing in CONSTOP, we guarantee that the bits not set in
9970 CONSTOP are not significant and will never be examined. We must
9971 ensure that is the case by explicitly masking out those bits
9972 before returning. */
9973 varop = force_to_mode (varop, mode, constop, 0);
9975 /* If VAROP is a CLOBBER, we will fail so return it. */
9976 if (GET_CODE (varop) == CLOBBER)
9977 return varop;
9979 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9980 to VAROP and return the new constant. */
9981 if (CONST_INT_P (varop))
9982 return gen_int_mode (INTVAL (varop) & constop, mode);
9984 /* See what bits may be nonzero in VAROP. Unlike the general case of
9985 a call to nonzero_bits, here we don't care about bits outside
9986 MODE. */
9988 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9990 /* Turn off all bits in the constant that are known to already be zero.
9991 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9992 which is tested below. */
9994 constop &= nonzero;
9996 /* If we don't have any bits left, return zero. */
9997 if (constop == 0 && !side_effects_p (varop))
9998 return const0_rtx;
10000 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10001 a power of two, we can replace this with an ASHIFT. */
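 /* For instance (illustrative): if X is known to be 0 or 1, then
 (and (neg X) (const_int 8)) is 0 or 8, i.e. (ashift X (const_int 3)).  */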
10002 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10003 && (i = exact_log2 (constop)) >= 0)
10004 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10006 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10007 or XOR, then try to apply the distributive law. This may eliminate
10008 operations if either branch can be simplified because of the AND.
10009 It may also make some cases more complex, but those cases probably
10010 won't match a pattern either with or without this. */
10012 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10014 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10015 return
10016 gen_lowpart
10017 (mode,
10018 apply_distributive_law
10019 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10020 simplify_and_const_int (NULL_RTX, varop_mode,
10021 XEXP (varop, 0),
10022 constop),
10023 simplify_and_const_int (NULL_RTX, varop_mode,
10024 XEXP (varop, 1),
10025 constop))));
10028 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10029 the AND and see if one of the operands simplifies to zero. If so, we
10030 may eliminate it. */
10032 if (GET_CODE (varop) == PLUS
10033 && pow2p_hwi (constop + 1))
10035 rtx o0, o1;
10037 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10038 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10039 if (o0 == const0_rtx)
10040 return o1;
10041 if (o1 == const0_rtx)
10042 return o0;
10045 /* Make a SUBREG if necessary. If we can't make it, fail. */
10046 varop = gen_lowpart (mode, varop);
10047 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10048 return NULL_RTX;
10050 /* If we are only masking insignificant bits, return VAROP. */
10051 if (constop == nonzero)
10052 return varop;
10054 if (varop == orig_varop && constop == orig_constop)
10055 return NULL_RTX;
10057 /* Otherwise, return an AND. */
10058 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10062 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10063 in MODE.
10065 Return an equivalent form, if different from X. Otherwise, return X. If
10066 X is zero, we are to always construct the equivalent form. */
10068 static rtx
10069 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10070 unsigned HOST_WIDE_INT constop)
10072 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10073 if (tem)
10074 return tem;
10076 if (!x)
10077 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10078 gen_int_mode (constop, mode));
10079 if (GET_MODE (x) != mode)
10080 x = gen_lowpart (mode, x);
10081 return x;
10084 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10085 We don't care about bits outside of those defined in MODE.
10086 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10088 For most X this is simply GET_MODE_MASK (MODE), but if X is
10089 a shift, AND, or zero_extract, we can do better. */
10091 static rtx
10092 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10093 scalar_int_mode mode,
10094 unsigned HOST_WIDE_INT *nonzero)
10096 rtx tem;
10097 reg_stat_type *rsp;
10099 /* If X is a register whose nonzero bits value is current, use it.
10100 Otherwise, if X is a register whose value we can find, use that
10101 value. Otherwise, use the previously-computed global nonzero bits
10102 for this register. */
10104 rsp = &reg_stat[REGNO (x)];
10105 if (rsp->last_set_value != 0
10106 && (rsp->last_set_mode == mode
10107 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10108 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10109 && GET_MODE_CLASS (mode) == MODE_INT))
10110 && ((rsp->last_set_label >= label_tick_ebb_start
10111 && rsp->last_set_label < label_tick)
10112 || (rsp->last_set_label == label_tick
10113 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10114 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10115 && REGNO (x) < reg_n_sets_max
10116 && REG_N_SETS (REGNO (x)) == 1
10117 && !REGNO_REG_SET_P
10118 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10119 REGNO (x)))))
10121 /* Note that, even if the precision of last_set_mode is lower than that
10122 of mode, record_value_for_reg invoked nonzero_bits on the register
10123 with nonzero_bits_mode (because last_set_mode is necessarily integral
10124 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10125 are all valid, hence in mode too since nonzero_bits_mode is defined
10126 to the largest HWI_COMPUTABLE_MODE_P mode. */
10127 *nonzero &= rsp->last_set_nonzero_bits;
10128 return NULL;
10131 tem = get_last_value (x);
10132 if (tem)
10134 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10135 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10137 return tem;
10140 if (nonzero_sign_valid && rsp->nonzero_bits)
10142 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10144 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10145 /* We don't know anything about the upper bits. */
10146 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10148 *nonzero &= mask;
10151 return NULL;
10154 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10155 end of X that are known to be equal to the sign bit. X will be used
10156 in mode MODE; the returned value will always be between 1 and the
10157 number of bits in MODE. */
10159 static rtx
10160 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10161 scalar_int_mode mode,
10162 unsigned int *result)
10164 rtx tem;
10165 reg_stat_type *rsp;
10167 rsp = &reg_stat[REGNO (x)];
10168 if (rsp->last_set_value != 0
10169 && rsp->last_set_mode == mode
10170 && ((rsp->last_set_label >= label_tick_ebb_start
10171 && rsp->last_set_label < label_tick)
10172 || (rsp->last_set_label == label_tick
10173 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10174 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10175 && REGNO (x) < reg_n_sets_max
10176 && REG_N_SETS (REGNO (x)) == 1
10177 && !REGNO_REG_SET_P
10178 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10179 REGNO (x)))))
10181 *result = rsp->last_set_sign_bit_copies;
10182 return NULL;
10185 tem = get_last_value (x);
10186 if (tem != 0)
10187 return tem;
10189 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10190 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10191 *result = rsp->sign_bit_copies;
10193 return NULL;
10196 /* Return the number of "extended" bits there are in X, when interpreted
10197 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10198 unsigned quantities, this is the number of high-order zero bits.
10199 For signed quantities, this is the number of copies of the sign bit
10200 minus 1. In both cases, this function returns the number of "spare"
10201 bits. For example, if two quantities for which this function returns
10202 at least 1 are added, the addition is known not to overflow.
10204 This function will always return 0 unless called during combine, which
10205 implies that it must be called from a define_split. */
10207 unsigned int
10208 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10210 if (nonzero_sign_valid == 0)
10211 return 0;
10213 scalar_int_mode int_mode;
10214 return (unsignedp
10215 ? (is_a <scalar_int_mode> (mode, &int_mode)
10216 && HWI_COMPUTABLE_MODE_P (int_mode)
10217 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10218 - floor_log2 (nonzero_bits (x, int_mode)))
10219 : 0)
10220 : num_sign_bit_copies (x, mode) - 1);
10223 /* This function is called from `simplify_shift_const' to merge two
10224 outer operations. Specifically, we have already found that we need
10225 to perform operation *POP0 with constant *PCONST0 at the outermost
10226 position. We would now like to also perform OP1 with constant CONST1
10227 (with *POP0 being done last).
10229 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10230 the resulting operation. *PCOMP_P is set to 1 if we would need to
10231 complement the innermost operand, otherwise it is unchanged.
10233 MODE is the mode in which the operation will be done. No bits outside
10234 the width of this mode matter. It is assumed that the width of this mode
10235 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10237 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
10238 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10239 result is simply *PCONST0.
10241 If the resulting operation cannot be expressed as one operation, we
10242 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10244 static int
10245 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10247 enum rtx_code op0 = *pop0;
10248 HOST_WIDE_INT const0 = *pconst0;
10250 const0 &= GET_MODE_MASK (mode);
10251 const1 &= GET_MODE_MASK (mode);
10253 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10254 if (op0 == AND)
10255 const1 &= const0;
10257 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10258 if OP0 is SET. */
10260 if (op1 == UNKNOWN || op0 == SET)
10261 return 1;
10263 else if (op0 == UNKNOWN)
10264 op0 = op1, const0 = const1;
10266 else if (op0 == op1)
10268 switch (op0)
10270 case AND:
10271 const0 &= const1;
10272 break;
10273 case IOR:
10274 const0 |= const1;
10275 break;
10276 case XOR:
10277 const0 ^= const1;
10278 break;
10279 case PLUS:
10280 const0 += const1;
10281 break;
10282 case NEG:
10283 op0 = UNKNOWN;
10284 break;
10285 default:
10286 break;
10290 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10291 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10292 return 0;
10294 /* If the two constants aren't the same, we can't do anything. The
10295 remaining six cases can all be done. */
10296 else if (const0 != const1)
10297 return 0;
10299 else
10300 switch (op0)
10302 case IOR:
10303 if (op1 == AND)
10304 /* (a & b) | b == b */
10305 op0 = SET;
10306 else /* op1 == XOR */
10307 /* (a ^ b) | b == a | b */
10309 break;
10311 case XOR:
10312 if (op1 == AND)
10313 /* (a & b) ^ b == (~a) & b */
10314 op0 = AND, *pcomp_p = 1;
10315 else /* op1 == IOR */
10316 /* (a | b) ^ b == a & ~b */
10317 op0 = AND, const0 = ~const0;
10318 break;
10320 case AND:
10321 if (op1 == IOR)
10322 /* (a | b) & b == b */
10323 op0 = SET;
10324 else /* op1 == XOR */
10325 /* (a ^ b) & b == (~a) & b */
10326 *pcomp_p = 1;
10327 break;
10328 default:
10329 break;
10332 /* Check for NO-OP cases. */
10333 const0 &= GET_MODE_MASK (mode);
10334 if (const0 == 0
10335 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10336 op0 = UNKNOWN;
10337 else if (const0 == 0 && op0 == AND)
10338 op0 = SET;
10339 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10340 && op0 == AND)
10341 op0 = UNKNOWN;
10343 *pop0 = op0;
10345 /* ??? Slightly redundant with the above mask, but not entirely.
10346 Moving this above means we'd have to sign-extend the mode mask
10347 for the final test. */
10348 if (op0 != UNKNOWN && op0 != NEG)
10349 *pconst0 = trunc_int_for_mode (const0, mode);
10351 return 1;
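/* Editorial illustration (not part of the original source): a worked
   instance of the XOR/AND merge above.  Suppose the queued outermost
   operation *POP0 is XOR with *PCONST0 == 0x0f and we now also need
   OP1 == AND with CONST1 == 0x0f, performed first; the combined effect
   on a value V is (V & 0x0f) ^ 0x0f.  The table rewrites this as
   (~V) & 0x0f, i.e. *POP0 becomes AND and *PCOMP_P is set.  For
   V == 0x0a both forms give 0x05.  */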
10354 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10355 the shift in. The original shift operation CODE is performed on OP in
10356 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10357 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10358 result of the shift is subject to operation OUTER_CODE with operand
10359 OUTER_CONST. */
10361 static scalar_int_mode
10362 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10363 scalar_int_mode orig_mode, scalar_int_mode mode,
10364 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10366 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10368 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
10369 switch (code)
10371 case ASHIFTRT:
10372 /* We can still widen if the bits brought in from the left are identical
10373 to the sign bit of ORIG_MODE. */
10374 if (num_sign_bit_copies (op, mode)
10375 > (unsigned) (GET_MODE_PRECISION (mode)
10376 - GET_MODE_PRECISION (orig_mode)))
10377 return mode;
10378 return orig_mode;
10380 case LSHIFTRT:
10381 /* Similarly here but with zero bits. */
10382 if (HWI_COMPUTABLE_MODE_P (mode)
10383 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10384 return mode;
10386 /* We can also widen if the bits brought in will be masked off. This
10387 operation is performed in ORIG_MODE. */
10388 if (outer_code == AND)
10390 int care_bits = low_bitmask_len (orig_mode, outer_const);
10392 if (care_bits >= 0
10393 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10394 return mode;
10396 /* fall through */
10398 case ROTATE:
10399 return orig_mode;
10401 case ROTATERT:
10402 gcc_unreachable ();
10404 default:
10405 return mode;
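/* Editorial illustration (not part of the original source): for a
   logical right shift the test above allows, e.g., rewriting
   (lshiftrt:QI X 3) in SImode when nonzero_bits (X, SImode) has no bits
   set outside the QImode mask 0xff; the bits shifted in from above are
   then zero in either mode, so the low eight bits of the result agree.
   Widening an ASHIFTRT instead needs enough sign-bit copies (at least
   25 when going from QImode to SImode) so that the bits brought in from
   the left match ORIG_MODE's sign bit.  */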
10409 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10410 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10411 if we cannot simplify it. Otherwise, return a simplified value.
10413 The shift is normally computed in the widest mode we find in VAROP, as
10414 long as it isn't a different number of words than RESULT_MODE. Exceptions
10415 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10417 static rtx
10418 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10419 rtx varop, int orig_count)
10421 enum rtx_code orig_code = code;
10422 rtx orig_varop = varop;
10423 int count, log2;
10424 machine_mode mode = result_mode;
10425 machine_mode shift_mode;
10426 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10427 /* We form (outer_op (code varop count) (outer_const)). */
10428 enum rtx_code outer_op = UNKNOWN;
10429 HOST_WIDE_INT outer_const = 0;
10430 int complement_p = 0;
10431 rtx new_rtx, x;
10433 /* Make sure and truncate the "natural" shift on the way in. We don't
10434 want to do this inside the loop as it makes it more difficult to
10435 combine shifts. */
10436 if (SHIFT_COUNT_TRUNCATED)
10437 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10439 /* If we were given an invalid count, don't do anything except exactly
10440 what was requested. */
10442 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10443 return NULL_RTX;
10445 count = orig_count;
10447 /* Unless one of the branches of the `if' in this loop does a `continue',
10448 we will `break' the loop after the `if'. */
10450 while (count != 0)
10452 /* If we have an operand of (clobber (const_int 0)), fail. */
10453 if (GET_CODE (varop) == CLOBBER)
10454 return NULL_RTX;
10456 /* Convert ROTATERT to ROTATE. */
10457 if (code == ROTATERT)
10459 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10460 code = ROTATE;
10461 count = bitsize - count;
10464 shift_mode = result_mode;
10465 if (shift_mode != mode)
10467 /* We only change the modes of scalar shifts. */
10468 int_mode = as_a <scalar_int_mode> (mode);
10469 int_result_mode = as_a <scalar_int_mode> (result_mode);
10470 shift_mode = try_widen_shift_mode (code, varop, count,
10471 int_result_mode, int_mode,
10472 outer_op, outer_const);
10475 scalar_int_mode shift_unit_mode
10476 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10478 /* Handle cases where the count is greater than the size of the mode
10479 minus 1. For ASHIFT, use the size minus one as the count (this can
10480 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10481 take the count modulo the size. For other shifts, the result is
10482 zero.
10484 Since these shifts are being produced by the compiler by combining
10485 multiple operations, each of which are defined, we know what the
10486 result is supposed to be. */
10488 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10490 if (code == ASHIFTRT)
10491 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10492 else if (code == ROTATE || code == ROTATERT)
10493 count %= GET_MODE_PRECISION (shift_unit_mode);
10494 else
10496 /* We can't simply return zero because there may be an
10497 outer op. */
10498 varop = const0_rtx;
10499 count = 0;
10500 break;
10504 /* If we discovered we had to complement VAROP, leave. Making a NOT
10505 here would cause an infinite loop. */
10506 if (complement_p)
10507 break;
10509 if (shift_mode == shift_unit_mode)
10511 /* An arithmetic right shift of a quantity known to be -1 or 0
10512 is a no-op. */
10513 if (code == ASHIFTRT
10514 && (num_sign_bit_copies (varop, shift_unit_mode)
10515 == GET_MODE_PRECISION (shift_unit_mode)))
10517 count = 0;
10518 break;
10521 /* If we are doing an arithmetic right shift and discarding all but
10522 the sign bit copies, this is equivalent to doing a shift by the
10523 bitsize minus one. Convert it into that shift because it will
10524 often allow other simplifications. */
10526 if (code == ASHIFTRT
10527 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10528 >= GET_MODE_PRECISION (shift_unit_mode)))
10529 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10531 /* We simplify the tests below and elsewhere by converting
10532 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10533 `make_compound_operation' will convert it to an ASHIFTRT for
10534 those machines (such as VAX) that don't have an LSHIFTRT. */
10535 if (code == ASHIFTRT
10536 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10537 && val_signbit_known_clear_p (shift_unit_mode,
10538 nonzero_bits (varop,
10539 shift_unit_mode)))
10540 code = LSHIFTRT;
10542 if (((code == LSHIFTRT
10543 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10544 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10545 || (code == ASHIFT
10546 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10547 && !((nonzero_bits (varop, shift_unit_mode) << count)
10548 & GET_MODE_MASK (shift_unit_mode))))
10549 && !side_effects_p (varop))
10550 varop = const0_rtx;
10553 switch (GET_CODE (varop))
10555 case SIGN_EXTEND:
10556 case ZERO_EXTEND:
10557 case SIGN_EXTRACT:
10558 case ZERO_EXTRACT:
10559 new_rtx = expand_compound_operation (varop);
10560 if (new_rtx != varop)
10562 varop = new_rtx;
10563 continue;
10565 break;
10567 case MEM:
10568 /* The following rules apply only to scalars. */
10569 if (shift_mode != shift_unit_mode)
10570 break;
10571 int_mode = as_a <scalar_int_mode> (mode);
10573 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10574 minus the width of a smaller mode, we can do this with a
10575 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10576 if ((code == ASHIFTRT || code == LSHIFTRT)
10577 && ! mode_dependent_address_p (XEXP (varop, 0),
10578 MEM_ADDR_SPACE (varop))
10579 && ! MEM_VOLATILE_P (varop)
10580 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10581 .exists (&tmode)))
10583 new_rtx = adjust_address_nv (varop, tmode,
10584 BYTES_BIG_ENDIAN ? 0
10585 : count / BITS_PER_UNIT);
10587 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10588 : ZERO_EXTEND, int_mode, new_rtx);
10589 count = 0;
10590 continue;
10592 break;
10594 case SUBREG:
10595 /* The following rules apply only to scalars. */
10596 if (shift_mode != shift_unit_mode)
10597 break;
10598 int_mode = as_a <scalar_int_mode> (mode);
10599 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10601 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10602 the same number of words as what we've seen so far. Then store
10603 the widest mode in MODE. */
10604 if (subreg_lowpart_p (varop)
10605 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10606 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10607 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10608 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10609 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10611 varop = SUBREG_REG (varop);
10612 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10613 mode = inner_mode;
10614 continue;
10616 break;
10618 case MULT:
10619 /* Some machines use MULT instead of ASHIFT because MULT
10620 is cheaper. But it is still better on those machines to
10621 merge two shifts into one. */
10622 if (CONST_INT_P (XEXP (varop, 1))
10623 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10625 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10626 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10627 XEXP (varop, 0), log2_rtx);
10628 continue;
10630 break;
10632 case UDIV:
10633 /* Similar, for when divides are cheaper. */
10634 if (CONST_INT_P (XEXP (varop, 1))
10635 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10637 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10638 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10639 XEXP (varop, 0), log2_rtx);
10640 continue;
10642 break;
10644 case ASHIFTRT:
10645 /* If we are extracting just the sign bit of an arithmetic
10646 right shift, that shift is not needed. However, the sign
10647 bit of a wider mode may be different from what would be
10648 interpreted as the sign bit in a narrower mode, so, if
10649 the result is narrower, don't discard the shift. */
10650 if (code == LSHIFTRT
10651 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10652 && (GET_MODE_UNIT_BITSIZE (result_mode)
10653 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10655 varop = XEXP (varop, 0);
10656 continue;
10659 /* fall through */
10661 case LSHIFTRT:
10662 case ASHIFT:
10663 case ROTATE:
10664 /* The following rules apply only to scalars. */
10665 if (shift_mode != shift_unit_mode)
10666 break;
10667 int_mode = as_a <scalar_int_mode> (mode);
10668 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10669 int_result_mode = as_a <scalar_int_mode> (result_mode);
10671 /* Here we have two nested shifts. The result is usually the
10672 AND of a new shift with a mask. We compute the result below. */
10673 if (CONST_INT_P (XEXP (varop, 1))
10674 && INTVAL (XEXP (varop, 1)) >= 0
10675 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10676 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10677 && HWI_COMPUTABLE_MODE_P (int_mode))
10679 enum rtx_code first_code = GET_CODE (varop);
10680 unsigned int first_count = INTVAL (XEXP (varop, 1));
10681 unsigned HOST_WIDE_INT mask;
10682 rtx mask_rtx;
10684 /* We have one common special case. We can't do any merging if
10685 the inner code is an ASHIFTRT of a smaller mode. However, if
10686 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10687 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10688 we can convert it to
10689 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10690 This simplifies certain SIGN_EXTEND operations. */
10691 if (code == ASHIFT && first_code == ASHIFTRT
10692 && count == (GET_MODE_PRECISION (int_result_mode)
10693 - GET_MODE_PRECISION (int_varop_mode)))
10695 /* C3 has the low-order C1 bits zero. */
10697 mask = GET_MODE_MASK (int_mode)
10698 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10700 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10701 XEXP (varop, 0), mask);
10702 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10703 int_result_mode, varop, count);
10704 count = first_count;
10705 code = ASHIFTRT;
10706 continue;
10709 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10710 than C1 high-order bits equal to the sign bit, we can convert
10711 this to either an ASHIFT or an ASHIFTRT depending on the
10712 two counts.
10714 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10716 if (code == ASHIFTRT && first_code == ASHIFT
10717 && int_varop_mode == shift_unit_mode
10718 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10719 > first_count))
10721 varop = XEXP (varop, 0);
10722 count -= first_count;
10723 if (count < 0)
10725 count = -count;
10726 code = ASHIFT;
10729 continue;
10732 /* There are some cases we can't do. If CODE is ASHIFTRT,
10733 we can only do this if FIRST_CODE is also ASHIFTRT.
10735 We can't do the case when CODE is ROTATE and FIRST_CODE is
10736 ASHIFTRT.
10738 If the mode of this shift is not the mode of the outer shift,
10739 we can't do this if either shift is a right shift or ROTATE.
10741 Finally, we can't do any of these if the mode is too wide
10742 unless the codes are the same.
10744 Handle the case where the shift codes are the same
10745 first. */
10747 if (code == first_code)
10749 if (int_varop_mode != int_result_mode
10750 && (code == ASHIFTRT || code == LSHIFTRT
10751 || code == ROTATE))
10752 break;
10754 count += first_count;
10755 varop = XEXP (varop, 0);
10756 continue;
10759 if (code == ASHIFTRT
10760 || (code == ROTATE && first_code == ASHIFTRT)
10761 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10762 || (int_varop_mode != int_result_mode
10763 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10764 || first_code == ROTATE
10765 || code == ROTATE)))
10766 break;
10768 /* To compute the mask to apply after the shift, shift the
10769 nonzero bits of the inner shift the same way the
10770 outer shift will. */
10772 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10773 int_result_mode);
10774 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10775 mask_rtx
10776 = simplify_const_binary_operation (code, int_result_mode,
10777 mask_rtx, count_rtx);
10779 /* Give up if we can't compute an outer operation to use. */
10780 if (mask_rtx == 0
10781 || !CONST_INT_P (mask_rtx)
10782 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10783 INTVAL (mask_rtx),
10784 int_result_mode, &complement_p))
10785 break;
10787 /* If the shifts are in the same direction, we add the
10788 counts. Otherwise, we subtract them. */
10789 if ((code == ASHIFTRT || code == LSHIFTRT)
10790 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10791 count += first_count;
10792 else
10793 count -= first_count;
10795 /* If COUNT is positive, the new shift is usually CODE,
10796 except for the two exceptions below, in which case it is
10797 FIRST_CODE. If the count is negative, FIRST_CODE should
10798 always be used. */
10799 if (count > 0
10800 && ((first_code == ROTATE && code == ASHIFT)
10801 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10802 code = first_code;
10803 else if (count < 0)
10804 code = first_code, count = -count;
10806 varop = XEXP (varop, 0);
10807 continue;
10810 /* If we have (A << B << C) for any shift, we can convert this to
10811 (A << C << B). This wins if A is a constant. Only try this if
10812 B is not a constant. */
10814 else if (GET_CODE (varop) == code
10815 && CONST_INT_P (XEXP (varop, 0))
10816 && !CONST_INT_P (XEXP (varop, 1)))
10818 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10819 sure the result will be masked. See PR70222. */
10820 if (code == LSHIFTRT
10821 && int_mode != int_result_mode
10822 && !merge_outer_ops (&outer_op, &outer_const, AND,
10823 GET_MODE_MASK (int_result_mode)
10824 >> orig_count, int_result_mode,
10825 &complement_p))
10826 break;
10827 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10828 up outer sign extension (often left and right shift) is
10829 hardly more efficient than the original. See PR70429.
10830 Similarly punt for rotates with different modes.
10831 See PR97386. */
10832 if ((code == ASHIFTRT || code == ROTATE)
10833 && int_mode != int_result_mode)
10834 break;
10836 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10837 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10838 XEXP (varop, 0),
10839 count_rtx);
10840 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10841 count = 0;
10842 continue;
10844 break;
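/* Editorial illustration (not part of the original source): a worked
   instance of the nested-shift merge above.  For
   (lshiftrt:SI (lshiftrt:SI X 2) 3) both shifts go the same way, so the
   counts add and the result is (lshiftrt:SI X 5) together with a queued
   outer AND whose mask is the inner shift's nonzero bits moved right by
   3, at most 0x3fffffff >> 3 == 0x07ffffff, exactly the bits a single
   shift by 5 can leave nonzero anyway.  */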
10846 case NOT:
10847 /* The following rules apply only to scalars. */
10848 if (shift_mode != shift_unit_mode)
10849 break;
10851 /* Make this fit the case below. */
10852 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10853 continue;
10855 case IOR:
10856 case AND:
10857 case XOR:
10858 /* The following rules apply only to scalars. */
10859 if (shift_mode != shift_unit_mode)
10860 break;
10861 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10862 int_result_mode = as_a <scalar_int_mode> (result_mode);
10864 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10865 with C the size of VAROP - 1 and the shift is logical if
10866 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10867 we have an (le X 0) operation. If we have an arithmetic shift
10868 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10869 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10871 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10872 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10873 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10874 && (code == LSHIFTRT || code == ASHIFTRT)
10875 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10876 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10878 count = 0;
10879 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10880 const0_rtx);
10882 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10883 varop = gen_rtx_NEG (int_varop_mode, varop);
10885 continue;
10888 /* If we have (shift (logical)), move the logical to the outside
10889 to allow it to possibly combine with another logical and the
10890 shift to combine with another shift. This also canonicalizes to
10891 what a ZERO_EXTRACT looks like. Also, some machines have
10892 (and (shift)) insns. */
10894 if (CONST_INT_P (XEXP (varop, 1))
10895 /* We can't do this if we have (ashiftrt (xor)) and the
10896 constant has its sign bit set in shift_unit_mode with
10897 shift_unit_mode wider than result_mode. */
10898 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10899 && int_result_mode != shift_unit_mode
10900 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10901 shift_unit_mode) < 0)
10902 && (new_rtx = simplify_const_binary_operation
10903 (code, int_result_mode,
10904 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10905 gen_int_shift_amount (int_result_mode, count))) != 0
10906 && CONST_INT_P (new_rtx)
10907 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10908 INTVAL (new_rtx), int_result_mode,
10909 &complement_p))
10911 varop = XEXP (varop, 0);
10912 continue;
10915 /* If we can't do that, try to simplify the shift in each arm of the
10916 logical expression, make a new logical expression, and apply
10917 the inverse distributive law. This also can't be done for
10918 (ashiftrt (xor)) where we've widened the shift and the constant
10919 changes the sign bit. */
10920 if (CONST_INT_P (XEXP (varop, 1))
10921 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10922 && int_result_mode != shift_unit_mode
10923 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10924 shift_unit_mode) < 0))
10926 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10927 XEXP (varop, 0), count);
10928 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10929 XEXP (varop, 1), count);
10931 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10932 lhs, rhs);
10933 varop = apply_distributive_law (varop);
10935 count = 0;
10936 continue;
10938 break;
10940 case EQ:
10941 /* The following rules apply only to scalars. */
10942 if (shift_mode != shift_unit_mode)
10943 break;
10944 int_result_mode = as_a <scalar_int_mode> (result_mode);
10946 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10947 says that the sign bit can be tested, FOO has mode MODE, C is
10948 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10949 that may be nonzero. */
10950 if (code == LSHIFTRT
10951 && XEXP (varop, 1) == const0_rtx
10952 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10953 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10954 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10955 && STORE_FLAG_VALUE == -1
10956 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10957 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10958 int_result_mode, &complement_p))
10960 varop = XEXP (varop, 0);
10961 count = 0;
10962 continue;
10964 break;
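/* Editorial illustration (not part of the original source): with
   STORE_FLAG_VALUE == -1, (eq FOO 0) is either 0 or all ones, so
   (lshiftrt:SI (eq FOO 0) 31) is either 0 or 1.  When FOO itself can
   only be 0 or 1 (its only possibly-nonzero bit is the low bit), that
   value equals FOO ^ 1: FOO == 0 yields 1 and FOO == 1 yields 0.  The
   XOR is queued as the outer operation rather than built here.  */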
10966 case NEG:
10967 /* The following rules apply only to scalars. */
10968 if (shift_mode != shift_unit_mode)
10969 break;
10970 int_result_mode = as_a <scalar_int_mode> (result_mode);
10972 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10973 than the number of bits in the mode is equivalent to A. */
10974 if (code == LSHIFTRT
10975 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10976 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10978 varop = XEXP (varop, 0);
10979 count = 0;
10980 continue;
10983 /* NEG commutes with ASHIFT since it is multiplication. Move the
10984 NEG outside to allow shifts to combine. */
10985 if (code == ASHIFT
10986 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10987 int_result_mode, &complement_p))
10989 varop = XEXP (varop, 0);
10990 continue;
10992 break;
10994 case PLUS:
10995 /* The following rules apply only to scalars. */
10996 if (shift_mode != shift_unit_mode)
10997 break;
10998 int_result_mode = as_a <scalar_int_mode> (result_mode);
11000 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11001 is one less than the number of bits in the mode is
11002 equivalent to (xor A 1). */
11003 if (code == LSHIFTRT
11004 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11005 && XEXP (varop, 1) == constm1_rtx
11006 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11007 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11008 int_result_mode, &complement_p))
11010 count = 0;
11011 varop = XEXP (varop, 0);
11012 continue;
11015 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11016 that might be nonzero in BAR are those being shifted out and those
11017 bits are known zero in FOO, we can replace the PLUS with FOO.
11018 Similarly in the other operand order. This code occurs when
11019 we are computing the size of a variable-size array. */
11021 if ((code == ASHIFTRT || code == LSHIFTRT)
11022 && count < HOST_BITS_PER_WIDE_INT
11023 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11024 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11025 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11027 varop = XEXP (varop, 0);
11028 continue;
11030 else if ((code == ASHIFTRT || code == LSHIFTRT)
11031 && count < HOST_BITS_PER_WIDE_INT
11032 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11033 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11034 >> count) == 0
11035 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11036 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11038 varop = XEXP (varop, 1);
11039 continue;
11042 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11043 if (code == ASHIFT
11044 && CONST_INT_P (XEXP (varop, 1))
11045 && (new_rtx = simplify_const_binary_operation
11046 (ASHIFT, int_result_mode,
11047 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11048 gen_int_shift_amount (int_result_mode, count))) != 0
11049 && CONST_INT_P (new_rtx)
11050 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11051 INTVAL (new_rtx), int_result_mode,
11052 &complement_p))
11054 varop = XEXP (varop, 0);
11055 continue;
11058 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11059 signbit', and attempt to change the PLUS to an XOR and move it to
11060 the outer operation as is done above in the AND/IOR/XOR case
11061 leg for shift(logical). See details in logical handling above
11062 for the reasoning behind doing so. */
11063 if (code == LSHIFTRT
11064 && CONST_INT_P (XEXP (varop, 1))
11065 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11066 && (new_rtx = simplify_const_binary_operation
11067 (code, int_result_mode,
11068 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11069 gen_int_shift_amount (int_result_mode, count))) != 0
11070 && CONST_INT_P (new_rtx)
11071 && merge_outer_ops (&outer_op, &outer_const, XOR,
11072 INTVAL (new_rtx), int_result_mode,
11073 &complement_p))
11075 varop = XEXP (varop, 0);
11076 continue;
11079 break;
11081 case MINUS:
11082 /* The following rules apply only to scalars. */
11083 if (shift_mode != shift_unit_mode)
11084 break;
11085 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11087 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11088 with C the size of VAROP - 1 and the shift is logical if
11089 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11090 we have a (gt X 0) operation. If the shift is arithmetic with
11091 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11092 we have a (neg (gt X 0)) operation. */
11094 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11095 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11096 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11097 && (code == LSHIFTRT || code == ASHIFTRT)
11098 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11099 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11100 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11102 count = 0;
11103 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11104 const0_rtx);
11106 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11107 varop = gen_rtx_NEG (int_varop_mode, varop);
11109 continue;
11111 break;
11113 case TRUNCATE:
11114 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11115 if the truncate does not affect the value. */
11116 if (code == LSHIFTRT
11117 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11118 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11119 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11120 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11121 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11123 rtx varop_inner = XEXP (varop, 0);
11124 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11125 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11126 new_count);
11127 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11128 XEXP (varop_inner, 0),
11129 new_count_rtx);
11130 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11131 count = 0;
11132 continue;
11134 break;
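/* Editorial illustration (not part of the original source): a worked
   instance of the TRUNCATE rule above.  In
   (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3) the inner DImode
   shift already discards the 32 bits the truncation would drop, so the
   truncation loses nothing and the two logical shifts merge inside it,
   giving (truncate:SI (lshiftrt:DI X 35)).  */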
11136 default:
11137 break;
11140 break;
11143 shift_mode = result_mode;
11144 if (shift_mode != mode)
11146 /* We only change the modes of scalar shifts. */
11147 int_mode = as_a <scalar_int_mode> (mode);
11148 int_result_mode = as_a <scalar_int_mode> (result_mode);
11149 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11150 int_mode, outer_op, outer_const);
11153 /* We have now finished analyzing the shift. The result should be
11154 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11155 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11156 to the result of the shift. OUTER_CONST is the relevant constant,
11157 but we must turn off all bits turned off in the shift. */
11159 if (outer_op == UNKNOWN
11160 && orig_code == code && orig_count == count
11161 && varop == orig_varop
11162 && shift_mode == GET_MODE (varop))
11163 return NULL_RTX;
11165 /* Make a SUBREG if necessary. If we can't make it, fail. */
11166 varop = gen_lowpart (shift_mode, varop);
11167 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11168 return NULL_RTX;
11170 /* If we have an outer operation and we just made a shift, it is
11171 possible that we could have simplified the shift were it not
11172 for the outer operation. So try to do the simplification
11173 recursively. */
11175 if (outer_op != UNKNOWN)
11176 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11177 else
11178 x = NULL_RTX;
11180 if (x == NULL_RTX)
11181 x = simplify_gen_binary (code, shift_mode, varop,
11182 gen_int_shift_amount (shift_mode, count));
11184 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11185 turn off all the bits that the shift would have turned off. */
11186 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11187 /* We only change the modes of scalar shifts. */
11188 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11189 x, GET_MODE_MASK (result_mode) >> orig_count);
11191 /* Do the remainder of the processing in RESULT_MODE. */
11192 x = gen_lowpart_or_truncate (result_mode, x);
11194 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11195 operation. */
11196 if (complement_p)
11197 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11199 if (outer_op != UNKNOWN)
11201 int_result_mode = as_a <scalar_int_mode> (result_mode);
11203 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11204 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11205 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11207 if (outer_op == AND)
11208 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11209 else if (outer_op == SET)
11211 /* This means that we have determined that the result is
11212 equivalent to a constant. This should be rare. */
11213 if (!side_effects_p (x))
11214 x = GEN_INT (outer_const);
11216 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11217 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11218 else
11219 x = simplify_gen_binary (outer_op, int_result_mode, x,
11220 GEN_INT (outer_const));
11223 return x;
11226 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11227 The result of the shift is RESULT_MODE. If we cannot simplify it,
11228 return X or, if it is NULL, synthesize the expression with
11229 simplify_gen_binary. Otherwise, return a simplified value.
11231 The shift is normally computed in the widest mode we find in VAROP, as
11232 long as it isn't a different number of words than RESULT_MODE. Exceptions
11233 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11235 static rtx
11236 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11237 rtx varop, int count)
11239 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11240 if (tem)
11241 return tem;
11243 if (!x)
11244 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11245 gen_int_shift_amount (GET_MODE (varop), count));
11246 if (GET_MODE (x) != result_mode)
11247 x = gen_lowpart (result_mode, x);
11248 return x;
11252 /* A subroutine of recog_for_combine. See there for arguments and
11253 return value. */
11255 static int
11256 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11258 rtx pat = *pnewpat;
11259 rtx pat_without_clobbers;
11260 int insn_code_number;
11261 int num_clobbers_to_add = 0;
11262 int i;
11263 rtx notes = NULL_RTX;
11264 rtx old_notes, old_pat;
11265 int old_icode;
11267 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11268 we use to indicate that something didn't match. If we find such a
11269 thing, force rejection. */
11270 if (GET_CODE (pat) == PARALLEL)
11271 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11272 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11273 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11274 return -1;
11276 old_pat = PATTERN (insn);
11277 old_notes = REG_NOTES (insn);
11278 PATTERN (insn) = pat;
11279 REG_NOTES (insn) = NULL_RTX;
11281 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11282 if (dump_file && (dump_flags & TDF_DETAILS))
11284 if (insn_code_number < 0)
11285 fputs ("Failed to match this instruction:\n", dump_file);
11286 else
11287 fputs ("Successfully matched this instruction:\n", dump_file);
11288 print_rtl_single (dump_file, pat);
11291 /* If it isn't recognized, there is the possibility that we previously had an insn
11292 that clobbered some register as a side effect, but the combined
11293 insn doesn't need to do that. So try once more without the clobbers
11294 unless this represents an ASM insn. */
11296 if (insn_code_number < 0 && ! check_asm_operands (pat)
11297 && GET_CODE (pat) == PARALLEL)
11299 int pos;
11301 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11302 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11304 if (i != pos)
11305 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11306 pos++;
11309 SUBST_INT (XVECLEN (pat, 0), pos);
11311 if (pos == 1)
11312 pat = XVECEXP (pat, 0, 0);
11314 PATTERN (insn) = pat;
11315 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11316 if (dump_file && (dump_flags & TDF_DETAILS))
11318 if (insn_code_number < 0)
11319 fputs ("Failed to match this instruction:\n", dump_file);
11320 else
11321 fputs ("Successfully matched this instruction:\n", dump_file);
11322 print_rtl_single (dump_file, pat);
11326 pat_without_clobbers = pat;
11328 PATTERN (insn) = old_pat;
11329 REG_NOTES (insn) = old_notes;
11331 /* Recognize all noop sets; these will be killed by a follow-up pass. */
11332 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11333 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11335 /* If we had any clobbers to add, make a new pattern that contains
11336 them. Then check to make sure that all of them are dead. */
11337 if (num_clobbers_to_add)
11339 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11340 rtvec_alloc (GET_CODE (pat) == PARALLEL
11341 ? (XVECLEN (pat, 0)
11342 + num_clobbers_to_add)
11343 : num_clobbers_to_add + 1));
11345 if (GET_CODE (pat) == PARALLEL)
11346 for (i = 0; i < XVECLEN (pat, 0); i++)
11347 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11348 else
11349 XVECEXP (newpat, 0, 0) = pat;
11351 add_clobbers (newpat, insn_code_number);
11353 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11354 i < XVECLEN (newpat, 0); i++)
11356 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11357 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11358 return -1;
11359 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11361 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11362 notes = alloc_reg_note (REG_UNUSED,
11363 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11366 pat = newpat;
11369 if (insn_code_number >= 0
11370 && insn_code_number != NOOP_MOVE_INSN_CODE)
11372 old_pat = PATTERN (insn);
11373 old_notes = REG_NOTES (insn);
11374 old_icode = INSN_CODE (insn);
11375 PATTERN (insn) = pat;
11376 REG_NOTES (insn) = notes;
11377 INSN_CODE (insn) = insn_code_number;
11379 /* Allow targets to reject combined insn. */
11380 if (!targetm.legitimate_combined_insn (insn))
11382 if (dump_file && (dump_flags & TDF_DETAILS))
11383 fputs ("Instruction not appropriate for target.",
11384 dump_file);
11386 /* Callers expect recog_for_combine to strip
11387 clobbers from the pattern on failure. */
11388 pat = pat_without_clobbers;
11389 notes = NULL_RTX;
11391 insn_code_number = -1;
11394 PATTERN (insn) = old_pat;
11395 REG_NOTES (insn) = old_notes;
11396 INSN_CODE (insn) = old_icode;
11399 *pnewpat = pat;
11400 *pnotes = notes;
11402 return insn_code_number;
11405 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11406 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11407 Return whether anything was so changed. */
11409 static bool
11410 change_zero_ext (rtx pat)
11412 bool changed = false;
11413 rtx *src = &SET_SRC (pat);
11415 subrtx_ptr_iterator::array_type array;
11416 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11418 rtx x = **iter;
11419 scalar_int_mode mode, inner_mode;
11420 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11421 continue;
11422 int size;
11424 if (GET_CODE (x) == ZERO_EXTRACT
11425 && CONST_INT_P (XEXP (x, 1))
11426 && CONST_INT_P (XEXP (x, 2))
11427 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11428 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11430 size = INTVAL (XEXP (x, 1));
11432 int start = INTVAL (XEXP (x, 2));
11433 if (BITS_BIG_ENDIAN)
11434 start = GET_MODE_PRECISION (inner_mode) - size - start;
11436 if (start != 0)
11437 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11438 gen_int_shift_amount (inner_mode, start));
11439 else
11440 x = XEXP (x, 0);
11442 if (mode != inner_mode)
11444 if (REG_P (x) && HARD_REGISTER_P (x)
11445 && !can_change_dest_mode (x, 0, mode))
11446 continue;
11448 x = gen_lowpart_SUBREG (mode, x);
11451 else if (GET_CODE (x) == ZERO_EXTEND
11452 && GET_CODE (XEXP (x, 0)) == SUBREG
11453 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11454 && !paradoxical_subreg_p (XEXP (x, 0))
11455 && subreg_lowpart_p (XEXP (x, 0)))
11457 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11458 size = GET_MODE_PRECISION (inner_mode);
11459 x = SUBREG_REG (XEXP (x, 0));
11460 if (GET_MODE (x) != mode)
11462 if (REG_P (x) && HARD_REGISTER_P (x)
11463 && !can_change_dest_mode (x, 0, mode))
11464 continue;
11466 x = gen_lowpart_SUBREG (mode, x);
11469 else if (GET_CODE (x) == ZERO_EXTEND
11470 && REG_P (XEXP (x, 0))
11471 && HARD_REGISTER_P (XEXP (x, 0))
11472 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11474 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11475 size = GET_MODE_PRECISION (inner_mode);
11476 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11478 else
11479 continue;
11481 if (!(GET_CODE (x) == LSHIFTRT
11482 && CONST_INT_P (XEXP (x, 1))
11483 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11485 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11486 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11489 SUBST (**iter, x);
11490 changed = true;
11493 if (changed)
11494 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11495 maybe_swap_commutative_operands (**iter);
11497 rtx *dst = &SET_DEST (pat);
11498 scalar_int_mode mode;
11499 if (GET_CODE (*dst) == ZERO_EXTRACT
11500 && REG_P (XEXP (*dst, 0))
11501 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11502 && CONST_INT_P (XEXP (*dst, 1))
11503 && CONST_INT_P (XEXP (*dst, 2)))
11505 rtx reg = XEXP (*dst, 0);
11506 int width = INTVAL (XEXP (*dst, 1));
11507 int offset = INTVAL (XEXP (*dst, 2));
11508 int reg_width = GET_MODE_PRECISION (mode);
11509 if (BITS_BIG_ENDIAN)
11510 offset = reg_width - width - offset;
11512 rtx x, y, z, w;
11513 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11514 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11515 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11516 if (offset)
11517 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11518 else
11519 y = SET_SRC (pat);
11520 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11521 w = gen_rtx_IOR (mode, x, z);
11522 SUBST (SET_DEST (pat), reg);
11523 SUBST (SET_SRC (pat), w);
11525 changed = true;
11528 return changed;
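/* Editorial illustration (not part of the original source): two typical
   rewrites performed by change_zero_ext.  A source of
   (zero_extend:SI (subreg:QI (reg:SI R) 0)) becomes
   (and:SI (reg:SI R) (const_int 255)), and, with little-endian bit
   numbering, (zero_extract:SI (reg:SI R) (const_int 8) (const_int 8))
   becomes (and:SI (lshiftrt:SI (reg:SI R) (const_int 8)) (const_int 255));
   on BITS_BIG_ENDIAN targets the start position is first recounted from
   the other end.  */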
11531 /* Like recog, but we receive the address of a pointer to a new pattern.
11532 We try to match the rtx that the pointer points to.
11533 If that fails, we may try to modify or replace the pattern,
11534 storing the replacement into the same pointer object.
11536 Modifications include deletion or addition of CLOBBERs. If the
11537 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11538 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11539 (and undo if that fails).
11541 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11542 the CLOBBERs are placed.
11544 The value is the final insn code from the pattern ultimately matched,
11545 or -1. */
11547 static int
11548 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11550 rtx pat = *pnewpat;
11551 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11552 if (insn_code_number >= 0 || check_asm_operands (pat))
11553 return insn_code_number;
11555 void *marker = get_undo_marker ();
11556 bool changed = false;
11558 if (GET_CODE (pat) == SET)
11559 changed = change_zero_ext (pat);
11560 else if (GET_CODE (pat) == PARALLEL)
11562 int i;
11563 for (i = 0; i < XVECLEN (pat, 0); i++)
11565 rtx set = XVECEXP (pat, 0, i);
11566 if (GET_CODE (set) == SET)
11567 changed |= change_zero_ext (set);
11571 if (changed)
11573 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11575 if (insn_code_number < 0)
11576 undo_to_marker (marker);
11579 return insn_code_number;
11582 /* Like gen_lowpart_general but for use by combine. In combine it
11583 is not possible to create any new pseudoregs. However, it is
11584 safe to create invalid memory addresses, because combine will
11585 try to recognize them and all they will do is make the combine
11586 attempt fail.
11588 If for some reason this cannot do its job, an rtx
11589 (clobber (const_int 0)) is returned.
11590 An insn containing that will not be recognized. */
11592 static rtx
11593 gen_lowpart_for_combine (machine_mode omode, rtx x)
11595 machine_mode imode = GET_MODE (x);
11596 rtx result;
11598 if (omode == imode)
11599 return x;
11601 /* We can only support MODE being wider than a word if X is a
11602 constant integer or has a mode the same size. */
11603 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11604 && ! (CONST_SCALAR_INT_P (x)
11605 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11606 goto fail;
11608 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11609 won't know what to do. So we will strip off the SUBREG here and
11610 process normally. */
11611 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11613 x = SUBREG_REG (x);
11615 /* For use in case we fall down into the address adjustments
11616 further below, we need to update the known mode of X (IMODE),
11617 since we just stripped the SUBREG. */
11618 imode = GET_MODE (x);
11620 if (imode == omode)
11621 return x;
11624 result = gen_lowpart_common (omode, x);
11626 if (result)
11627 return result;
11629 if (MEM_P (x))
11631 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11632 address. */
11633 if (MEM_VOLATILE_P (x)
11634 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11635 goto fail;
11637 /* If we want to refer to something bigger than the original memref,
11638 generate a paradoxical subreg instead. That will force a reload
11639 of the original memref X. */
11640 if (paradoxical_subreg_p (omode, imode))
11641 return gen_rtx_SUBREG (omode, x, 0);
11643 poly_int64 offset = byte_lowpart_offset (omode, imode);
11644 return adjust_address_nv (x, omode, offset);
11647 /* If X is a comparison operator, rewrite it in a new mode. This
11648 probably won't match, but may allow further simplifications. */
11649 else if (COMPARISON_P (x)
11650 && SCALAR_INT_MODE_P (imode)
11651 && SCALAR_INT_MODE_P (omode))
11652 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11654 /* If we couldn't simplify X any other way, just enclose it in a
11655 SUBREG. Normally, this SUBREG won't match, but some patterns may
11656 include an explicit SUBREG or we may simplify it further in combine. */
11657 else
11659 rtx res;
11661 if (imode == VOIDmode)
11663 imode = int_mode_for_mode (omode).require ();
11664 x = gen_lowpart_common (imode, x);
11665 if (x == NULL)
11666 goto fail;
11668 res = lowpart_subreg (omode, x, imode);
11669 if (res)
11670 return res;
11673 fail:
11674 return gen_rtx_CLOBBER (omode, const0_rtx);
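/* Editorial illustration (not part of the original source): two ways the
   paths above can end.  Asking for the SImode lowpart of (mem:QI A)
   falls into the paradoxical-subreg case and yields
   (subreg:SI (mem:QI A) 0), deliberately forcing a reload of the memory
   reference, while asking for any lowpart of a volatile MEM takes the
   failure path and returns (clobber (const_int 0)), so the enclosing
   combination attempt simply fails to match.  */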
11677 /* Try to simplify a comparison between OP0 and a constant OP1,
11678 where CODE is the comparison code that will be tested, into a
11679 (CODE OP0 const0_rtx) form.
11681 The result is a possibly different comparison code to use.
11682 *POP1 may be updated. */
11684 static enum rtx_code
11685 simplify_compare_const (enum rtx_code code, machine_mode mode,
11686 rtx op0, rtx *pop1)
11688 scalar_int_mode int_mode;
11689 HOST_WIDE_INT const_op = INTVAL (*pop1);
11691 /* Get the constant we are comparing against and turn off all bits
11692 not on in our mode. */
11693 if (mode != VOIDmode)
11694 const_op = trunc_int_for_mode (const_op, mode);
11696 /* If we are comparing against a constant power of two and the value
11697 being compared can only have that single bit nonzero (e.g., it was
11698 `and'ed with that bit), we can replace this with a comparison
11699 with zero. */
11700 if (const_op
11701 && (code == EQ || code == NE || code == GE || code == GEU
11702 || code == LT || code == LTU)
11703 && is_a <scalar_int_mode> (mode, &int_mode)
11704 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11705 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11706 && (nonzero_bits (op0, int_mode)
11707 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11709 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11710 const_op = 0;
11713 /* Similarly, if we are comparing a value known to be either -1 or
11714 0 with -1, change it to the opposite comparison against zero. */
11715 if (const_op == -1
11716 && (code == EQ || code == NE || code == GT || code == LE
11717 || code == GEU || code == LTU)
11718 && is_a <scalar_int_mode> (mode, &int_mode)
11719 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11721 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11722 const_op = 0;
11725 /* Do some canonicalizations based on the comparison code. We prefer
11726 comparisons against zero and then prefer equality comparisons.
11727 If we can reduce the size of a constant, we will do that too. */
11728 switch (code)
11730 case LT:
11731 /* < C is equivalent to <= (C - 1) */
11732 if (const_op > 0)
11734 const_op -= 1;
11735 code = LE;
11736 /* ... fall through to LE case below. */
11737 gcc_fallthrough ();
11739 else
11740 break;
11742 case LE:
11743 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11744 if (const_op < 0)
11746 const_op += 1;
11747 code = LT;
11750 /* If we are doing a <= 0 comparison on a value known to have
11751 a zero sign bit, we can replace this with == 0. */
11752 else if (const_op == 0
11753 && is_a <scalar_int_mode> (mode, &int_mode)
11754 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11755 && (nonzero_bits (op0, int_mode)
11756 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11757 == 0)
11758 code = EQ;
11759 break;
11761 case GE:
11762 /* >= C is equivalent to > (C - 1). */
11763 if (const_op > 0)
11765 const_op -= 1;
11766 code = GT;
11767 /* ... fall through to GT below. */
11768 gcc_fallthrough ();
11770 else
11771 break;
11773 case GT:
11774 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11775 if (const_op < 0)
11777 const_op += 1;
11778 code = GE;
11781 /* If we are doing a > 0 comparison on a value known to have
11782 a zero sign bit, we can replace this with != 0. */
11783 else if (const_op == 0
11784 && is_a <scalar_int_mode> (mode, &int_mode)
11785 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11786 && (nonzero_bits (op0, int_mode)
11787 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11788 == 0)
11789 code = NE;
11790 break;
11792 case LTU:
11793 /* < C is equivalent to <= (C - 1). */
11794 if (const_op > 0)
11796 const_op -= 1;
11797 code = LEU;
11798 /* ... fall through ... */
11799 gcc_fallthrough ();
11801 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11802 else if (is_a <scalar_int_mode> (mode, &int_mode)
11803 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11804 && ((unsigned HOST_WIDE_INT) const_op
11805 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11807 const_op = 0;
11808 code = GE;
11809 break;
11811 else
11812 break;
11814 case LEU:
11815 /* unsigned <= 0 is equivalent to == 0 */
11816 if (const_op == 0)
11817 code = EQ;
11818 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11819 else if (is_a <scalar_int_mode> (mode, &int_mode)
11820 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11821 && ((unsigned HOST_WIDE_INT) const_op
11822 == ((HOST_WIDE_INT_1U
11823 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11825 const_op = 0;
11826 code = GE;
11828 break;
11830 case GEU:
11831 /* >= C is equivalent to > (C - 1). */
11832 if (const_op > 1)
11834 const_op -= 1;
11835 code = GTU;
11836 /* ... fall through ... */
11837 gcc_fallthrough ();
11840 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11841 else if (is_a <scalar_int_mode> (mode, &int_mode)
11842 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11843 && ((unsigned HOST_WIDE_INT) const_op
11844 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11846 const_op = 0;
11847 code = LT;
11848 break;
11850 else
11851 break;
11853 case GTU:
11854 /* unsigned > 0 is equivalent to != 0 */
11855 if (const_op == 0)
11856 code = NE;
11857 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11858 else if (is_a <scalar_int_mode> (mode, &int_mode)
11859 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11860 && ((unsigned HOST_WIDE_INT) const_op
11861 == (HOST_WIDE_INT_1U
11862 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11864 const_op = 0;
11865 code = LT;
11867 break;
11869 default:
11870 break;
11873 *pop1 = GEN_INT (const_op);
11874 return code;
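/* Editorial illustration (not part of the original source): typical
   canonicalizations performed above for a 32-bit operand.
   (ltu X (const_int 0x80000000)) becomes (ge X (const_int 0)), since an
   unsigned value below 2**31 is exactly one whose sign bit is clear;
   (lt X (const_int 5)) becomes (le X (const_int 4)); and
   (gtu X (const_int 0)) becomes (ne X (const_int 0)).  */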
11877 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11878 comparison code that will be tested.
11880 The result is a possibly different comparison code to use. *POP0 and
11881 *POP1 may be updated.
11883 It is possible that we might detect that a comparison is either always
11884 true or always false. However, we do not perform general constant
11885 folding in combine, so this knowledge isn't useful. Such tautologies
11886 should have been detected earlier. Hence we ignore all such cases. */
11888 static enum rtx_code
11889 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11891 rtx op0 = *pop0;
11892 rtx op1 = *pop1;
11893 rtx tem, tem1;
11894 int i;
11895 scalar_int_mode mode, inner_mode, tmode;
11896 opt_scalar_int_mode tmode_iter;
11898 /* Try a few ways of applying the same transformation to both operands. */
11899 while (1)
11901 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11902 so check specially. */
11903 if (!WORD_REGISTER_OPERATIONS
11904 && code != GTU && code != GEU && code != LTU && code != LEU
11905 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11906 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11907 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11908 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11909 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11910 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11911 && (is_a <scalar_int_mode>
11912 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11913 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11914 && CONST_INT_P (XEXP (op0, 1))
11915 && XEXP (op0, 1) == XEXP (op1, 1)
11916 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11917 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11918 && (INTVAL (XEXP (op0, 1))
11919 == (GET_MODE_PRECISION (mode)
11920 - GET_MODE_PRECISION (inner_mode))))
11922 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11923 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11926 /* If both operands are the same constant shift, see if we can ignore the
11927 shift. We can if the shift is a rotate or if the bits shifted out of
11928 this shift are known to be zero for both inputs and if the type of
11929 comparison is compatible with the shift. */
11930 if (GET_CODE (op0) == GET_CODE (op1)
11931 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11932 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11933 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11934 && (code != GT && code != LT && code != GE && code != LE))
11935 || (GET_CODE (op0) == ASHIFTRT
11936 && (code != GTU && code != LTU
11937 && code != GEU && code != LEU)))
11938 && CONST_INT_P (XEXP (op0, 1))
11939 && INTVAL (XEXP (op0, 1)) >= 0
11940 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11941 && XEXP (op0, 1) == XEXP (op1, 1))
11943 machine_mode mode = GET_MODE (op0);
11944 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11945 int shift_count = INTVAL (XEXP (op0, 1));
11947 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11948 mask &= (mask >> shift_count) << shift_count;
11949 else if (GET_CODE (op0) == ASHIFT)
11950 mask = (mask & (mask << shift_count)) >> shift_count;
11952 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11953 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11954 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11955 else
11956 break;
11959 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11960 SUBREGs are of the same mode, and, in both cases, the AND would
11961 be redundant if the comparison was done in the narrower mode,
11962 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11963 and the operand's possibly nonzero bits are 0xffffff01; in that case
11964 if we only care about QImode, we don't need the AND). This case
11965 occurs if the output mode of an scc insn is not SImode and
11966 STORE_FLAG_VALUE == 1 (e.g., the 386).
11968 Similarly, check for a case where the AND's are ZERO_EXTEND
11969 operations from some narrower mode even though a SUBREG is not
11970 present. */
11972 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11973 && CONST_INT_P (XEXP (op0, 1))
11974 && CONST_INT_P (XEXP (op1, 1)))
11976 rtx inner_op0 = XEXP (op0, 0);
11977 rtx inner_op1 = XEXP (op1, 0);
11978 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11979 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11980 int changed = 0;
11982 if (paradoxical_subreg_p (inner_op0)
11983 && GET_CODE (inner_op1) == SUBREG
11984 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
11985 && (GET_MODE (SUBREG_REG (inner_op0))
11986 == GET_MODE (SUBREG_REG (inner_op1)))
11987 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11988 GET_MODE (SUBREG_REG (inner_op0)))) == 0
11989 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11990 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
11992 op0 = SUBREG_REG (inner_op0);
11993 op1 = SUBREG_REG (inner_op1);
11995 /* The resulting comparison is always unsigned since we masked
11996 off the original sign bit. */
11997 code = unsigned_condition (code);
11999 changed = 1;
12002 else if (c0 == c1)
12003 FOR_EACH_MODE_UNTIL (tmode,
12004 as_a <scalar_int_mode> (GET_MODE (op0)))
12005 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12007 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12008 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12009 code = unsigned_condition (code);
12010 changed = 1;
12011 break;
12014 if (! changed)
12015 break;
12018 /* If both operands are NOT, we can strip off the outer operation
12019 and adjust the comparison code for swapped operands; similarly for
12020 NEG, except that this must be an equality comparison. */
12021 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12022 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12023 && (code == EQ || code == NE)))
12024 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12026 else
12027 break;
12030 /* If the first operand is a constant, swap the operands and adjust the
12031 comparison code appropriately, but don't do this if the second operand
12032 is already a constant integer. */
12033 if (swap_commutative_operands_p (op0, op1))
12035 std::swap (op0, op1);
12036 code = swap_condition (code);
12039 /* We now enter a loop during which we will try to simplify the comparison.
12040 For the most part, we only are concerned with comparisons with zero,
12041 but some things may really be comparisons with zero but not start
12042 out looking that way. */
12044 while (CONST_INT_P (op1))
12046 machine_mode raw_mode = GET_MODE (op0);
12047 scalar_int_mode int_mode;
12048 int equality_comparison_p;
12049 int sign_bit_comparison_p;
12050 int unsigned_comparison_p;
12051 HOST_WIDE_INT const_op;
12053 /* We only want to handle integral modes. This catches VOIDmode,
12054 CCmode, and the floating-point modes. An exception is that we
12055 can handle VOIDmode if OP0 is a COMPARE or a comparison
12056 operation. */
12058 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12059 && ! (raw_mode == VOIDmode
12060 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12061 break;
12063 /* Try to simplify the compare to constant, possibly changing the
12064 comparison op, and/or changing op1 to zero. */
12065 code = simplify_compare_const (code, raw_mode, op0, &op1);
12066 const_op = INTVAL (op1);
12068 /* Compute some predicates to simplify code below. */
12070 equality_comparison_p = (code == EQ || code == NE);
12071 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12072 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12073 || code == GEU);
12075 /* If this is a sign bit comparison and we can do arithmetic in
12076 MODE, say that we will only be needing the sign bit of OP0. */
12077 if (sign_bit_comparison_p
12078 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12079 && HWI_COMPUTABLE_MODE_P (int_mode))
12080 op0 = force_to_mode (op0, int_mode,
12081 HOST_WIDE_INT_1U
12082 << (GET_MODE_PRECISION (int_mode) - 1), 0);
12085 if (COMPARISON_P (op0))
12087 /* We can't do anything if OP0 is a condition code value, rather
12088 than an actual data value. */
12089 if (const_op != 0
12090 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12091 break;
12093 /* Get the two operands being compared. */
12094 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12095 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12096 else
12097 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12099 /* Check for the cases where we simply want the result of the
12100 earlier test or the opposite of that result. */
12101 if (code == NE || code == EQ
12102 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12103 && (code == LT || code == GE)))
12105 enum rtx_code new_code;
12106 if (code == LT || code == NE)
12107 new_code = GET_CODE (op0);
12108 else
12109 new_code = reversed_comparison_code (op0, NULL);
12111 if (new_code != UNKNOWN)
12113 code = new_code;
12114 op0 = tem;
12115 op1 = tem1;
12116 continue;
12119 break;
12122 if (raw_mode == VOIDmode)
12123 break;
12124 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12126 /* Now try cases based on the opcode of OP0. If none of the cases
12127 does a "continue", we exit this loop immediately after the
12128 switch. */
12130 unsigned int mode_width = GET_MODE_PRECISION (mode);
12131 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12132 switch (GET_CODE (op0))
12134 case ZERO_EXTRACT:
12135 /* If we are extracting a single bit from a variable position in
12136 a constant that has only a single bit set and are comparing it
12137 with zero, we can convert this into an equality comparison
12138 between the position and the location of the single bit. */
12139 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12140 have already reduced the shift count modulo the word size. */
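/* For example, with little-endian bit numbering,
   (eq (zero_extract (const_int 4) (const_int 1) POS) (const_int 0))
   becomes (ne POS (const_int 2)), since bit 2 is the only bit set in
   the constant. */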
12141 if (!SHIFT_COUNT_TRUNCATED
12142 && CONST_INT_P (XEXP (op0, 0))
12143 && XEXP (op0, 1) == const1_rtx
12144 && equality_comparison_p && const_op == 0
12145 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12147 if (BITS_BIG_ENDIAN)
12148 i = BITS_PER_WORD - 1 - i;
12150 op0 = XEXP (op0, 2);
12151 op1 = GEN_INT (i);
12152 const_op = i;
12154 /* Result is nonzero iff shift count is equal to I. */
12155 code = reverse_condition (code);
12156 continue;
12159 /* fall through */
12161 case SIGN_EXTRACT:
12162 tem = expand_compound_operation (op0);
12163 if (tem != op0)
12165 op0 = tem;
12166 continue;
12168 break;
12170 case NOT:
12171 /* If testing for equality, we can take the NOT of the constant. */
12172 if (equality_comparison_p
12173 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12175 op0 = XEXP (op0, 0);
12176 op1 = tem;
12177 continue;
12180 /* If just looking at the sign bit, reverse the sense of the
12181 comparison. */
12182 if (sign_bit_comparison_p)
12184 op0 = XEXP (op0, 0);
12185 code = (code == GE ? LT : GE);
12186 continue;
12188 break;
12190 case NEG:
12191 /* If testing for equality, we can take the NEG of the constant. */
12192 if (equality_comparison_p
12193 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12195 op0 = XEXP (op0, 0);
12196 op1 = tem;
12197 continue;
12200 /* The remaining cases only apply to comparisons with zero. */
12201 if (const_op != 0)
12202 break;
12204 /* When X is ABS or is known positive,
12205 (neg X) is < 0 if and only if X != 0. */
12207 if (sign_bit_comparison_p
12208 && (GET_CODE (XEXP (op0, 0)) == ABS
12209 || (mode_width <= HOST_BITS_PER_WIDE_INT
12210 && (nonzero_bits (XEXP (op0, 0), mode)
12211 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12212 == 0)))
12214 op0 = XEXP (op0, 0);
12215 code = (code == LT ? NE : EQ);
12216 continue;
12219 /* If we have NEG of something whose two high-order bits are the
12220 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12221 if (num_sign_bit_copies (op0, mode) >= 2)
12223 op0 = XEXP (op0, 0);
12224 code = swap_condition (code);
12225 continue;
12227 break;
12229 case ROTATE:
12230 /* If we are testing equality and our count is a constant, we
12231 can perform the inverse operation on our RHS. */
12232 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12233 && (tem = simplify_binary_operation (ROTATERT, mode,
12234 op1, XEXP (op0, 1))) != 0)
12236 op0 = XEXP (op0, 0);
12237 op1 = tem;
12238 continue;
12241 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12242 a particular bit. Convert it to an AND of a constant of that
12243 bit. This will be converted into a ZERO_EXTRACT. */
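/* For example, in QImode (lt (rotate X (const_int 7)) (const_int 0))
   tests bit 0 of X and so becomes
   (ne (and X (const_int 1)) (const_int 0)). */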
12244 if (const_op == 0 && sign_bit_comparison_p
12245 && CONST_INT_P (XEXP (op0, 1))
12246 && mode_width <= HOST_BITS_PER_WIDE_INT
12247 && UINTVAL (XEXP (op0, 1)) < mode_width)
12249 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12250 (HOST_WIDE_INT_1U
12251 << (mode_width - 1
12252 - INTVAL (XEXP (op0, 1)))));
12253 code = (code == LT ? NE : EQ);
12254 continue;
12257 /* Fall through. */
12259 case ABS:
12260 /* ABS is ignorable inside an equality comparison with zero. */
12261 if (const_op == 0 && equality_comparison_p)
12263 op0 = XEXP (op0, 0);
12264 continue;
12266 break;
12268 case SIGN_EXTEND:
12269 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12270 (compare FOO CONST) if CONST fits in FOO's mode and we
12271 are either testing inequality or have an unsigned
12272 comparison with ZERO_EXTEND or a signed comparison with
12273 SIGN_EXTEND. But don't do it if we don't have a compare
12274 insn of the given mode, since we'd have to revert it
12275 later on, and then we wouldn't know whether to sign- or
12276 zero-extend. */
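/* For example, (lt (sign_extend:SI (reg:QI R)) (const_int 10)) can
   become (lt (reg:QI R) (const_int 10)) when a QImode compare insn
   exists, since 10 is representable in QImode. */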
12277 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12278 && ! unsigned_comparison_p
12279 && HWI_COMPUTABLE_MODE_P (mode)
12280 && trunc_int_for_mode (const_op, mode) == const_op
12281 && have_insn_for (COMPARE, mode))
12283 op0 = XEXP (op0, 0);
12284 continue;
12286 break;
12288 case SUBREG:
12289 /* Check for the case where we are comparing A - C1 with C2, that is
12291 (subreg:MODE (plus (A) (-C1))) op (C2)
12293 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12294 comparison in the wider mode. One of the following two conditions
12295 must be true in order for this to be valid:
12297 1. The mode extension results in the same bit pattern being added
12298 on both sides and the comparison is equality or unsigned. As
12299 C2 has been truncated to fit in MODE, the pattern can only be
12300 all 0s or all 1s.
12302 2. The mode extension results in the sign bit being copied on
12303 each side.
12305 The difficulty here is that we have predicates for A but not for
12306 (A - C1) so we need to check that C1 is within proper bounds so
12307 as to perturb A as little as possible. */
12309 if (mode_width <= HOST_BITS_PER_WIDE_INT
12310 && subreg_lowpart_p (op0)
12311 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12312 &inner_mode)
12313 && GET_MODE_PRECISION (inner_mode) > mode_width
12314 && GET_CODE (SUBREG_REG (op0)) == PLUS
12315 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12317 rtx a = XEXP (SUBREG_REG (op0), 0);
12318 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12320 if ((c1 > 0
12321 && (unsigned HOST_WIDE_INT) c1
12322 < HOST_WIDE_INT_1U << (mode_width - 1)
12323 && (equality_comparison_p || unsigned_comparison_p)
12324 /* (A - C1) zero-extends if it is positive and sign-extends
12325 if it is negative, C2 both zero- and sign-extends. */
12326 && (((nonzero_bits (a, inner_mode)
12327 & ~GET_MODE_MASK (mode)) == 0
12328 && const_op >= 0)
12329 /* (A - C1) sign-extends if it is positive and 1-extends
12330 if it is negative, C2 both sign- and 1-extends. */
12331 || (num_sign_bit_copies (a, inner_mode)
12332 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12333 - mode_width)
12334 && const_op < 0)))
12335 || ((unsigned HOST_WIDE_INT) c1
12336 < HOST_WIDE_INT_1U << (mode_width - 2)
12337 /* (A - C1) always sign-extends, like C2. */
12338 && num_sign_bit_copies (a, inner_mode)
12339 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12340 - (mode_width - 1))))
12342 op0 = SUBREG_REG (op0);
12343 continue;
12347 /* If the inner mode is narrower and we are extracting the low part,
12348 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12349 if (paradoxical_subreg_p (op0))
12350 ;
12351 else if (subreg_lowpart_p (op0)
12352 && GET_MODE_CLASS (mode) == MODE_INT
12353 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12354 && (code == NE || code == EQ)
12355 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12356 && !paradoxical_subreg_p (op0)
12357 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12358 & ~GET_MODE_MASK (mode)) == 0)
12360 /* Remove outer subregs that don't do anything. */
12361 tem = gen_lowpart (inner_mode, op1);
12363 if ((nonzero_bits (tem, inner_mode)
12364 & ~GET_MODE_MASK (mode)) == 0)
12366 op0 = SUBREG_REG (op0);
12367 op1 = tem;
12368 continue;
12370 break;
12372 else
12373 break;
12375 /* FALLTHROUGH */
12377 case ZERO_EXTEND:
12378 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12379 && (unsigned_comparison_p || equality_comparison_p)
12380 && HWI_COMPUTABLE_MODE_P (mode)
12381 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12382 && const_op >= 0
12383 && have_insn_for (COMPARE, mode))
12385 op0 = XEXP (op0, 0);
12386 continue;
12388 break;
12390 case PLUS:
12391 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12392 this for equality comparisons due to pathological cases involving
12393 overflows. */
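/* For example, (eq (plus X (const_int 3)) (const_int 10)) becomes
   (eq X (const_int 7)). */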
12394 if (equality_comparison_p
12395 && (tem = simplify_binary_operation (MINUS, mode,
12396 op1, XEXP (op0, 1))) != 0)
12398 op0 = XEXP (op0, 0);
12399 op1 = tem;
12400 continue;
12403 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12404 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12405 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12407 op0 = XEXP (XEXP (op0, 0), 0);
12408 code = (code == LT ? EQ : NE);
12409 continue;
12411 break;
12413 case MINUS:
12414 /* We used to optimize signed comparisons against zero, but that
12415 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12416 arrive here as equality comparisons, or (GEU, LTU) are
12417 optimized away. No need to special-case them. */
12419 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12420 (eq B (minus A C)), whichever simplifies. We can only do
12421 this for equality comparisons due to pathological cases involving
12422 overflows. */
12423 if (equality_comparison_p
12424 && (tem = simplify_binary_operation (PLUS, mode,
12425 XEXP (op0, 1), op1)) != 0)
12427 op0 = XEXP (op0, 0);
12428 op1 = tem;
12429 continue;
12432 if (equality_comparison_p
12433 && (tem = simplify_binary_operation (MINUS, mode,
12434 XEXP (op0, 0), op1)) != 0)
12436 op0 = XEXP (op0, 1);
12437 op1 = tem;
12438 continue;
12441 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12442 of bits in X minus 1, is one iff X > 0. */
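/* For example, in SImode
   (lt (minus (ashiftrt X (const_int 31)) X) (const_int 0))
   becomes (gt X (const_int 0)). */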
12443 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12444 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12445 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12446 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12448 op0 = XEXP (op0, 1);
12449 code = (code == GE ? LE : GT);
12450 continue;
12452 break;
12454 case XOR:
12455 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12456 if C is zero or B is a constant. */
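/* For example, (eq (xor A (const_int 5)) (const_int 3)) becomes
   (eq A (const_int 6)). */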
12457 if (equality_comparison_p
12458 && (tem = simplify_binary_operation (XOR, mode,
12459 XEXP (op0, 1), op1)) != 0)
12461 op0 = XEXP (op0, 0);
12462 op1 = tem;
12463 continue;
12465 break;
12468 case IOR:
12469 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12470 iff X <= 0. */
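/* For example, in SImode
   (lt (ior (plus X (const_int -1)) X) (const_int 0))
   becomes (le X (const_int 0)). */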
12471 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12472 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12473 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12475 op0 = XEXP (op0, 1);
12476 code = (code == GE ? GT : LE);
12477 continue;
12479 break;
12481 case AND:
12482 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12483 will be converted to a ZERO_EXTRACT later. */
12484 if (const_op == 0 && equality_comparison_p
12485 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12486 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12488 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12489 XEXP (XEXP (op0, 0), 1));
12490 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12491 continue;
12494 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12495 zero and X is a comparison and C1 and C2 describe only bits set
12496 in STORE_FLAG_VALUE, we can compare with X. */
12497 if (const_op == 0 && equality_comparison_p
12498 && mode_width <= HOST_BITS_PER_WIDE_INT
12499 && CONST_INT_P (XEXP (op0, 1))
12500 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12501 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12502 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12503 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12505 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12506 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12507 if ((~STORE_FLAG_VALUE & mask) == 0
12508 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12509 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12510 && COMPARISON_P (tem))))
12512 op0 = XEXP (XEXP (op0, 0), 0);
12513 continue;
12517 /* If we are doing an equality comparison of an AND of a bit equal
12518 to the sign bit, replace this with a LT or GE comparison of
12519 the underlying value. */
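/* For example, in QImode the sign-bit mask is (const_int -128), so
   (eq (and X (const_int -128)) (const_int 0)) becomes
   (ge X (const_int 0)). */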
12520 if (equality_comparison_p
12521 && const_op == 0
12522 && CONST_INT_P (XEXP (op0, 1))
12523 && mode_width <= HOST_BITS_PER_WIDE_INT
12524 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12525 == HOST_WIDE_INT_1U << (mode_width - 1)))
12527 op0 = XEXP (op0, 0);
12528 code = (code == EQ ? GE : LT);
12529 continue;
12532 /* If this AND operation is really a ZERO_EXTEND from a narrower
12533 mode, the constant fits within that mode, and this is either an
12534 equality or unsigned comparison, try to do this comparison in
12535 the narrower mode.
12537 Note that in:
12539 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12540 -> (ne:DI (reg:SI 4) (const_int 0))
12542 the transformation is invalid unless TARGET_TRULY_NOOP_TRUNCATION
12543 allows it or the register is known to hold a value of the
12544 required mode. */
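/* For example, (eq:SI (and:SI X (const_int 255)) (const_int 5)) can be
   done as a QImode comparison of the low part of X against 5, subject
   to the truncation caveat above. */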
12545 if ((equality_comparison_p || unsigned_comparison_p)
12546 && CONST_INT_P (XEXP (op0, 1))
12547 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12548 & GET_MODE_MASK (mode))
12549 + 1)) >= 0
12550 && const_op >> i == 0
12551 && int_mode_for_size (i, 1).exists (&tmode))
12553 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12554 continue;
12557 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12558 fits in both M1 and M2 and the SUBREG is either paradoxical
12559 or represents the low part, permute the SUBREG and the AND
12560 and try again. */
12561 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12562 && CONST_INT_P (XEXP (op0, 1)))
12564 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12565 /* Require an integral mode, to avoid creating something like
12566 (AND:SF ...). */
12567 if ((is_a <scalar_int_mode>
12568 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12569 /* It is unsafe to commute the AND into the SUBREG if the
12570 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12571 not defined. As originally written the upper bits
12572 have a defined value due to the AND operation.
12573 However, if we commute the AND inside the SUBREG then
12574 they no longer have defined values and the meaning of
12575 the code has been changed.
12576 Also C1 should not change value in the smaller mode,
12577 see PR67028 (a positive C1 can become negative in the
12578 smaller mode, so that the AND does no longer mask the
12579 upper bits). */
12580 && ((WORD_REGISTER_OPERATIONS
12581 && mode_width > GET_MODE_PRECISION (tmode)
12582 && mode_width <= BITS_PER_WORD
12583 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12584 || (mode_width <= GET_MODE_PRECISION (tmode)
12585 && subreg_lowpart_p (XEXP (op0, 0))))
12586 && mode_width <= HOST_BITS_PER_WIDE_INT
12587 && HWI_COMPUTABLE_MODE_P (tmode)
12588 && (c1 & ~mask) == 0
12589 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12590 && c1 != mask
12591 && c1 != GET_MODE_MASK (tmode))
12593 op0 = simplify_gen_binary (AND, tmode,
12594 SUBREG_REG (XEXP (op0, 0)),
12595 gen_int_mode (c1, tmode));
12596 op0 = gen_lowpart (mode, op0);
12597 continue;
12601 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12602 if (const_op == 0 && equality_comparison_p
12603 && XEXP (op0, 1) == const1_rtx
12604 && GET_CODE (XEXP (op0, 0)) == NOT)
12606 op0 = simplify_and_const_int (NULL_RTX, mode,
12607 XEXP (XEXP (op0, 0), 0), 1);
12608 code = (code == NE ? EQ : NE);
12609 continue;
12612 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12613 (eq (and (lshiftrt X) 1) 0).
12614 Also handle the case where (not X) is expressed using xor. */
12615 if (const_op == 0 && equality_comparison_p
12616 && XEXP (op0, 1) == const1_rtx
12617 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12619 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12620 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12622 if (GET_CODE (shift_op) == NOT
12623 || (GET_CODE (shift_op) == XOR
12624 && CONST_INT_P (XEXP (shift_op, 1))
12625 && CONST_INT_P (shift_count)
12626 && HWI_COMPUTABLE_MODE_P (mode)
12627 && (UINTVAL (XEXP (shift_op, 1))
12628 == HOST_WIDE_INT_1U
12629 << INTVAL (shift_count))))
12632 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12633 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12634 code = (code == NE ? EQ : NE);
12635 continue;
12638 break;
12640 case ASHIFT:
12641 /* If we have (compare (ashift FOO N) (const_int C)) and
12642 the high order N bits of FOO (N+1 if an inequality comparison)
12643 are known to be zero, we can do this by comparing FOO with C
12644 shifted right N bits so long as the low-order N bits of C are
12645 zero. */
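/* For example, (eq (ashift X (const_int 2)) (const_int 12)) becomes
   (eq X (const_int 3)) when the top two bits of X are known to be
   zero. */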
12646 if (CONST_INT_P (XEXP (op0, 1))
12647 && INTVAL (XEXP (op0, 1)) >= 0
12648 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12649 < HOST_BITS_PER_WIDE_INT)
12650 && (((unsigned HOST_WIDE_INT) const_op
12651 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12652 - 1)) == 0)
12653 && mode_width <= HOST_BITS_PER_WIDE_INT
12654 && (nonzero_bits (XEXP (op0, 0), mode)
12655 & ~(mask >> (INTVAL (XEXP (op0, 1))
12656 + ! equality_comparison_p))) == 0)
12658 /* We must perform a logical shift, not an arithmetic one,
12659 as we want the top N bits of C to be zero. */
12660 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12662 temp >>= INTVAL (XEXP (op0, 1));
12663 op1 = gen_int_mode (temp, mode);
12664 op0 = XEXP (op0, 0);
12665 continue;
12668 /* If we are doing a sign bit comparison, it means we are testing
12669 a particular bit. Convert it to the appropriate AND. */
12670 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12671 && mode_width <= HOST_BITS_PER_WIDE_INT)
12673 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12674 (HOST_WIDE_INT_1U
12675 << (mode_width - 1
12676 - INTVAL (XEXP (op0, 1)))));
12677 code = (code == LT ? NE : EQ);
12678 continue;
12681 /* If this is an equality comparison with zero and we are shifting
12682 the low bit to the sign bit, we can convert this to an AND of the
12683 low-order bit. */
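/* For example, in QImode (eq (ashift X (const_int 7)) (const_int 0))
   becomes (eq (and X (const_int 1)) (const_int 0)). */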
12684 if (const_op == 0 && equality_comparison_p
12685 && CONST_INT_P (XEXP (op0, 1))
12686 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12688 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12689 continue;
12691 break;
12693 case ASHIFTRT:
12694 /* If this is an equality comparison with zero, we can do this
12695 as a logical shift, which might be much simpler. */
12696 if (equality_comparison_p && const_op == 0
12697 && CONST_INT_P (XEXP (op0, 1)))
12699 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12700 XEXP (op0, 0),
12701 INTVAL (XEXP (op0, 1)));
12702 continue;
12705 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12706 do the comparison in a narrower mode. */
12707 if (! unsigned_comparison_p
12708 && CONST_INT_P (XEXP (op0, 1))
12709 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12710 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12711 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12712 .exists (&tmode))
12713 && (((unsigned HOST_WIDE_INT) const_op
12714 + (GET_MODE_MASK (tmode) >> 1) + 1)
12715 <= GET_MODE_MASK (tmode)))
12717 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12718 continue;
12721 /* Likewise if OP0 is a PLUS of a sign extension with a
12722 constant, which is usually represented with the PLUS
12723 between the shifts. */
12724 if (! unsigned_comparison_p
12725 && CONST_INT_P (XEXP (op0, 1))
12726 && GET_CODE (XEXP (op0, 0)) == PLUS
12727 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12728 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12729 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12730 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12731 .exists (&tmode))
12732 && (((unsigned HOST_WIDE_INT) const_op
12733 + (GET_MODE_MASK (tmode) >> 1) + 1)
12734 <= GET_MODE_MASK (tmode)))
12736 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12737 rtx add_const = XEXP (XEXP (op0, 0), 1);
12738 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12739 add_const, XEXP (op0, 1));
12741 op0 = simplify_gen_binary (PLUS, tmode,
12742 gen_lowpart (tmode, inner),
12743 new_const);
12744 continue;
12747 /* FALLTHROUGH */
12748 case LSHIFTRT:
12749 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12750 the low order N bits of FOO are known to be zero, we can do this
12751 by comparing FOO with C shifted left N bits so long as no
12752 overflow occurs. Even if the low order N bits of FOO aren't known
12753 to be zero, if the comparison is >= or < we can use the same
12754 optimization and for > or <= by setting all the low
12755 order N bits in the comparison constant. */
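/* For example, (ltu (lshiftrt X (const_int 2)) (const_int 5)) becomes
   (ltu X (const_int 20)), since X >> 2 < 5 exactly when X < 20
   unsigned. */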
12756 if (CONST_INT_P (XEXP (op0, 1))
12757 && INTVAL (XEXP (op0, 1)) > 0
12758 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12759 && mode_width <= HOST_BITS_PER_WIDE_INT
12760 && (((unsigned HOST_WIDE_INT) const_op
12761 + (GET_CODE (op0) != LSHIFTRT
12762 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12763 + 1)
12764 : 0))
12765 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12767 unsigned HOST_WIDE_INT low_bits
12768 = (nonzero_bits (XEXP (op0, 0), mode)
12769 & ((HOST_WIDE_INT_1U
12770 << INTVAL (XEXP (op0, 1))) - 1));
12771 if (low_bits == 0 || !equality_comparison_p)
12773 /* If the shift was logical, then we must make the condition
12774 unsigned. */
12775 if (GET_CODE (op0) == LSHIFTRT)
12776 code = unsigned_condition (code);
12778 const_op = (unsigned HOST_WIDE_INT) const_op
12779 << INTVAL (XEXP (op0, 1));
12780 if (low_bits != 0
12781 && (code == GT || code == GTU
12782 || code == LE || code == LEU))
12783 const_op
12784 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12785 op1 = GEN_INT (const_op);
12786 op0 = XEXP (op0, 0);
12787 continue;
12791 /* If we are using this shift to extract just the sign bit, we
12792 can replace this with an LT or GE comparison. */
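/* For example, in SImode
   (ne (lshiftrt X (const_int 31)) (const_int 0)) becomes
   (lt X (const_int 0)). */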
12793 if (const_op == 0
12794 && (equality_comparison_p || sign_bit_comparison_p)
12795 && CONST_INT_P (XEXP (op0, 1))
12796 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12798 op0 = XEXP (op0, 0);
12799 code = (code == NE || code == GT ? LT : GE);
12800 continue;
12802 break;
12804 default:
12805 break;
12808 break;
12811 /* Now make any compound operations involved in this comparison. Then,
12812 check for an outermost SUBREG on OP0 that is not doing anything or is
12813 paradoxical. The latter transformation must only be performed when
12814 it is known that the "extra" bits will be the same in op0 and op1 or
12815 that they don't matter. There are three cases to consider:
12817 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12818 care bits and we can assume they have any convenient value. So
12819 making the transformation is safe.
12821 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12822 In this case the upper bits of op0 are undefined. We should not make
12823 the simplification in that case as we do not know the contents of
12824 those bits.
12826 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12827 In that case we know those bits are zeros or ones. We must also be
12828 sure that they are the same as the upper bits of op1.
12830 We can never remove a SUBREG for a non-equality comparison because
12831 the sign bit is in a different place in the underlying object. */
12833 rtx_code op0_mco_code = SET;
12834 if (op1 == const0_rtx)
12835 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12837 op0 = make_compound_operation (op0, op0_mco_code);
12838 op1 = make_compound_operation (op1, SET);
12840 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12841 && is_int_mode (GET_MODE (op0), &mode)
12842 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12843 && (code == NE || code == EQ))
12845 if (paradoxical_subreg_p (op0))
12847 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12848 implemented. */
12849 if (REG_P (SUBREG_REG (op0)))
12851 op0 = SUBREG_REG (op0);
12852 op1 = gen_lowpart (inner_mode, op1);
12855 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12856 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12857 & ~GET_MODE_MASK (mode)) == 0)
12859 tem = gen_lowpart (inner_mode, op1);
12861 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12862 op0 = SUBREG_REG (op0), op1 = tem;
12866 /* We now do the opposite procedure: Some machines don't have compare
12867 insns in all modes. If OP0's mode is an integer mode smaller than a
12868 word and we can't do a compare in that mode, see if there is a larger
12869 mode for which we can do the compare. There are a number of cases in
12870 which we can use the wider mode. */
12872 if (is_int_mode (GET_MODE (op0), &mode)
12873 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12874 && ! have_insn_for (COMPARE, mode))
12875 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12877 tmode = tmode_iter.require ();
12878 if (!HWI_COMPUTABLE_MODE_P (tmode))
12879 break;
12880 if (have_insn_for (COMPARE, tmode))
12882 int zero_extended;
12884 /* If this is a test for negative, we can make an explicit
12885 test of the sign bit. Test this first so we can use
12886 a paradoxical subreg to extend OP0. */
12888 if (op1 == const0_rtx && (code == LT || code == GE)
12889 && HWI_COMPUTABLE_MODE_P (mode))
12891 unsigned HOST_WIDE_INT sign
12892 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12893 op0 = simplify_gen_binary (AND, tmode,
12894 gen_lowpart (tmode, op0),
12895 gen_int_mode (sign, tmode));
12896 code = (code == LT) ? NE : EQ;
12897 break;
12900 /* If the only nonzero bits in OP0 and OP1 are those in the
12901 narrower mode and this is an equality or unsigned comparison,
12902 we can use the wider mode. Similarly for sign-extended
12903 values, in which case it is true for all comparisons. */
12904 zero_extended = ((code == EQ || code == NE
12905 || code == GEU || code == GTU
12906 || code == LEU || code == LTU)
12907 && (nonzero_bits (op0, tmode)
12908 & ~GET_MODE_MASK (mode)) == 0
12909 && ((CONST_INT_P (op1)
12910 || (nonzero_bits (op1, tmode)
12911 & ~GET_MODE_MASK (mode)) == 0)));
12913 if (zero_extended
12914 || ((num_sign_bit_copies (op0, tmode)
12915 > (unsigned int) (GET_MODE_PRECISION (tmode)
12916 - GET_MODE_PRECISION (mode)))
12917 && (num_sign_bit_copies (op1, tmode)
12918 > (unsigned int) (GET_MODE_PRECISION (tmode)
12919 - GET_MODE_PRECISION (mode)))))
12921 /* If OP0 is an AND and we don't have an AND in MODE either,
12922 make a new AND in the proper mode. */
12923 if (GET_CODE (op0) == AND
12924 && !have_insn_for (AND, mode))
12925 op0 = simplify_gen_binary (AND, tmode,
12926 gen_lowpart (tmode,
12927 XEXP (op0, 0)),
12928 gen_lowpart (tmode,
12929 XEXP (op0, 1)));
12930 else
12932 if (zero_extended)
12934 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12935 op0, mode);
12936 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12937 op1, mode);
12939 else
12941 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12942 op0, mode);
12943 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12944 op1, mode);
12946 break;
12952 /* We may have changed the comparison operands. Re-canonicalize. */
12953 if (swap_commutative_operands_p (op0, op1))
12955 std::swap (op0, op1);
12956 code = swap_condition (code);
12959 /* If this machine only supports a subset of valid comparisons, see if we
12960 can convert an unsupported one into a supported one. */
12961 target_canonicalize_comparison (&code, &op0, &op1, 0);
12963 *pop0 = op0;
12964 *pop1 = op1;
12966 return code;
12969 /* Utility function for record_value_for_reg. Count number of
12970 rtxs in X. */
12971 static int
12972 count_rtxs (rtx x)
12974 enum rtx_code code = GET_CODE (x);
12975 const char *fmt;
12976 int i, j, ret = 1;
12978 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12979 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12981 rtx x0 = XEXP (x, 0);
12982 rtx x1 = XEXP (x, 1);
12984 if (x0 == x1)
12985 return 1 + 2 * count_rtxs (x0);
12987 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12988 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12989 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12990 return 2 + 2 * count_rtxs (x0)
12991 + count_rtxs (x == XEXP (x1, 0)
12992 ? XEXP (x1, 1) : XEXP (x1, 0));
12994 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12995 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12996 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12997 return 2 + 2 * count_rtxs (x1)
12998 + count_rtxs (x == XEXP (x0, 0)
12999 ? XEXP (x0, 1) : XEXP (x0, 0));
13002 fmt = GET_RTX_FORMAT (code);
13003 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13004 if (fmt[i] == 'e')
13005 ret += count_rtxs (XEXP (x, i));
13006 else if (fmt[i] == 'E')
13007 for (j = 0; j < XVECLEN (x, i); j++)
13008 ret += count_rtxs (XVECEXP (x, i, j));
13010 return ret;
13013 /* Utility function for the following routine. Called when X is part of a value
13014 being stored into last_set_value. Sets last_set_table_tick
13015 for each register mentioned. Similar to mention_regs in cse.c */
13017 static void
13018 update_table_tick (rtx x)
13020 enum rtx_code code = GET_CODE (x);
13021 const char *fmt = GET_RTX_FORMAT (code);
13022 int i, j;
13024 if (code == REG)
13026 unsigned int regno = REGNO (x);
13027 unsigned int endregno = END_REGNO (x);
13028 unsigned int r;
13030 for (r = regno; r < endregno; r++)
13032 reg_stat_type *rsp = &reg_stat[r];
13033 rsp->last_set_table_tick = label_tick;
13036 return;
13039 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13040 if (fmt[i] == 'e')
13042 /* Check for identical subexpressions. If x contains
13043 identical subexpression we only have to traverse one of
13044 them. */
13045 if (i == 0 && ARITHMETIC_P (x))
13047 /* Note that at this point x1 has already been
13048 processed. */
13049 rtx x0 = XEXP (x, 0);
13050 rtx x1 = XEXP (x, 1);
13052 /* If x0 and x1 are identical then there is no need to
13053 process x0. */
13054 if (x0 == x1)
13055 break;
13057 /* If x0 is identical to a subexpression of x1 then while
13058 processing x1, x0 has already been processed. Thus we
13059 are done with x. */
13060 if (ARITHMETIC_P (x1)
13061 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13062 break;
13064 /* If x1 is identical to a subexpression of x0 then we
13065 still have to process the rest of x0. */
13066 if (ARITHMETIC_P (x0)
13067 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13069 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13070 break;
13074 update_table_tick (XEXP (x, i));
13076 else if (fmt[i] == 'E')
13077 for (j = 0; j < XVECLEN (x, i); j++)
13078 update_table_tick (XVECEXP (x, i, j));
13081 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13082 are saying that the register is clobbered and we no longer know its
13083 value. If INSN is zero, don't update reg_stat[].last_set; this is
13084 only permitted with VALUE also zero and is used to invalidate the
13085 register. */
13087 static void
13088 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13090 unsigned int regno = REGNO (reg);
13091 unsigned int endregno = END_REGNO (reg);
13092 unsigned int i;
13093 reg_stat_type *rsp;
13095 /* If VALUE contains REG and we have a previous value for REG, substitute
13096 the previous value. */
13097 if (value && insn && reg_overlap_mentioned_p (reg, value))
13099 rtx tem;
13101 /* Set things up so get_last_value is allowed to see anything set up to
13102 our insn. */
13103 subst_low_luid = DF_INSN_LUID (insn);
13104 tem = get_last_value (reg);
13106 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13107 it isn't going to be useful and will take a lot of time to process,
13108 so just use the CLOBBER. */
13110 if (tem)
13112 if (ARITHMETIC_P (tem)
13113 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13114 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13115 tem = XEXP (tem, 0);
13116 else if (count_occurrences (value, reg, 1) >= 2)
13118 /* If there are two or more occurrences of REG in VALUE,
13119 prevent the value from growing too much. */
13120 if (count_rtxs (tem) > param_max_last_value_rtl)
13121 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13124 value = replace_rtx (copy_rtx (value), reg, tem);
13128 /* For each register modified, show we don't know its value, that
13129 we don't know about its bitwise content, that its value has been
13130 updated, and that we don't know the location of the death of the
13131 register. */
13132 for (i = regno; i < endregno; i++)
13134 rsp = &reg_stat[i];
13136 if (insn)
13137 rsp->last_set = insn;
13139 rsp->last_set_value = 0;
13140 rsp->last_set_mode = VOIDmode;
13141 rsp->last_set_nonzero_bits = 0;
13142 rsp->last_set_sign_bit_copies = 0;
13143 rsp->last_death = 0;
13144 rsp->truncated_to_mode = VOIDmode;
13147 /* Mark registers that are being referenced in this value. */
13148 if (value)
13149 update_table_tick (value);
13151 /* Now update the status of each register being set.
13152 If someone is using this register in this block, set this register
13153 to invalid since we will get confused between the two lives in this
13154 basic block. This makes using this register always invalid. In cse, we
13155 scan the table to invalidate all entries using this register, but this
13156 is too much work for us. */
13158 for (i = regno; i < endregno; i++)
13160 rsp = &reg_stat[i];
13161 rsp->last_set_label = label_tick;
13162 if (!insn
13163 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13164 rsp->last_set_invalid = 1;
13165 else
13166 rsp->last_set_invalid = 0;
13169 /* The value being assigned might refer to X (like in "x++;"). In that
13170 case, we must replace it with (clobber (const_int 0)) to prevent
13171 infinite loops. */
13172 rsp = &reg_stat[regno];
13173 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13175 value = copy_rtx (value);
13176 if (!get_last_value_validate (&value, insn, label_tick, 1))
13177 value = 0;
13180 /* For the main register being modified, update the value, the mode, the
13181 nonzero bits, and the number of sign bit copies. */
13183 rsp->last_set_value = value;
13185 if (value)
13187 machine_mode mode = GET_MODE (reg);
13188 subst_low_luid = DF_INSN_LUID (insn);
13189 rsp->last_set_mode = mode;
13190 if (GET_MODE_CLASS (mode) == MODE_INT
13191 && HWI_COMPUTABLE_MODE_P (mode))
13192 mode = nonzero_bits_mode;
13193 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13194 rsp->last_set_sign_bit_copies
13195 = num_sign_bit_copies (value, GET_MODE (reg));
13199 /* Called via note_stores from record_dead_and_set_regs to handle one
13200 SET or CLOBBER in an insn. DATA is the instruction in which the
13201 set is occurring. */
13203 static void
13204 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13206 rtx_insn *record_dead_insn = (rtx_insn *) data;
13208 if (GET_CODE (dest) == SUBREG)
13209 dest = SUBREG_REG (dest);
13211 if (!record_dead_insn)
13213 if (REG_P (dest))
13214 record_value_for_reg (dest, NULL, NULL_RTX);
13215 return;
13218 if (REG_P (dest))
13220 /* If we are setting the whole register, we know its value. Otherwise
13221 show that we don't know the value. We can handle a SUBREG if it's
13222 the low part, but we must be careful with paradoxical SUBREGs on
13223 RISC architectures because we cannot strip e.g. an extension around
13224 a load and record the naked load since the RTL middle-end considers
13225 that the upper bits are defined according to LOAD_EXTEND_OP. */
13226 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13227 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13228 else if (GET_CODE (setter) == SET
13229 && GET_CODE (SET_DEST (setter)) == SUBREG
13230 && SUBREG_REG (SET_DEST (setter)) == dest
13231 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13232 BITS_PER_WORD)
13233 && subreg_lowpart_p (SET_DEST (setter)))
13234 record_value_for_reg (dest, record_dead_insn,
13235 WORD_REGISTER_OPERATIONS
13236 && word_register_operation_p (SET_SRC (setter))
13237 && paradoxical_subreg_p (SET_DEST (setter))
13238 ? SET_SRC (setter)
13239 : gen_lowpart (GET_MODE (dest),
13240 SET_SRC (setter)));
13241 else
13242 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13244 else if (MEM_P (dest)
13245 /* Ignore pushes, they clobber nothing. */
13246 && ! push_operand (dest, GET_MODE (dest)))
13247 mem_last_set = DF_INSN_LUID (record_dead_insn);
13250 /* Update the records of when each REG was most recently set or killed
13251 for the things done by INSN. This is the last thing done in processing
13252 INSN in the combiner loop.
13254 We update reg_stat[], in particular fields last_set, last_set_value,
13255 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13256 last_death, and also the similar information mem_last_set (which insn
13257 most recently modified memory) and last_call_luid (which insn was the
13258 most recent subroutine call). */
13260 static void
13261 record_dead_and_set_regs (rtx_insn *insn)
13263 rtx link;
13264 unsigned int i;
13266 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13268 if (REG_NOTE_KIND (link) == REG_DEAD
13269 && REG_P (XEXP (link, 0)))
13271 unsigned int regno = REGNO (XEXP (link, 0));
13272 unsigned int endregno = END_REGNO (XEXP (link, 0));
13274 for (i = regno; i < endregno; i++)
13276 reg_stat_type *rsp;
13278 rsp = &reg_stat[i];
13279 rsp->last_death = insn;
13282 else if (REG_NOTE_KIND (link) == REG_INC)
13283 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13286 if (CALL_P (insn))
13288 HARD_REG_SET callee_clobbers
13289 = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
13290 hard_reg_set_iterator hrsi;
13291 EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
13293 reg_stat_type *rsp;
13295 /* ??? We could try to preserve some information from the last
13296 set of register I if the call doesn't actually clobber
13297 (reg:last_set_mode I), which might be true for ABIs with
13298 partial clobbers. However, it would be difficult to
13299 update last_set_nonzero_bits and last_set_sign_bit_copies
13300 to account for the part of I that actually was clobbered.
13301 It wouldn't help much anyway, since we rarely see this
13302 situation before RA. */
13303 rsp = &reg_stat[i];
13304 rsp->last_set_invalid = 1;
13305 rsp->last_set = insn;
13306 rsp->last_set_value = 0;
13307 rsp->last_set_mode = VOIDmode;
13308 rsp->last_set_nonzero_bits = 0;
13309 rsp->last_set_sign_bit_copies = 0;
13310 rsp->last_death = 0;
13311 rsp->truncated_to_mode = VOIDmode;
13314 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13316 /* We can't combine into a call pattern. Remember, though, that
13317 the return value register is set at this LUID. We could
13318 still replace a register with the return value from the
13319 wrong subroutine call! */
13320 note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
13322 else
13323 note_stores (insn, record_dead_and_set_regs_1, insn);
13326 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13327 register present in the SUBREG, so for each such SUBREG go back and
13328 adjust nonzero and sign bit information of the registers that are
13329 known to have some zero/sign bits set.
13331 This is needed because when combine blows the SUBREGs away, the
13332 information on zero/sign bits is lost and further combines can be
13333 missed because of that. */
13335 static void
13336 record_promoted_value (rtx_insn *insn, rtx subreg)
13338 struct insn_link *links;
13339 rtx set;
13340 unsigned int regno = REGNO (SUBREG_REG (subreg));
13341 machine_mode mode = GET_MODE (subreg);
13343 if (!HWI_COMPUTABLE_MODE_P (mode))
13344 return;
13346 for (links = LOG_LINKS (insn); links;)
13348 reg_stat_type *rsp;
13350 insn = links->insn;
13351 set = single_set (insn);
13353 if (! set || !REG_P (SET_DEST (set))
13354 || REGNO (SET_DEST (set)) != regno
13355 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13357 links = links->next;
13358 continue;
13361 rsp = &reg_stat[regno];
13362 if (rsp->last_set == insn)
13364 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13365 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13368 if (REG_P (SET_SRC (set)))
13370 regno = REGNO (SET_SRC (set));
13371 links = LOG_LINKS (insn);
13373 else
13374 break;
13378 /* Check if X, a register, is known to contain a value already
13379 truncated to MODE. In this case we can use a subreg to refer to
13380 the truncated value even though in the generic case we would need
13381 an explicit truncation. */
13383 static bool
13384 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13386 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13387 machine_mode truncated = rsp->truncated_to_mode;
13389 if (truncated == 0
13390 || rsp->truncation_label < label_tick_ebb_start)
13391 return false;
13392 if (!partial_subreg_p (mode, truncated))
13393 return true;
13394 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13395 return true;
13396 return false;
13399 /* If X is a hard reg or a subreg, record the mode that the register is
13400 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13401 able to turn a truncate into a subreg using this information. Return true
13402 if traversing X is complete. */
13404 static bool
13405 record_truncated_value (rtx x)
13407 machine_mode truncated_mode;
13408 reg_stat_type *rsp;
13410 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13412 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13413 truncated_mode = GET_MODE (x);
13415 if (!partial_subreg_p (truncated_mode, original_mode))
13416 return true;
13418 truncated_mode = GET_MODE (x);
13419 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13420 return true;
13422 x = SUBREG_REG (x);
13424 /* ??? For hard-regs we now record everything. We might be able to
13425 optimize this using last_set_mode. */
13426 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13427 truncated_mode = GET_MODE (x);
13428 else
13429 return false;
13431 rsp = &reg_stat[REGNO (x)];
13432 if (rsp->truncated_to_mode == 0
13433 || rsp->truncation_label < label_tick_ebb_start
13434 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13436 rsp->truncated_to_mode = truncated_mode;
13437 rsp->truncation_label = label_tick;
13440 return true;
13443 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13444 the modes they are used in. This can help turn TRUNCATEs into
13445 SUBREGs. */
13447 static void
13448 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13450 subrtx_var_iterator::array_type array;
13451 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13452 if (record_truncated_value (*iter))
13453 iter.skip_subrtxes ();
13456 /* Scan X for promoted SUBREGs. For each one found,
13457 note what it implies to the registers used in it. */
13459 static void
13460 check_promoted_subreg (rtx_insn *insn, rtx x)
13462 if (GET_CODE (x) == SUBREG
13463 && SUBREG_PROMOTED_VAR_P (x)
13464 && REG_P (SUBREG_REG (x)))
13465 record_promoted_value (insn, x);
13466 else
13468 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13469 int i, j;
13471 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13472 switch (format[i])
13474 case 'e':
13475 check_promoted_subreg (insn, XEXP (x, i));
13476 break;
13477 case 'V':
13478 case 'E':
13479 if (XVEC (x, i) != 0)
13480 for (j = 0; j < XVECLEN (x, i); j++)
13481 check_promoted_subreg (insn, XVECEXP (x, i, j));
13482 break;
13487 /* Verify that all the registers and memory references mentioned in *LOC are
13488 still valid. *LOC was part of a value set in INSN when label_tick was
13489 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13490 the invalid references with (clobber (const_int 0)) and return 1. This
13491 replacement is useful because we often can get useful information about
13492 the form of a value (e.g., if it was produced by a shift that always
13493 produces -1 or 0) even though we don't know exactly what registers it
13494 was produced from. */
13496 static int
13497 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13499 rtx x = *loc;
13500 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13501 int len = GET_RTX_LENGTH (GET_CODE (x));
13502 int i, j;
13504 if (REG_P (x))
13506 unsigned int regno = REGNO (x);
13507 unsigned int endregno = END_REGNO (x);
13508 unsigned int j;
13510 for (j = regno; j < endregno; j++)
13512 reg_stat_type *rsp = &reg_stat[j];
13513 if (rsp->last_set_invalid
13514 /* If this is a pseudo-register that was only set once and not
13515 live at the beginning of the function, it is always valid. */
13516 || (! (regno >= FIRST_PSEUDO_REGISTER
13517 && regno < reg_n_sets_max
13518 && REG_N_SETS (regno) == 1
13519 && (!REGNO_REG_SET_P
13520 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13521 regno)))
13522 && rsp->last_set_label > tick))
13524 if (replace)
13525 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13526 return replace;
13530 return 1;
13532 /* If this is a memory reference, make sure that there were no stores after
13533 it that might have clobbered the value. We don't have alias info, so we
13534 assume any store invalidates it. Moreover, we only have local UIDs, so
13535 we also assume that there were stores in the intervening basic blocks. */
13536 else if (MEM_P (x) && !MEM_READONLY_P (x)
13537 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13539 if (replace)
13540 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13541 return replace;
13544 for (i = 0; i < len; i++)
13546 if (fmt[i] == 'e')
13548 /* Check for identical subexpressions. If x contains
13549 identical subexpression we only have to traverse one of
13550 them. */
13551 if (i == 1 && ARITHMETIC_P (x))
13553 /* Note that at this point x0 has already been checked
13554 and found valid. */
13555 rtx x0 = XEXP (x, 0);
13556 rtx x1 = XEXP (x, 1);
13558 /* If x0 and x1 are identical then x is also valid. */
13559 if (x0 == x1)
13560 return 1;
13562 /* If x1 is identical to a subexpression of x0 then
13563 while checking x0, x1 has already been checked. Thus
13564 it is valid and so is x. */
13565 if (ARITHMETIC_P (x0)
13566 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13567 return 1;
13569 /* If x0 is identical to a subexpression of x1 then x is
13570 valid iff the rest of x1 is valid. */
13571 if (ARITHMETIC_P (x1)
13572 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13573 return
13574 get_last_value_validate (&XEXP (x1,
13575 x0 == XEXP (x1, 0) ? 1 : 0),
13576 insn, tick, replace);
13579 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13580 replace) == 0)
13581 return 0;
13583 else if (fmt[i] == 'E')
13584 for (j = 0; j < XVECLEN (x, i); j++)
13585 if (get_last_value_validate (&XVECEXP (x, i, j),
13586 insn, tick, replace) == 0)
13587 return 0;
13590 /* If we haven't found a reason for it to be invalid, it is valid. */
13591 return 1;
13594 /* Get the last value assigned to X, if known. Some registers
13595 in the value may be replaced with (clobber (const_int 0)) if their value
13596 is no longer known reliably. */
13598 static rtx
13599 get_last_value (const_rtx x)
13601 unsigned int regno;
13602 rtx value;
13603 reg_stat_type *rsp;
13605 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13606 then convert it to the desired mode. If this is a paradoxical SUBREG,
13607 we cannot predict what values the "extra" bits might have. */
13608 if (GET_CODE (x) == SUBREG
13609 && subreg_lowpart_p (x)
13610 && !paradoxical_subreg_p (x)
13611 && (value = get_last_value (SUBREG_REG (x))) != 0)
13612 return gen_lowpart (GET_MODE (x), value);
13614 if (!REG_P (x))
13615 return 0;
13617 regno = REGNO (x);
13618 rsp = &reg_stat[regno];
13619 value = rsp->last_set_value;
13621 /* If we don't have a value, or if it isn't for this basic block and
13622 it's either a hard register, set more than once, or it's live
13623 at the beginning of the function, return 0.
13625 Because if it's not live at the beginning of the function then the reg
13626 is always set before being used (is never used without being set).
13627 And, if it's set only once, and it's always set before use, then all
13628 uses must have the same last value, even if it's not from this basic
13629 block. */
13631 if (value == 0
13632 || (rsp->last_set_label < label_tick_ebb_start
13633 && (regno < FIRST_PSEUDO_REGISTER
13634 || regno >= reg_n_sets_max
13635 || REG_N_SETS (regno) != 1
13636 || REGNO_REG_SET_P
13637 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13638 return 0;
13640 /* If the value was set in a later insn than the ones we are processing,
13641 we can't use it even if the register was only set once. */
13642 if (rsp->last_set_label == label_tick
13643 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13644 return 0;
13646 /* If fewer bits were set than what we are asked for now, we cannot use
13647 the value. */
13648 if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13649 GET_MODE_PRECISION (GET_MODE (x))))
13650 return 0;
13652 /* If the value has all its registers valid, return it. */
13653 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13654 return value;
13656 /* Otherwise, make a copy and replace any invalid register with
13657 (clobber (const_int 0)). If that fails for some reason, return 0. */
13659 value = copy_rtx (value);
13660 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13661 return value;
13663 return 0;
13666 /* Define three variables used for communication between the following
13667 routines. */
13669 static unsigned int reg_dead_regno, reg_dead_endregno;
13670 static int reg_dead_flag;
13671 rtx reg_dead_reg;
13673 /* Function called via note_stores from reg_dead_at_p.
13675 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13676 reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
13678 static void
13679 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13681 unsigned int regno, endregno;
13683 if (!REG_P (dest))
13684 return;
13686 regno = REGNO (dest);
13687 endregno = END_REGNO (dest);
13688 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13689 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13692 /* Return nonzero if REG is known to be dead at INSN.
13694 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13695 referencing REG, it is dead. If we hit a SET referencing REG, it is
13696 live. Otherwise, see if it is live or dead at the start of the basic
13697 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13698 must be assumed to be always live. */
13700 static int
13701 reg_dead_at_p (rtx reg, rtx_insn *insn)
13703 basic_block block;
13704 unsigned int i;
13706 /* Set variables for reg_dead_at_p_1. */
13707 reg_dead_regno = REGNO (reg);
13708 reg_dead_endregno = END_REGNO (reg);
13709 reg_dead_reg = reg;
13711 reg_dead_flag = 0;
13713 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13714 we allow the machine description to decide whether use-and-clobber
13715 patterns are OK. */
13716 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13718 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13719 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13720 return 0;
13723 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13724 beginning of basic block. */
13725 block = BLOCK_FOR_INSN (insn);
13726 for (;;)
13728 if (INSN_P (insn))
13730 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13731 return 1;
13733 note_stores (insn, reg_dead_at_p_1, NULL);
13734 if (reg_dead_flag)
13735 return reg_dead_flag == 1 ? 1 : 0;
13737 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13738 return 1;
13741 if (insn == BB_HEAD (block))
13742 break;
13744 insn = PREV_INSN (insn);
13747 /* Look at live-in sets for the basic block that we were in. */
13748 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13749 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13750 return 0;
13752 return 1;
13755 /* Note hard registers in X that are used. */
13757 static void
13758 mark_used_regs_combine (rtx x)
13760 RTX_CODE code = GET_CODE (x);
13761 unsigned int regno;
13762 int i;
13764 switch (code)
13766 case LABEL_REF:
13767 case SYMBOL_REF:
13768 case CONST:
13769 CASE_CONST_ANY:
13770 case PC:
13771 case ADDR_VEC:
13772 case ADDR_DIFF_VEC:
13773 case ASM_INPUT:
13774 return;
13776 case CLOBBER:
13777 /* If we are clobbering a MEM, mark any hard registers inside the
13778 address as used. */
13779 if (MEM_P (XEXP (x, 0)))
13780 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13781 return;
13783 case REG:
13784 regno = REGNO (x);
13785 /* A hard reg in a wide mode may really be multiple registers.
13786 If so, mark all of them just like the first. */
13787 if (regno < FIRST_PSEUDO_REGISTER)
13789 /* None of this applies to the stack, frame or arg pointers. */
13790 if (regno == STACK_POINTER_REGNUM
13791 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13792 && regno == HARD_FRAME_POINTER_REGNUM)
13793 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13794 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13795 || regno == FRAME_POINTER_REGNUM)
13796 return;
13798 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13800 return;
13802 case SET:
13804 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13805 the address. */
13806 rtx testreg = SET_DEST (x);
13808 while (GET_CODE (testreg) == SUBREG
13809 || GET_CODE (testreg) == ZERO_EXTRACT
13810 || GET_CODE (testreg) == STRICT_LOW_PART)
13811 testreg = XEXP (testreg, 0);
13813 if (MEM_P (testreg))
13814 mark_used_regs_combine (XEXP (testreg, 0));
13816 mark_used_regs_combine (SET_SRC (x));
13818 return;
13820 default:
13821 break;
13824 /* Recursively scan the operands of this expression. */
13827 const char *fmt = GET_RTX_FORMAT (code);
13829 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13831 if (fmt[i] == 'e')
13832 mark_used_regs_combine (XEXP (x, i));
13833 else if (fmt[i] == 'E')
13835 int j;
13837 for (j = 0; j < XVECLEN (x, i); j++)
13838 mark_used_regs_combine (XVECEXP (x, i, j));
13844 /* Remove register number REGNO from the dead registers list of INSN.
13846 Return the note used to record the death, if there was one. */
13848 rtx
13849 remove_death (unsigned int regno, rtx_insn *insn)
13851 rtx note = find_regno_note (insn, REG_DEAD, regno);
13853 if (note)
13854 remove_note (insn, note);
13856 return note;
13859 /* For each register (hardware or pseudo) used within expression X, if its
13860 death is in an instruction whose luid is between FROM_LUID (inclusive) and
13861 the luid of TO_INSN (exclusive), put a REG_DEAD note for that register in the
13862 list headed by PNOTES.
13864 That said, don't move registers killed by maybe_kill_insn.
13866 This is done when X is being merged by combination into TO_INSN. These
13867 notes will then be distributed as needed. */
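/* An illustrative case (pseudo register numbers are made up): if I2 was
   (set (reg 100) (plus (reg 99) (const_int 4))) and reg 99 died there, then
   once reg 100's value has been substituted into TO_INSN the REG_DEAD note
   for reg 99 cannot stay on I2, which is about to be deleted; move_deaths
   removes it there and queues a REG_DEAD note for reg 99 on PNOTES so that
   distribute_notes can place it on the combined insn.  */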
13869 static void
13870 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13871 rtx *pnotes)
13873 const char *fmt;
13874 int len, i;
13875 enum rtx_code code = GET_CODE (x);
13877 if (code == REG)
13879 unsigned int regno = REGNO (x);
13880 rtx_insn *where_dead = reg_stat[regno].last_death;
13882 /* If we do not know where the register died, it may still die between
13883 FROM_LUID and TO_INSN. If so, find it. This is PR83304. */
13884 if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
13886 rtx_insn *insn = prev_real_nondebug_insn (to_insn);
13887 while (insn
13888 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
13889 && DF_INSN_LUID (insn) >= from_luid)
13891 if (dead_or_set_regno_p (insn, regno))
13893 if (find_regno_note (insn, REG_DEAD, regno))
13894 where_dead = insn;
13895 break;
13898 insn = prev_real_nondebug_insn (insn);
13902 /* Don't move the register if it gets killed in between from and to. */
13903 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13904 && ! reg_referenced_p (x, maybe_kill_insn))
13905 return;
13907 if (where_dead
13908 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13909 && DF_INSN_LUID (where_dead) >= from_luid
13910 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13912 rtx note = remove_death (regno, where_dead);
13914 /* It is possible for the call above to return 0. This can occur
13915 when last_death points to I2 or I1 that we combined with.
13916 In that case make a new note.
13918 We must also check for the case where X is a hard register
13919 and NOTE is a death note for a range of hard registers
13920 including X. In that case, we must put REG_DEAD notes for
13921 the remaining registers in place of NOTE. */
13923 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13924 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13926 unsigned int deadregno = REGNO (XEXP (note, 0));
13927 unsigned int deadend = END_REGNO (XEXP (note, 0));
13928 unsigned int ourend = END_REGNO (x);
13929 unsigned int i;
13931 for (i = deadregno; i < deadend; i++)
13932 if (i < regno || i >= ourend)
13933 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13936 /* If we didn't find any note, or if we found a REG_DEAD note that
13937 covers only part of the given reg, and we have a multi-reg hard
13938 register, then to be safe we must check for REG_DEAD notes
13939 for each register other than the first. They could have
13940 their own REG_DEAD notes lying around. */
13941 else if ((note == 0
13942 || (note != 0
13943 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13944 GET_MODE (x))))
13945 && regno < FIRST_PSEUDO_REGISTER
13946 && REG_NREGS (x) > 1)
13948 unsigned int ourend = END_REGNO (x);
13949 unsigned int i, offset;
13950 rtx oldnotes = 0;
13952 if (note)
13953 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13954 else
13955 offset = 1;
13957 for (i = regno + offset; i < ourend; i++)
13958 move_deaths (regno_reg_rtx[i],
13959 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13962 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13964 XEXP (note, 1) = *pnotes;
13965 *pnotes = note;
13967 else
13968 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13971 return;
13974 else if (GET_CODE (x) == SET)
13976 rtx dest = SET_DEST (x);
13978 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13980 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13981 that accesses one word of a multi-word item, some
13982 piece of every register in the expression is used by
13983 this insn, so remove any old death. */
13984 /* ??? So why do we test for equality of the sizes? */
13986 if (GET_CODE (dest) == ZERO_EXTRACT
13987 || GET_CODE (dest) == STRICT_LOW_PART
13988 || (GET_CODE (dest) == SUBREG
13989 && !read_modify_subreg_p (dest)))
13991 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13992 return;
13995 /* If this is some other SUBREG, we know it replaces the entire
13996 value, so use that as the destination. */
13997 if (GET_CODE (dest) == SUBREG)
13998 dest = SUBREG_REG (dest);
14000 /* If this is a MEM, adjust deaths of anything used in the address.
14001 For a REG (the only other possibility), the entire value is
14002 being replaced so the old value is not used in this insn. */
14004 if (MEM_P (dest))
14005 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14006 to_insn, pnotes);
14007 return;
14010 else if (GET_CODE (x) == CLOBBER)
14011 return;
14013 len = GET_RTX_LENGTH (code);
14014 fmt = GET_RTX_FORMAT (code);
14016 for (i = 0; i < len; i++)
14018 if (fmt[i] == 'E')
14020 int j;
14021 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14022 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14023 to_insn, pnotes);
14025 else if (fmt[i] == 'e')
14026 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14030 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14031 pattern of an insn. X must be a REG. */
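/* An illustrative example (made-up pseudo numbers): with BODY
   (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
   (reg:SI 101))
   reg 100 is the object whose bits are being stored into, so the function
   returns 1 when X is reg 100 and 0 when X is reg 101 (the stored value).  */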
14033 static int
14034 reg_bitfield_target_p (rtx x, rtx body)
14036 int i;
14038 if (GET_CODE (body) == SET)
14040 rtx dest = SET_DEST (body);
14041 rtx target;
14042 unsigned int regno, tregno, endregno, endtregno;
14044 if (GET_CODE (dest) == ZERO_EXTRACT)
14045 target = XEXP (dest, 0);
14046 else if (GET_CODE (dest) == STRICT_LOW_PART)
14047 target = SUBREG_REG (XEXP (dest, 0));
14048 else
14049 return 0;
14051 if (GET_CODE (target) == SUBREG)
14052 target = SUBREG_REG (target);
14054 if (!REG_P (target))
14055 return 0;
14057 tregno = REGNO (target), regno = REGNO (x);
14058 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14059 return target == x;
14061 endtregno = end_hard_regno (GET_MODE (target), tregno);
14062 endregno = end_hard_regno (GET_MODE (x), regno);
14064 return endregno > tregno && regno < endtregno;
14067 else if (GET_CODE (body) == PARALLEL)
14068 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14069 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14070 return 1;
14072 return 0;
14075 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14076 as appropriate. I3 and I2 are the insns resulting from the combination
14077 of the insns that included FROM_INSN (I2 may be zero).
14079 ELIM_I2, ELIM_I1 and ELIM_I0 are either zero or registers that we know
14080 will not need REG_DEAD notes because they are being substituted for. This
14081 saves searching in the most common cases.
14083 Each note in the list is either ignored or placed on some insns, depending
14084 on the type of note. */
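/* For instance, when I2 has been merged into I3, a REG_DEAD note for a
   register that is still an input of the combined pattern is placed on I3,
   while a note for the register that merely carried I2's result into I3
   (ELIM_I2) is dropped, since that register no longer appears anywhere.  */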
14086 static void
14087 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14088 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14090 rtx note, next_note;
14091 rtx tem_note;
14092 rtx_insn *tem_insn;
14094 for (note = notes; note; note = next_note)
14096 rtx_insn *place = 0, *place2 = 0;
14098 next_note = XEXP (note, 1);
14099 switch (REG_NOTE_KIND (note))
14101 case REG_BR_PROB:
14102 case REG_BR_PRED:
14103 /* Doesn't matter much where we put this, as long as it's somewhere.
14104 It is preferable to keep these notes on branches, which is most
14105 likely to be i3. */
14106 place = i3;
14107 break;
14109 case REG_NON_LOCAL_GOTO:
14110 if (JUMP_P (i3))
14111 place = i3;
14112 else
14114 gcc_assert (i2 && JUMP_P (i2));
14115 place = i2;
14117 break;
14119 case REG_EH_REGION:
14120 /* These notes must remain with the call or trapping instruction. */
14121 if (CALL_P (i3))
14122 place = i3;
14123 else if (i2 && CALL_P (i2))
14124 place = i2;
14125 else
14127 gcc_assert (cfun->can_throw_non_call_exceptions);
14128 if (may_trap_p (i3))
14129 place = i3;
14130 else if (i2 && may_trap_p (i2))
14131 place = i2;
14132 /* ??? Otherwise assume we've combined things such that we
14133 can now prove that the instructions can't trap. Drop the
14134 note in this case. */
14136 break;
14138 case REG_ARGS_SIZE:
14139 /* ??? How to distribute between i3-i1. Assume i3 contains the
14140 entire adjustment. Assert that i3 contains at least some adjustment. */
14141 if (!noop_move_p (i3))
14143 poly_int64 old_size, args_size = get_args_size (note);
14144 /* fixup_args_size_notes looks at REG_NORETURN note,
14145 so ensure the note is placed there first. */
14146 if (CALL_P (i3))
14148 rtx *np;
14149 for (np = &next_note; *np; np = &XEXP (*np, 1))
14150 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14152 rtx n = *np;
14153 *np = XEXP (n, 1);
14154 XEXP (n, 1) = REG_NOTES (i3);
14155 REG_NOTES (i3) = n;
14156 break;
14159 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14160 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14161 note to all noreturn calls; allow that here. */
14162 gcc_assert (maybe_ne (old_size, args_size)
14163 || (CALL_P (i3)
14164 && !ACCUMULATE_OUTGOING_ARGS
14165 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14167 break;
14169 case REG_NORETURN:
14170 case REG_SETJMP:
14171 case REG_TM:
14172 case REG_CALL_DECL:
14173 case REG_UNTYPED_CALL:
14174 case REG_CALL_NOCF_CHECK:
14175 /* These notes must remain with the call. It should not be
14176 possible for both I2 and I3 to be a call. */
14177 if (CALL_P (i3))
14178 place = i3;
14179 else
14181 gcc_assert (i2 && CALL_P (i2));
14182 place = i2;
14184 break;
14186 case REG_UNUSED:
14187 /* Any clobbers for i3 may still exist, and so we must process
14188 REG_UNUSED notes from that insn.
14190 Any clobbers from i2 or i1 can only exist if they were added by
14191 recog_for_combine. In that case, recog_for_combine created the
14192 necessary REG_UNUSED notes. Trying to keep any original
14193 REG_UNUSED notes from these insns can cause incorrect output
14194 if they are for the same register as the original i3 dest.
14195 In that case, we will notice that the register is set in i3,
14196 and then add a REG_UNUSED note for the destination of i3, which
14197 is wrong. However, it is possible to have REG_UNUSED notes from
14198 i2 or i1 for registers which were both used and clobbered, so
14199 we keep notes from i2 or i1 if they will turn into REG_DEAD
14200 notes. */
14202 /* If this register is set or clobbered between FROM_INSN and I3,
14203 we should not create a note for it. */
14204 if (reg_set_between_p (XEXP (note, 0), from_insn, i3))
14205 break;
14207 /* If this register is set or clobbered in I3, put the note there
14208 unless there is one already. */
14209 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14211 if (from_insn != i3)
14212 break;
14214 if (! (REG_P (XEXP (note, 0))
14215 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14216 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14217 place = i3;
14219 /* Otherwise, if this register is used by I3, then this register
14220 now dies here, so we must put a REG_DEAD note here unless there
14221 is one already. */
14222 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14223 && ! (REG_P (XEXP (note, 0))
14224 ? find_regno_note (i3, REG_DEAD,
14225 REGNO (XEXP (note, 0)))
14226 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14228 PUT_REG_NOTE_KIND (note, REG_DEAD);
14229 place = i3;
14232 /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14233 but we can't tell which at this point. We must reset any
14234 expectations we had about the value that was previously
14235 stored in the reg. ??? Ideally, we'd adjust REG_N_SETS
14236 and, if appropriate, restore its previous value, but we
14237 don't have enough information for that at this point. */
14238 else
14240 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14242 /* Otherwise, if this register is now referenced in i2
14243 then the register used to be modified in one of the
14244 original insns. If it was i3 (say, in an unused
14245 parallel), it's now completely gone, so the note can
14246 be discarded. But if it was modified in i2, i1 or i0
14247 and we still reference it in i2, then we're
14248 referencing the previous value, and since the
14249 register was modified and REG_UNUSED, we know that
14250 the previous value is now dead. So, if we only
14251 reference the register in i2, we change the note to
14252 REG_DEAD, to reflect the previous value. However, if
14253 we're also setting or clobbering the register as
14254 scratch, we know (because the register was not
14255 referenced in i3) that it's unused, just as it was
14256 unused before, and we place the note in i2. */
14257 if (from_insn != i3 && i2 && INSN_P (i2)
14258 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14260 if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14261 PUT_REG_NOTE_KIND (note, REG_DEAD);
14262 if (! (REG_P (XEXP (note, 0))
14263 ? find_regno_note (i2, REG_NOTE_KIND (note),
14264 REGNO (XEXP (note, 0)))
14265 : find_reg_note (i2, REG_NOTE_KIND (note),
14266 XEXP (note, 0))))
14267 place = i2;
14271 break;
14273 case REG_EQUAL:
14274 case REG_EQUIV:
14275 case REG_NOALIAS:
14276 /* These notes say something about results of an insn. We can
14277 only support them if they used to be on I3 in which case they
14278 remain on I3. Otherwise they are ignored.
14280 If the note refers to an expression that is not a constant, we
14281 must also ignore the note since we cannot tell whether the
14282 equivalence is still true. It might be possible to do
14283 slightly better than this (we only have a problem if I2DEST
14284 or I1DEST is present in the expression), but it doesn't
14285 seem worth the trouble. */
14287 if (from_insn == i3
14288 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14289 place = i3;
14290 break;
14292 case REG_INC:
14293 /* These notes say something about how a register is used. They must
14294 be present on any use of the register in I2 or I3. */
14295 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14296 place = i3;
14298 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14300 if (place)
14301 place2 = i2;
14302 else
14303 place = i2;
14305 break;
14307 case REG_LABEL_TARGET:
14308 case REG_LABEL_OPERAND:
14309 /* This can show up in several ways -- either directly in the
14310 pattern, or hidden off in the constant pool with (or without?)
14311 a REG_EQUAL note. */
14312 /* ??? Ignore the without-reg_equal-note problem for now. */
14313 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14314 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14315 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14316 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14317 place = i3;
14319 if (i2
14320 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14321 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14322 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14323 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14325 if (place)
14326 place2 = i2;
14327 else
14328 place = i2;
14331 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14332 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14333 there. */
14334 if (place && JUMP_P (place)
14335 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14336 && (JUMP_LABEL (place) == NULL
14337 || JUMP_LABEL (place) == XEXP (note, 0)))
14339 rtx label = JUMP_LABEL (place);
14341 if (!label)
14342 JUMP_LABEL (place) = XEXP (note, 0);
14343 else if (LABEL_P (label))
14344 LABEL_NUSES (label)--;
14347 if (place2 && JUMP_P (place2)
14348 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14349 && (JUMP_LABEL (place2) == NULL
14350 || JUMP_LABEL (place2) == XEXP (note, 0)))
14352 rtx label = JUMP_LABEL (place2);
14354 if (!label)
14355 JUMP_LABEL (place2) = XEXP (note, 0);
14356 else if (LABEL_P (label))
14357 LABEL_NUSES (label)--;
14358 place2 = 0;
14360 break;
14362 case REG_NONNEG:
14363 /* This note says something about the value of a register prior
14364 to the execution of an insn. It is too much trouble to see
14365 if the note is still correct in all situations. It is better
14366 to simply delete it. */
14367 break;
14369 case REG_DEAD:
14370 /* If we replaced the right hand side of FROM_INSN with a
14371 REG_EQUAL note, the original use of the dying register
14372 will not have been combined into I3 and I2. In such cases,
14373 FROM_INSN is guaranteed to be the first of the combined
14374 instructions, so we simply need to search back before
14375 FROM_INSN for the previous use or set of this register,
14376 then alter the notes there appropriately.
14378 If the register is used as an input in I3, it dies there.
14379 Similarly for I2, if it is nonzero and adjacent to I3.
14381 If the register is not used as an input in either I3 or I2
14382 and it is not one of the registers we were supposed to eliminate,
14383 there are two possibilities. We might have a non-adjacent I2
14384 or we might have somehow eliminated an additional register
14385 from a computation. For example, we might have had A & B where
14386 we discover that B will always be zero. In this case we will
14387 eliminate the reference to A.
14389 In both cases, we must search to see if we can find a previous
14390 use of A and put the death note there. */
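/* E.g. (with made-up pseudo numbers) if I2 computed
   (set (reg 101) (and (reg 100) (reg 99))) and combination proved reg 99
   to be zero, the use of reg 100 disappears from the combined pattern, so
   its REG_DEAD note has to be moved back to the previous insn that still
   uses reg 100.  */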
14392 if (from_insn
14393 && from_insn == i2mod
14394 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14395 tem_insn = from_insn;
14396 else
14398 if (from_insn
14399 && CALL_P (from_insn)
14400 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14401 place = from_insn;
14402 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14404 /* If the new I2 sets the same register that is marked
14405 dead in the note, we do not in general know where to
14406 put the note. One important case we _can_ handle is
14407 when the note comes from I3. */
14408 if (from_insn == i3)
14409 place = i3;
14410 else
14411 break;
14413 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14414 place = i3;
14415 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14416 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14417 place = i2;
14418 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14419 && !(i2mod
14420 && reg_overlap_mentioned_p (XEXP (note, 0),
14421 i2mod_old_rhs)))
14422 || rtx_equal_p (XEXP (note, 0), elim_i1)
14423 || rtx_equal_p (XEXP (note, 0), elim_i0))
14424 break;
14425 tem_insn = i3;
14428 if (place == 0)
14430 basic_block bb = this_basic_block;
14432 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14434 if (!NONDEBUG_INSN_P (tem_insn))
14436 if (tem_insn == BB_HEAD (bb))
14437 break;
14438 continue;
14441 /* If the register is being set at TEM_INSN, see if that is all
14442 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14443 into a REG_UNUSED note instead. Don't delete sets to
14444 global register vars. */
14445 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14446 || !global_regs[REGNO (XEXP (note, 0))])
14447 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14449 rtx set = single_set (tem_insn);
14450 rtx inner_dest = 0;
14452 if (set != 0)
14453 for (inner_dest = SET_DEST (set);
14454 (GET_CODE (inner_dest) == STRICT_LOW_PART
14455 || GET_CODE (inner_dest) == SUBREG
14456 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14457 inner_dest = XEXP (inner_dest, 0))
14460 /* Verify that it was the set, and not a clobber that
14461 modified the register.
14463 If we cannot delete the setter due to side
14464 effects, mark it with a REG_UNUSED note instead
14465 of deleting it. */
14467 if (set != 0 && ! side_effects_p (SET_SRC (set))
14468 && rtx_equal_p (XEXP (note, 0), inner_dest))
14470 /* Move the notes and links of TEM_INSN elsewhere.
14471 This might delete other dead insns recursively.
14472 First set the pattern to something that won't use
14473 any register. */
14474 rtx old_notes = REG_NOTES (tem_insn);
14476 PATTERN (tem_insn) = pc_rtx;
14477 REG_NOTES (tem_insn) = NULL;
14479 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14480 NULL_RTX, NULL_RTX, NULL_RTX);
14481 distribute_links (LOG_LINKS (tem_insn));
14483 unsigned int regno = REGNO (XEXP (note, 0));
14484 reg_stat_type *rsp = &reg_stat[regno];
14485 if (rsp->last_set == tem_insn)
14486 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14488 SET_INSN_DELETED (tem_insn);
14489 if (tem_insn == i2)
14490 i2 = NULL;
14492 else
14494 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14496 /* If there isn't already a REG_UNUSED note, put one
14497 here. Do not place a REG_DEAD note, even if
14498 the register is also used here; that would not
14499 match the algorithm used in lifetime analysis
14500 and can cause the consistency check in the
14501 scheduler to fail. */
14502 if (! find_regno_note (tem_insn, REG_UNUSED,
14503 REGNO (XEXP (note, 0))))
14504 place = tem_insn;
14505 break;
14508 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14509 || (CALL_P (tem_insn)
14510 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14512 place = tem_insn;
14514 /* If we are doing a 3->2 combination, and we have a
14515 register which formerly died in i3 and was not used
14516 by i2, which now no longer dies in i3 and is used in
14517 i2 but does not die in i2, and place is between i2
14518 and i3, then we may need to move a link from place to
14519 i2. */
14520 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14521 && from_insn
14522 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14523 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14525 struct insn_link *links = LOG_LINKS (place);
14526 LOG_LINKS (place) = NULL;
14527 distribute_links (links);
14529 break;
14532 if (tem_insn == BB_HEAD (bb))
14533 break;
14538 /* If the register is set or already dead at PLACE, we needn't do
14539 anything with this note if it is still a REG_DEAD note.
14540 We check here if it is set at all, not if it is totally replaced,
14541 which is what `dead_or_set_p' checks, so also check for it being
14542 set partially. */
14544 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14546 unsigned int regno = REGNO (XEXP (note, 0));
14547 reg_stat_type *rsp = &reg_stat[regno];
14549 if (dead_or_set_p (place, XEXP (note, 0))
14550 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14552 /* Unless the register previously died in PLACE, clear
14553 last_death. [I no longer understand why this is
14554 being done.] */
14555 if (rsp->last_death != place)
14556 rsp->last_death = 0;
14557 place = 0;
14559 else
14560 rsp->last_death = place;
14562 /* If this is a death note for a hard reg that is occupying
14563 multiple registers, ensure that we are still using all
14564 parts of the object. If we find a piece of the object
14565 that is unused, we must arrange for an appropriate REG_DEAD
14566 note to be added for it. However, we can't just emit a USE
14567 and tag the note to it, since the register might actually
14568 be dead; so we recurse, and the recursive call then finds
14569 the previous insn that used this register. */
14571 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14573 unsigned int endregno = END_REGNO (XEXP (note, 0));
14574 bool all_used = true;
14575 unsigned int i;
14577 for (i = regno; i < endregno; i++)
14578 if ((! refers_to_regno_p (i, PATTERN (place))
14579 && ! find_regno_fusage (place, USE, i))
14580 || dead_or_set_regno_p (place, i))
14582 all_used = false;
14583 break;
14586 if (! all_used)
14588 /* Put only REG_DEAD notes for pieces that are
14589 not already dead or set. */
14591 for (i = regno; i < endregno;
14592 i += hard_regno_nregs (i, reg_raw_mode[i]))
14594 rtx piece = regno_reg_rtx[i];
14595 basic_block bb = this_basic_block;
14597 if (! dead_or_set_p (place, piece)
14598 && ! reg_bitfield_target_p (piece,
14599 PATTERN (place)))
14601 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14602 NULL_RTX);
14604 distribute_notes (new_note, place, place,
14605 NULL, NULL_RTX, NULL_RTX,
14606 NULL_RTX);
14608 else if (! refers_to_regno_p (i, PATTERN (place))
14609 && ! find_regno_fusage (place, USE, i))
14610 for (tem_insn = PREV_INSN (place); ;
14611 tem_insn = PREV_INSN (tem_insn))
14613 if (!NONDEBUG_INSN_P (tem_insn))
14615 if (tem_insn == BB_HEAD (bb))
14616 break;
14617 continue;
14619 if (dead_or_set_p (tem_insn, piece)
14620 || reg_bitfield_target_p (piece,
14621 PATTERN (tem_insn)))
14623 add_reg_note (tem_insn, REG_UNUSED, piece);
14624 break;
14629 place = 0;
14633 break;
14635 default:
14636 /* Any other notes should not be present at this point in the
14637 compilation. */
14638 gcc_unreachable ();
14641 if (place)
14643 XEXP (note, 1) = REG_NOTES (place);
14644 REG_NOTES (place) = note;
14646 /* Set added_notes_insn to the earliest insn we added a note to. */
14647 if (added_notes_insn == 0
14648 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14649 added_notes_insn = place;
14652 if (place2)
14654 add_shallow_copy_of_reg_note (place2, note);
14656 /* Set added_notes_insn to the earliest insn we added a note to. */
14657 if (added_notes_insn == 0
14658 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14659 added_notes_insn = place2;
14664 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14665 I3, I2, and I1 to new locations. This is also called to add a link
14666 pointing at I3 when I3's destination is changed. */
14668 static void
14669 distribute_links (struct insn_link *links)
14671 struct insn_link *link, *next_link;
14673 for (link = links; link; link = next_link)
14675 rtx_insn *place = 0;
14676 rtx_insn *insn;
14677 rtx set, reg;
14679 next_link = link->next;
14681 /* If the insn that this link points to is a NOTE, ignore it. */
14682 if (NOTE_P (link->insn))
14683 continue;
14685 set = 0;
14686 rtx pat = PATTERN (link->insn);
14687 if (GET_CODE (pat) == SET)
14688 set = pat;
14689 else if (GET_CODE (pat) == PARALLEL)
14691 int i;
14692 for (i = 0; i < XVECLEN (pat, 0); i++)
14694 set = XVECEXP (pat, 0, i);
14695 if (GET_CODE (set) != SET)
14696 continue;
14698 reg = SET_DEST (set);
14699 while (GET_CODE (reg) == ZERO_EXTRACT
14700 || GET_CODE (reg) == STRICT_LOW_PART
14701 || GET_CODE (reg) == SUBREG)
14702 reg = XEXP (reg, 0);
14704 if (!REG_P (reg))
14705 continue;
14707 if (REGNO (reg) == link->regno)
14708 break;
14710 if (i == XVECLEN (pat, 0))
14711 continue;
14713 else
14714 continue;
14716 reg = SET_DEST (set);
14718 while (GET_CODE (reg) == ZERO_EXTRACT
14719 || GET_CODE (reg) == STRICT_LOW_PART
14720 || GET_CODE (reg) == SUBREG)
14721 reg = XEXP (reg, 0);
14723 if (reg == pc_rtx)
14724 continue;
14726 /* A LOG_LINK is defined as being placed on the first insn that uses
14727 a register and points to the insn that sets the register. Start
14728 searching at the next insn after the target of the link and stop
14729 when we reach a set of the register or the end of the basic block.
14731 Note that this correctly handles the link that used to point from
14732 I3 to I2. Also note that not much searching is typically done here
14733 since most links don't point very far away. */
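/* E.g. for a link that still points at a surviving I2, the search starts
   at the insn after I2 and re-hangs the link on the first insn that still
   uses I2's destination register, stopping early if that register is set
   again or the basic block ends.  */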
14735 for (insn = NEXT_INSN (link->insn);
14736 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14737 || BB_HEAD (this_basic_block->next_bb) != insn));
14738 insn = NEXT_INSN (insn))
14739 if (DEBUG_INSN_P (insn))
14740 continue;
14741 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14743 if (reg_referenced_p (reg, PATTERN (insn)))
14744 place = insn;
14745 break;
14747 else if (CALL_P (insn)
14748 && find_reg_fusage (insn, USE, reg))
14750 place = insn;
14751 break;
14753 else if (INSN_P (insn) && reg_set_p (reg, insn))
14754 break;
14756 /* If we found a place to put the link, place it there unless there
14757 is already a link to the same insn as LINK at that point. */
14759 if (place)
14761 struct insn_link *link2;
14763 FOR_EACH_LOG_LINK (link2, place)
14764 if (link2->insn == link->insn && link2->regno == link->regno)
14765 break;
14767 if (link2 == NULL)
14769 link->next = LOG_LINKS (place);
14770 LOG_LINKS (place) = link;
14772 /* Set added_links_insn to the earliest insn we added a
14773 link to. */
14774 if (added_links_insn == 0
14775 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14776 added_links_insn = place;
14782 /* Check for any register or memory mentioned in EQUIV that is not
14783 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14784 of EXPR where some registers may have been replaced by constants. */
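/* An illustrative example (made-up pseudo numbers): if EXPR is
   (plus:SI (reg:SI 100) (reg:SI 101)) and EQUIV is
   (plus:SI (reg:SI 100) (const_int 4)), every register of EQUIV is also
   mentioned in EXPR and the function returns false; if EQUIV instead
   mentioned (reg:SI 102) or a MEM, it would return true.  */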
14786 static bool
14787 unmentioned_reg_p (rtx equiv, rtx expr)
14789 subrtx_iterator::array_type array;
14790 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14792 const_rtx x = *iter;
14793 if ((REG_P (x) || MEM_P (x))
14794 && !reg_mentioned_p (x, expr))
14795 return true;
14797 return false;
14800 DEBUG_FUNCTION void
14801 dump_combine_stats (FILE *file)
14803 fprintf
14804 (file,
14805 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14806 combine_attempts, combine_merges, combine_extras, combine_successes);
14809 void
14810 dump_combine_total_stats (FILE *file)
14812 fprintf
14813 (file,
14814 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14815 total_attempts, total_merges, total_extras, total_successes);
14818 /* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo copy, because
14819 the reg-to-reg copy can usefully combine with later instructions, but we
14820 do not want to combine the hard reg into later instructions, for that
14821 restricts register allocation. */
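/* An illustrative transformation (pseudo numbers and the x86 register name
   are made up for the example): an incoming-argument copy
   (set (reg:SI 100) (reg:SI 0 ax))
   becomes
   (set (reg:SI 200) (reg:SI 0 ax))
   (set (reg:SI 100) (reg:SI 200))
   so that later combinations can use pseudo 200 while the hard register
   stays confined to the initial copy.  */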
14822 static void
14823 make_more_copies (void)
14825 basic_block bb;
14827 FOR_EACH_BB_FN (bb, cfun)
14829 rtx_insn *insn;
14831 FOR_BB_INSNS (bb, insn)
14833 if (!NONDEBUG_INSN_P (insn))
14834 continue;
14836 rtx set = single_set (insn);
14837 if (!set)
14838 continue;
14840 rtx dest = SET_DEST (set);
14841 if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
14842 continue;
14844 rtx src = SET_SRC (set);
14845 if (!(REG_P (src) && HARD_REGISTER_P (src)))
14846 continue;
14847 if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
14848 continue;
14850 rtx new_reg = gen_reg_rtx (GET_MODE (dest));
14851 rtx_insn *new_insn = gen_move_insn (new_reg, src);
14852 SET_SRC (set) = new_reg;
14853 emit_insn_before (new_insn, insn);
14854 df_insn_rescan (insn);
14859 /* Try combining insns through substitution. */
14860 static unsigned int
14861 rest_of_handle_combine (void)
14863 make_more_copies ();
14865 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14866 df_note_add_problem ();
14867 df_analyze ();
14869 regstat_init_n_sets_and_refs ();
14870 reg_n_sets_max = max_reg_num ();
14872 int rebuild_jump_labels_after_combine
14873 = combine_instructions (get_insns (), max_reg_num ());
14875 /* Combining insns may have turned an indirect jump into a
14876 direct jump. Rebuild the JUMP_LABEL fields of jumping
14877 instructions. */
14878 if (rebuild_jump_labels_after_combine)
14880 if (dom_info_available_p (CDI_DOMINATORS))
14881 free_dominance_info (CDI_DOMINATORS);
14882 timevar_push (TV_JUMP);
14883 rebuild_jump_labels (get_insns ());
14884 cleanup_cfg (0);
14885 timevar_pop (TV_JUMP);
14888 regstat_free_n_sets_and_refs ();
14889 return 0;
14892 namespace {
14894 const pass_data pass_data_combine =
14896 RTL_PASS, /* type */
14897 "combine", /* name */
14898 OPTGROUP_NONE, /* optinfo_flags */
14899 TV_COMBINE, /* tv_id */
14900 PROP_cfglayout, /* properties_required */
14901 0, /* properties_provided */
14902 0, /* properties_destroyed */
14903 0, /* todo_flags_start */
14904 TODO_df_finish, /* todo_flags_finish */
14907 class pass_combine : public rtl_opt_pass
14909 public:
14910 pass_combine (gcc::context *ctxt)
14911 : rtl_opt_pass (pass_data_combine, ctxt)
14914 /* opt_pass methods: */
14915 virtual bool gate (function *) { return (optimize > 0); }
14916 virtual unsigned int execute (function *)
14918 return rest_of_handle_combine ();
14921 }; // class pass_combine
14923 } // anon namespace
14925 rtl_opt_pass *
14926 make_pass_combine (gcc::context *ctxt)
14928 return new pass_combine (ctxt);