[official-gcc.git] / gcc / combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2014 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's a high likelihood of success.
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
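/* As an illustrative sketch only (the register numbers and the
   register+offset addressing mode are hypothetical, not taken from any
   particular target), a typical two-insn combination looks like:

       (set (reg:SI 116) (plus:SI (reg:SI 115) (const_int 4)))
       (set (mem:SI (reg:SI 116)) (reg:SI 117))

   If reg 116 dies in the second insn and the target accepts the
   combined address, the pair is replaced by the single insn

       (set (mem:SI (plus:SI (reg:SI 115) (const_int 4))) (reg:SI 117))

   and the first insn is deleted.  */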
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "tree.h"
84 #include "stor-layout.h"
85 #include "tm_p.h"
86 #include "flags.h"
87 #include "regs.h"
88 #include "hard-reg-set.h"
89 #include "predict.h"
90 #include "vec.h"
91 #include "hashtab.h"
92 #include "hash-set.h"
93 #include "machmode.h"
94 #include "input.h"
95 #include "function.h"
96 #include "dominance.h"
97 #include "cfg.h"
98 #include "cfgrtl.h"
99 #include "cfgcleanup.h"
100 #include "basic-block.h"
101 #include "insn-config.h"
102 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
103 #include "expr.h"
104 #include "insn-attr.h"
105 #include "recog.h"
106 #include "diagnostic-core.h"
107 #include "target.h"
108 #include "insn-codes.h"
109 #include "optabs.h"
110 #include "rtlhooks-def.h"
111 #include "params.h"
112 #include "tree-pass.h"
113 #include "df.h"
114 #include "valtrack.h"
115 #include "hash-map.h"
116 #include "is-a.h"
117 #include "plugin-api.h"
118 #include "ipa-ref.h"
119 #include "cgraph.h"
120 #include "obstack.h"
121 #include "statistics.h"
122 #include "params.h"
123 #include "rtl-iter.h"
125 /* Number of attempts to combine instructions in this function. */
127 static int combine_attempts;
129 /* Number of attempts that got as far as substitution in this function. */
131 static int combine_merges;
133 /* Number of instructions combined with added SETs in this function. */
135 static int combine_extras;
137 /* Number of instructions combined in this function. */
139 static int combine_successes;
141 /* Totals over entire compilation. */
143 static int total_attempts, total_merges, total_extras, total_successes;
145 /* combine_instructions may try to replace the right hand side of the
146 second instruction with the value of an associated REG_EQUAL note
147 before throwing it at try_combine. That is problematic when there
148 is a REG_DEAD note for a register used in the old right hand side
149 and can cause distribute_notes to do wrong things. This is the
150 second instruction if it has been so modified, null otherwise. */
152 static rtx_insn *i2mod;
154 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
156 static rtx i2mod_old_rhs;
158 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
160 static rtx i2mod_new_rhs;
162 typedef struct reg_stat_struct {
163 /* Record last point of death of (hard or pseudo) register n. */
164 rtx_insn *last_death;
166 /* Record last point of modification of (hard or pseudo) register n. */
167 rtx_insn *last_set;
169 /* The next group of fields allows the recording of the last value assigned
170 to (hard or pseudo) register n. We use this information to see if an
171 operation being processed is redundant given a prior operation performed
172 on the register. For example, an `and' with a constant is redundant if
173 all the zero bits are already known to be turned off.
175 We use an approach similar to that used by cse, but change it in the
176 following ways:
178 (1) We do not want to reinitialize at each label.
179 (2) It is useful, but not critical, to know the actual value assigned
180 to a register. Often just its form is helpful.
182 Therefore, we maintain the following fields:
184 last_set_value the last value assigned
185 last_set_label records the value of label_tick when the
186 register was assigned
187 last_set_table_tick records the value of label_tick when a
188 value using the register is assigned
189 last_set_invalid set to nonzero when it is not valid
190 to use the value of this register in some
191 register's value
193 To understand the usage of these tables, it is important to understand
194 the distinction between the value in last_set_value being valid and
195 the register being validly contained in some other expression in the
196 table.
198 (The next two parameters are out of date).
200 reg_stat[i].last_set_value is valid if it is nonzero, and either
201 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
203 Register I may validly appear in any expression returned for the value
204 of another register if reg_n_sets[i] is 1. It may also appear in the
205 value for register J if reg_stat[j].last_set_invalid is zero, or
206 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
208 If an expression is found in the table containing a register which may
209 not validly appear in an expression, the register is replaced by
210 something that won't match, (clobber (const_int 0)). */
212 /* Record last value assigned to (hard or pseudo) register n. */
214 rtx last_set_value;
216 /* Record the value of label_tick when an expression involving register n
217 is placed in last_set_value. */
219 int last_set_table_tick;
221 /* Record the value of label_tick when the value for register n is placed in
222 last_set_value. */
224 int last_set_label;
226 /* These fields are maintained in parallel with last_set_value and are
227 used to store the mode in which the register was last set, the bits
228 that were known to be zero when it was last set, and the number of
229 sign bits copies it was known to have when it was last set. */
231 unsigned HOST_WIDE_INT last_set_nonzero_bits;
232 char last_set_sign_bit_copies;
233 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
235 /* Set nonzero if references to register n in expressions should not be
236 used. last_set_invalid is set nonzero when this register is being
237 assigned to and last_set_table_tick == label_tick. */
239 char last_set_invalid;
241 /* Some registers that are set more than once and used in more than one
242 basic block are nevertheless always set in similar ways. For example,
243 a QImode register may be loaded from memory in two places on a machine
244 where byte loads zero extend.
246 We record in the following fields if a register has some leading bits
247 that are always equal to the sign bit, and what we know about the
248 nonzero bits of a register, specifically which bits are known to be
249 zero.
251 If an entry is zero, it means that we don't know anything special. */
253 unsigned char sign_bit_copies;
255 unsigned HOST_WIDE_INT nonzero_bits;
257 /* Record the value of the label_tick when the last truncation
258 happened. The field truncated_to_mode is only valid if
259 truncation_label == label_tick. */
261 int truncation_label;
263 /* Record the last truncation seen for this register. If truncation
264 is not a nop to this mode we might be able to save an explicit
265 truncation if we know that value already contains a truncated
266 value. */
268 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
269 } reg_stat_type;
272 static vec<reg_stat_type> reg_stat;
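/* Illustrative sketch only (a hypothetical helper, not used by combine):
   how the per-register nonzero_bits knowledge can make a masking AND
   redundant.  If, say, every set of the register is a byte load on a
   machine where byte loads zero extend, reg_stat[regno].nonzero_bits is
   0xff, so an AND with 0xff keeps every possibly-nonzero bit and can be
   dropped.  */

static inline bool
example_mask_is_redundant (unsigned int regno, unsigned HOST_WIDE_INT mask)
{
  /* True if the mask covers every bit that could be nonzero.  */
  return (reg_stat[regno].nonzero_bits & ~mask) == 0;
}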
274 /* Record the luid of the last insn that invalidated memory
275 (anything that writes memory, and subroutine calls, but not pushes). */
277 static int mem_last_set;
279 /* Record the luid of the last CALL_INSN
280 so we can tell whether a potential combination crosses any calls. */
282 static int last_call_luid;
284 /* When `subst' is called, this is the insn that is being modified
285 (by combining in a previous insn). The PATTERN of this insn
286 is still the old pattern partially modified and it should not be
287 looked at, but this may be used to examine the successors of the insn
288 to judge whether a simplification is valid. */
290 static rtx_insn *subst_insn;
292 /* This is the lowest LUID that `subst' is currently dealing with.
293 get_last_value will not return a value if the register was set at or
294 after this LUID. If not for this mechanism, we could get confused if
295 I2 or I1 in try_combine were an insn that used the old value of a register
296 to obtain a new value. In that case, we might erroneously get the
297 new value of the register when we wanted the old one. */
299 static int subst_low_luid;
301 /* This contains any hard registers that are used in newpat; reg_dead_at_p
302 must consider all these registers to be always live. */
304 static HARD_REG_SET newpat_used_regs;
306 /* This is an insn to which a LOG_LINKS entry has been added. If this
307 insn is earlier than I2 or I3, combine should rescan starting at
308 that location. */
310 static rtx_insn *added_links_insn;
312 /* Basic block in which we are performing combines. */
313 static basic_block this_basic_block;
314 static bool optimize_this_for_speed_p;
317 /* Length of the currently allocated uid_insn_cost array. */
319 static int max_uid_known;
321 /* The following array records the insn_rtx_cost for every insn
322 in the instruction stream. */
324 static int *uid_insn_cost;
326 /* The following array records the LOG_LINKS for every insn in the
327 instruction stream as struct insn_link pointers. */
329 struct insn_link {
330 rtx_insn *insn;
331 unsigned int regno;
332 struct insn_link *next;
335 static struct insn_link **uid_log_links;
337 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
338 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
340 #define FOR_EACH_LOG_LINK(L, INSN) \
341 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
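/* Illustrative sketch only (a hypothetical helper, not used by combine):
   walking the LOG_LINKS of an insn with FOR_EACH_LOG_LINK.  Each link
   names an earlier insn in the same basic block that set a register
   used by INSN.  */

static inline int
example_count_log_links (rtx_insn *insn)
{
  struct insn_link *link;
  int n = 0;
  FOR_EACH_LOG_LINK (link, insn)
    n++;
  return n;
}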
343 /* Links for LOG_LINKS are allocated from this obstack. */
345 static struct obstack insn_link_obstack;
347 /* Allocate a link. */
349 static inline struct insn_link *
350 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
352 struct insn_link *l
353 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
354 sizeof (struct insn_link));
355 l->insn = insn;
356 l->regno = regno;
357 l->next = next;
358 return l;
361 /* Incremented for each basic block. */
363 static int label_tick;
365 /* Reset to label_tick for each extended basic block in scanning order. */
367 static int label_tick_ebb_start;
369 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
370 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
372 static machine_mode nonzero_bits_mode;
374 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
375 be safely used. It is zero while computing them and after combine has
376 completed. The former test prevents propagating values based on
377 previously set values, which can be incorrect if a variable is modified
378 in a loop. */
380 static int nonzero_sign_valid;
383 /* Record one modification to rtl structure
384 to be undone by storing old_contents into *where. */
386 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
388 struct undo
390 struct undo *next;
391 enum undo_kind kind;
392 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
393 union { rtx *r; int *i; struct insn_link **l; } where;
396 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
397 num_undo says how many are currently recorded.
399 other_insn is nonzero if we have modified some other insn in the process
400 of working on subst_insn. It must be verified too. */
402 struct undobuf
404 struct undo *undos;
405 struct undo *frees;
406 rtx_insn *other_insn;
409 static struct undobuf undobuf;
411 /* Number of times the pseudo being substituted for
412 was found and replaced. */
414 static int n_occurrences;
416 static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
417 machine_mode,
418 unsigned HOST_WIDE_INT,
419 unsigned HOST_WIDE_INT *);
420 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
421 machine_mode,
422 unsigned int, unsigned int *);
423 static void do_SUBST (rtx *, rtx);
424 static void do_SUBST_INT (int *, int);
425 static void init_reg_last (void);
426 static void setup_incoming_promotions (rtx_insn *);
427 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
428 static int cant_combine_insn_p (rtx_insn *);
429 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
430 rtx_insn *, rtx_insn *, rtx *, rtx *);
431 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
432 static int contains_muldiv (rtx);
433 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 int *, rtx_insn *);
435 static void undo_all (void);
436 static void undo_commit (void);
437 static rtx *find_split_point (rtx *, rtx_insn *, bool);
438 static rtx subst (rtx, rtx, rtx, int, int, int);
439 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
440 static rtx simplify_if_then_else (rtx);
441 static rtx simplify_set (rtx);
442 static rtx simplify_logical (rtx);
443 static rtx expand_compound_operation (rtx);
444 static const_rtx expand_field_assignment (const_rtx);
445 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
446 rtx, unsigned HOST_WIDE_INT, int, int, int);
447 static rtx extract_left_shift (rtx, int);
448 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
449 unsigned HOST_WIDE_INT *);
450 static rtx canon_reg_for_combine (rtx, rtx);
451 static rtx force_to_mode (rtx, machine_mode,
452 unsigned HOST_WIDE_INT, int);
453 static rtx if_then_else_cond (rtx, rtx *, rtx *);
454 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
455 static int rtx_equal_for_field_assignment_p (rtx, rtx);
456 static rtx make_field_assignment (rtx);
457 static rtx apply_distributive_law (rtx);
458 static rtx distribute_and_simplify_rtx (rtx, int);
459 static rtx simplify_and_const_int_1 (machine_mode, rtx,
460 unsigned HOST_WIDE_INT);
461 static rtx simplify_and_const_int (rtx, machine_mode, rtx,
462 unsigned HOST_WIDE_INT);
463 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
464 HOST_WIDE_INT, machine_mode, int *);
465 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
466 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
467 int);
468 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
469 static rtx gen_lowpart_for_combine (machine_mode, rtx);
470 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
471 rtx, rtx *);
472 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
473 static void update_table_tick (rtx);
474 static void record_value_for_reg (rtx, rtx_insn *, rtx);
475 static void check_promoted_subreg (rtx_insn *, rtx);
476 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
477 static void record_dead_and_set_regs (rtx_insn *);
478 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
479 static rtx get_last_value (const_rtx);
480 static int use_crosses_set_p (const_rtx, int);
481 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
482 static int reg_dead_at_p (rtx, rtx_insn *);
483 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
484 static int reg_bitfield_target_p (rtx, rtx);
485 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
486 static void distribute_links (struct insn_link *);
487 static void mark_used_regs_combine (rtx);
488 static void record_promoted_value (rtx_insn *, rtx);
489 static bool unmentioned_reg_p (rtx, rtx);
490 static void record_truncated_values (rtx *, void *);
491 static bool reg_truncated_to_mode (machine_mode, const_rtx);
492 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
495 /* It is not safe to use ordinary gen_lowpart in combine.
496 See comments in gen_lowpart_for_combine. */
497 #undef RTL_HOOKS_GEN_LOWPART
498 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
500 /* Our implementation of gen_lowpart never emits a new pseudo. */
501 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
502 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
504 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
505 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
507 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
508 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
510 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
511 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
513 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
516 /* Convenience wrapper for the canonicalize_comparison target hook.
517 Target hooks cannot use enum rtx_code. */
518 static inline void
519 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
520 bool op0_preserve_value)
522 int code_int = (int)*code;
523 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
524 *code = (enum rtx_code)code_int;
527 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
528 PATTERN cannot be split. Otherwise, it returns an insn sequence.
529 This is a wrapper around split_insns which ensures that the
530 reg_stat vector is made larger if the splitter creates a new
531 register. */
533 static rtx_insn *
534 combine_split_insns (rtx pattern, rtx insn)
536 rtx_insn *ret;
537 unsigned int nregs;
539 ret = safe_as_a <rtx_insn *> (split_insns (pattern, insn));
540 nregs = max_reg_num ();
541 if (nregs > reg_stat.length ())
542 reg_stat.safe_grow_cleared (nregs);
543 return ret;
546 /* This is used by find_single_use to locate an rtx in LOC that
547 contains exactly one use of DEST, which is typically either a REG
548 or CC0. It returns a pointer to the innermost rtx expression
549 containing DEST. Appearances of DEST that are being used to
550 totally replace it are not counted. */
552 static rtx *
553 find_single_use_1 (rtx dest, rtx *loc)
555 rtx x = *loc;
556 enum rtx_code code = GET_CODE (x);
557 rtx *result = NULL;
558 rtx *this_result;
559 int i;
560 const char *fmt;
562 switch (code)
564 case CONST:
565 case LABEL_REF:
566 case SYMBOL_REF:
567 CASE_CONST_ANY:
568 case CLOBBER:
569 return 0;
571 case SET:
572 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
573 of a REG that occupies all of the REG, the insn uses DEST if
574 it is mentioned in the destination or the source. Otherwise, we
575 need only check the source. */
576 if (GET_CODE (SET_DEST (x)) != CC0
577 && GET_CODE (SET_DEST (x)) != PC
578 && !REG_P (SET_DEST (x))
579 && ! (GET_CODE (SET_DEST (x)) == SUBREG
580 && REG_P (SUBREG_REG (SET_DEST (x)))
581 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
582 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
583 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
584 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
585 break;
587 return find_single_use_1 (dest, &SET_SRC (x));
589 case MEM:
590 case SUBREG:
591 return find_single_use_1 (dest, &XEXP (x, 0));
593 default:
594 break;
597 /* If it wasn't one of the common cases above, check each expression and
598 vector of this code. Look for a unique usage of DEST. */
600 fmt = GET_RTX_FORMAT (code);
601 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
603 if (fmt[i] == 'e')
605 if (dest == XEXP (x, i)
606 || (REG_P (dest) && REG_P (XEXP (x, i))
607 && REGNO (dest) == REGNO (XEXP (x, i))))
608 this_result = loc;
609 else
610 this_result = find_single_use_1 (dest, &XEXP (x, i));
612 if (result == NULL)
613 result = this_result;
614 else if (this_result)
615 /* Duplicate usage. */
616 return NULL;
618 else if (fmt[i] == 'E')
620 int j;
622 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
624 if (XVECEXP (x, i, j) == dest
625 || (REG_P (dest)
626 && REG_P (XVECEXP (x, i, j))
627 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
628 this_result = loc;
629 else
630 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
632 if (result == NULL)
633 result = this_result;
634 else if (this_result)
635 return NULL;
640 return result;
644 /* See if DEST, produced in INSN, is used only a single time in the
645 sequel. If so, return a pointer to the innermost rtx expression in which
646 it is used.
648 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
650 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
651 care about REG_DEAD notes or LOG_LINKS.
653 Otherwise, we find the single use by finding an insn that has a
654 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
655 only referenced once in that insn, we know that it must be the first
656 and last insn referencing DEST. */
658 static rtx *
659 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
661 basic_block bb;
662 rtx_insn *next;
663 rtx *result;
664 struct insn_link *link;
666 #ifdef HAVE_cc0
667 if (dest == cc0_rtx)
669 next = NEXT_INSN (insn);
670 if (next == 0
671 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
672 return 0;
674 result = find_single_use_1 (dest, &PATTERN (next));
675 if (result && ploc)
676 *ploc = next;
677 return result;
679 #endif
681 if (!REG_P (dest))
682 return 0;
684 bb = BLOCK_FOR_INSN (insn);
685 for (next = NEXT_INSN (insn);
686 next && BLOCK_FOR_INSN (next) == bb;
687 next = NEXT_INSN (next))
688 if (INSN_P (next) && dead_or_set_p (next, dest))
690 FOR_EACH_LOG_LINK (link, next)
691 if (link->insn == insn && link->regno == REGNO (dest))
692 break;
694 if (link)
696 result = find_single_use_1 (dest, &PATTERN (next));
697 if (ploc)
698 *ploc = next;
699 return result;
703 return 0;
706 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
707 insn. The substitution can be undone by undo_all. If INTO is already
708 set to NEWVAL, do not record this change. Because computing NEWVAL might
709 also call SUBST, we have to compute it before we put anything into
710 the undo table. */
712 static void
713 do_SUBST (rtx *into, rtx newval)
715 struct undo *buf;
716 rtx oldval = *into;
718 if (oldval == newval)
719 return;
721 /* We'd like to catch as many invalid transformations here as
722 possible. Unfortunately, there are way too many mode changes
723 that are perfectly valid, so we'd waste too much effort for
724 little gain doing the checks here. Focus on catching invalid
725 transformations involving integer constants. */
726 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
727 && CONST_INT_P (newval))
729 /* Sanity check that we're replacing oldval with a CONST_INT
730 that is a valid sign-extension for the original mode. */
731 gcc_assert (INTVAL (newval)
732 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
734 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
735 CONST_INT is not valid, because after the replacement, the
736 original mode would be gone. Unfortunately, we can't tell
737 when do_SUBST is called to replace the operand thereof, so we
738 perform this test on oldval instead, checking whether an
739 invalid replacement took place before we got here. */
740 gcc_assert (!(GET_CODE (oldval) == SUBREG
741 && CONST_INT_P (SUBREG_REG (oldval))));
742 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
743 && CONST_INT_P (XEXP (oldval, 0))));
746 if (undobuf.frees)
747 buf = undobuf.frees, undobuf.frees = buf->next;
748 else
749 buf = XNEW (struct undo);
751 buf->kind = UNDO_RTX;
752 buf->where.r = into;
753 buf->old_contents.r = oldval;
754 *into = newval;
756 buf->next = undobuf.undos, undobuf.undos = buf;
759 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
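/* Illustrative sketch only (hypothetical code, not part of combine
   proper): the usual pattern for a tentative rewrite.  Every SUBST is
   recorded in undobuf, so a failed attempt can be rolled back with
   undo_all and a successful one is later finalized with undo_commit.  */

static void
example_tentative_rewrite (rtx set, rtx new_src, bool keep)
{
  SUBST (SET_SRC (set), new_src);	/* Recorded in undobuf.undos.  */
  if (!keep)
    undo_all ();			/* Restores every recorded change.  */
}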
761 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
762 for the value of a HOST_WIDE_INT value (including CONST_INT) is
763 not safe. */
765 static void
766 do_SUBST_INT (int *into, int newval)
768 struct undo *buf;
769 int oldval = *into;
771 if (oldval == newval)
772 return;
774 if (undobuf.frees)
775 buf = undobuf.frees, undobuf.frees = buf->next;
776 else
777 buf = XNEW (struct undo);
779 buf->kind = UNDO_INT;
780 buf->where.i = into;
781 buf->old_contents.i = oldval;
782 *into = newval;
784 buf->next = undobuf.undos, undobuf.undos = buf;
787 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
789 /* Similar to SUBST, but just substitute the mode. This is used when
790 changing the mode of a pseudo-register, so that any other
791 references to the entry in the regno_reg_rtx array will change as
792 well. */
794 static void
795 do_SUBST_MODE (rtx *into, machine_mode newval)
797 struct undo *buf;
798 machine_mode oldval = GET_MODE (*into);
800 if (oldval == newval)
801 return;
803 if (undobuf.frees)
804 buf = undobuf.frees, undobuf.frees = buf->next;
805 else
806 buf = XNEW (struct undo);
808 buf->kind = UNDO_MODE;
809 buf->where.r = into;
810 buf->old_contents.m = oldval;
811 adjust_reg_mode (*into, newval);
813 buf->next = undobuf.undos, undobuf.undos = buf;
816 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
818 #ifndef HAVE_cc0
819 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
821 static void
822 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
824 struct undo *buf;
825 struct insn_link * oldval = *into;
827 if (oldval == newval)
828 return;
830 if (undobuf.frees)
831 buf = undobuf.frees, undobuf.frees = buf->next;
832 else
833 buf = XNEW (struct undo);
835 buf->kind = UNDO_LINKS;
836 buf->where.l = into;
837 buf->old_contents.l = oldval;
838 *into = newval;
840 buf->next = undobuf.undos, undobuf.undos = buf;
843 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
844 #endif
846 /* Subroutine of try_combine. Determine whether the replacement patterns
847 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
848 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
849 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
850 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
851 of all the instructions can be estimated and the replacements are more
852 expensive than the original sequence. */
854 static bool
855 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
856 rtx newpat, rtx newi2pat, rtx newotherpat)
858 int i0_cost, i1_cost, i2_cost, i3_cost;
859 int new_i2_cost, new_i3_cost;
860 int old_cost, new_cost;
862 /* Lookup the original insn_rtx_costs. */
863 i2_cost = INSN_COST (i2);
864 i3_cost = INSN_COST (i3);
866 if (i1)
868 i1_cost = INSN_COST (i1);
869 if (i0)
871 i0_cost = INSN_COST (i0);
872 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
873 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
875 else
877 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
878 ? i1_cost + i2_cost + i3_cost : 0);
879 i0_cost = 0;
882 else
884 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
885 i1_cost = i0_cost = 0;
888 /* Calculate the replacement insn_rtx_costs. */
889 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
890 if (newi2pat)
892 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
893 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
894 ? new_i2_cost + new_i3_cost : 0;
896 else
898 new_cost = new_i3_cost;
899 new_i2_cost = 0;
902 if (undobuf.other_insn)
904 int old_other_cost, new_other_cost;
906 old_other_cost = INSN_COST (undobuf.other_insn);
907 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
908 if (old_other_cost > 0 && new_other_cost > 0)
910 old_cost += old_other_cost;
911 new_cost += new_other_cost;
913 else
914 old_cost = 0;
917 /* Disallow this combination if both new_cost and old_cost are greater than
918 zero, and new_cost is greater than old cost. */
919 int reject = old_cost > 0 && new_cost > old_cost;
921 if (dump_file)
923 fprintf (dump_file, "%s combination of insns ",
924 reject ? "rejecting" : "allowing");
925 if (i0)
926 fprintf (dump_file, "%d, ", INSN_UID (i0));
927 if (i1)
928 fprintf (dump_file, "%d, ", INSN_UID (i1));
929 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
931 fprintf (dump_file, "original costs ");
932 if (i0)
933 fprintf (dump_file, "%d + ", i0_cost);
934 if (i1)
935 fprintf (dump_file, "%d + ", i1_cost);
936 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
938 if (newi2pat)
939 fprintf (dump_file, "replacement costs %d + %d = %d\n",
940 new_i2_cost, new_i3_cost, new_cost);
941 else
942 fprintf (dump_file, "replacement cost %d\n", new_cost);
945 if (reject)
946 return false;
948 /* Update the uid_insn_cost array with the replacement costs. */
949 INSN_COST (i2) = new_i2_cost;
950 INSN_COST (i3) = new_i3_cost;
951 if (i1)
953 INSN_COST (i1) = 0;
954 if (i0)
955 INSN_COST (i0) = 0;
958 return true;
962 /* Delete any insns that copy a register to itself. */
964 static void
965 delete_noop_moves (void)
967 rtx_insn *insn, *next;
968 basic_block bb;
970 FOR_EACH_BB_FN (bb, cfun)
972 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
974 next = NEXT_INSN (insn);
975 if (INSN_P (insn) && noop_move_p (insn))
977 if (dump_file)
978 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
980 delete_insn_and_edges (insn);
987 /* Return false if we do not want to (or cannot) combine DEF. */
988 static bool
989 can_combine_def_p (df_ref def)
991 /* Do not consider if it is pre/post modification in MEM. */
992 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
993 return false;
995 unsigned int regno = DF_REF_REGNO (def);
997 /* Do not combine frame pointer adjustments. */
998 if ((regno == FRAME_POINTER_REGNUM
999 && (!reload_completed || frame_pointer_needed))
1000 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1001 || (regno == HARD_FRAME_POINTER_REGNUM
1002 && (!reload_completed || frame_pointer_needed))
1003 #endif
1004 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1005 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1006 #endif
1008 return false;
1010 return true;
1013 /* Return false if we do not want to (or cannot) combine USE. */
1014 static bool
1015 can_combine_use_p (df_ref use)
1017 /* Do not consider the usage of the stack pointer by function call. */
1018 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1019 return false;
1021 return true;
1024 /* Fill in log links field for all insns. */
1026 static void
1027 create_log_links (void)
1029 basic_block bb;
1030 rtx_insn **next_use;
1031 rtx_insn *insn;
1032 df_ref def, use;
1034 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1036 /* Pass through each block from the end, recording the uses of each
1037 register and establishing log links when def is encountered.
1038 Note that we do not clear next_use array in order to save time,
1039 so we have to test whether the use is in the same basic block as def.
1041 There are a few cases below when we do not consider the definition or
1042 usage -- these follow what the original flow.c did. Don't ask me why it is
1043 done this way; I don't know and if it works, I don't want to know. */
1045 FOR_EACH_BB_FN (bb, cfun)
1047 FOR_BB_INSNS_REVERSE (bb, insn)
1049 if (!NONDEBUG_INSN_P (insn))
1050 continue;
1052 /* Log links are created only once. */
1053 gcc_assert (!LOG_LINKS (insn));
1055 FOR_EACH_INSN_DEF (def, insn)
1057 unsigned int regno = DF_REF_REGNO (def);
1058 rtx_insn *use_insn;
1060 if (!next_use[regno])
1061 continue;
1063 if (!can_combine_def_p (def))
1064 continue;
1066 use_insn = next_use[regno];
1067 next_use[regno] = NULL;
1069 if (BLOCK_FOR_INSN (use_insn) != bb)
1070 continue;
1072 /* flow.c claimed:
1074 We don't build a LOG_LINK for hard registers contained
1075 in ASM_OPERANDs. If these registers get replaced,
1076 we might wind up changing the semantics of the insn,
1077 even if reload can make what appear to be valid
1078 assignments later. */
1079 if (regno < FIRST_PSEUDO_REGISTER
1080 && asm_noperands (PATTERN (use_insn)) >= 0)
1081 continue;
1083 /* Don't add duplicate links between instructions. */
1084 struct insn_link *links;
1085 FOR_EACH_LOG_LINK (links, use_insn)
1086 if (insn == links->insn && regno == links->regno)
1087 break;
1089 if (!links)
1090 LOG_LINKS (use_insn)
1091 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1094 FOR_EACH_INSN_USE (use, insn)
1095 if (can_combine_use_p (use))
1096 next_use[DF_REF_REGNO (use)] = insn;
1100 free (next_use);
1103 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1104 true if we found a LOG_LINK that proves that A feeds B. This only works
1105 if there are no instructions between A and B which could have a link
1106 depending on A, since in that case we would not record a link for B.
1107 We also check the implicit dependency created by a cc0 setter/user
1108 pair. */
1110 static bool
1111 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1113 struct insn_link *links;
1114 FOR_EACH_LOG_LINK (links, b)
1115 if (links->insn == a)
1116 return true;
1117 #ifdef HAVE_cc0
1118 if (sets_cc0_p (a))
1119 return true;
1120 #endif
1121 return false;
1124 /* Main entry point for combiner. F is the first insn of the function.
1125 NREGS is the first unused pseudo-reg number.
1127 Return nonzero if the combiner has turned an indirect jump
1128 instruction into a direct jump. */
1129 static int
1130 combine_instructions (rtx_insn *f, unsigned int nregs)
1132 rtx_insn *insn, *next;
1133 #ifdef HAVE_cc0
1134 rtx_insn *prev;
1135 #endif
1136 struct insn_link *links, *nextlinks;
1137 rtx_insn *first;
1138 basic_block last_bb;
1140 int new_direct_jump_p = 0;
1142 for (first = f; first && !INSN_P (first); )
1143 first = NEXT_INSN (first);
1144 if (!first)
1145 return 0;
1147 combine_attempts = 0;
1148 combine_merges = 0;
1149 combine_extras = 0;
1150 combine_successes = 0;
1152 rtl_hooks = combine_rtl_hooks;
1154 reg_stat.safe_grow_cleared (nregs);
1156 init_recog_no_volatile ();
1158 /* Allocate array for insn info. */
1159 max_uid_known = get_max_uid ();
1160 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1161 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1162 gcc_obstack_init (&insn_link_obstack);
1164 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1166 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1167 problems when, for example, we have j <<= 1 in a loop. */
1169 nonzero_sign_valid = 0;
1170 label_tick = label_tick_ebb_start = 1;
1172 /* Scan all SETs and see if we can deduce anything about what
1173 bits are known to be zero for some registers and how many copies
1174 of the sign bit are known to exist for those registers.
1176 Also set any known values so that we can use it while searching
1177 for what bits are known to be set. */
1179 setup_incoming_promotions (first);
1180 /* Allow the entry block and the first block to fall into the same EBB.
1181 Conceptually the incoming promotions are assigned to the entry block. */
1182 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1184 create_log_links ();
1185 FOR_EACH_BB_FN (this_basic_block, cfun)
1187 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1188 last_call_luid = 0;
1189 mem_last_set = -1;
1191 label_tick++;
1192 if (!single_pred_p (this_basic_block)
1193 || single_pred (this_basic_block) != last_bb)
1194 label_tick_ebb_start = label_tick;
1195 last_bb = this_basic_block;
1197 FOR_BB_INSNS (this_basic_block, insn)
1198 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1200 #ifdef AUTO_INC_DEC
1201 rtx links;
1202 #endif
1204 subst_low_luid = DF_INSN_LUID (insn);
1205 subst_insn = insn;
1207 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1208 insn);
1209 record_dead_and_set_regs (insn);
1211 #ifdef AUTO_INC_DEC
1212 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1213 if (REG_NOTE_KIND (links) == REG_INC)
1214 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1215 insn);
1216 #endif
1218 /* Record the current insn_rtx_cost of this instruction. */
1219 if (NONJUMP_INSN_P (insn))
1220 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1221 optimize_this_for_speed_p);
1222 if (dump_file)
1223 fprintf (dump_file, "insn_cost %d: %d\n",
1224 INSN_UID (insn), INSN_COST (insn));
1228 nonzero_sign_valid = 1;
1230 /* Now scan all the insns in forward order. */
1231 label_tick = label_tick_ebb_start = 1;
1232 init_reg_last ();
1233 setup_incoming_promotions (first);
1234 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1235 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1237 FOR_EACH_BB_FN (this_basic_block, cfun)
1239 rtx_insn *last_combined_insn = NULL;
1240 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1241 last_call_luid = 0;
1242 mem_last_set = -1;
1244 label_tick++;
1245 if (!single_pred_p (this_basic_block)
1246 || single_pred (this_basic_block) != last_bb)
1247 label_tick_ebb_start = label_tick;
1248 last_bb = this_basic_block;
1250 rtl_profile_for_bb (this_basic_block);
1251 for (insn = BB_HEAD (this_basic_block);
1252 insn != NEXT_INSN (BB_END (this_basic_block));
1253 insn = next ? next : NEXT_INSN (insn))
1255 next = 0;
1256 if (!NONDEBUG_INSN_P (insn))
1257 continue;
1259 while (last_combined_insn
1260 && last_combined_insn->deleted ())
1261 last_combined_insn = PREV_INSN (last_combined_insn);
1262 if (last_combined_insn == NULL_RTX
1263 || BARRIER_P (last_combined_insn)
1264 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1265 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1266 last_combined_insn = insn;
1268 /* See if we know about function return values before this
1269 insn based upon SUBREG flags. */
1270 check_promoted_subreg (insn, PATTERN (insn));
1272 /* See if we can find hardregs and subreg of pseudos in
1273 narrower modes. This could help turning TRUNCATEs
1274 into SUBREGs. */
1275 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1277 /* Try this insn with each insn it links back to. */
1279 FOR_EACH_LOG_LINK (links, insn)
1280 if ((next = try_combine (insn, links->insn, NULL,
1281 NULL, &new_direct_jump_p,
1282 last_combined_insn)) != 0)
1284 statistics_counter_event (cfun, "two-insn combine", 1);
1285 goto retry;
1288 /* Try each sequence of three linked insns ending with this one. */
1290 if (max_combine >= 3)
1291 FOR_EACH_LOG_LINK (links, insn)
1293 rtx_insn *link = links->insn;
1295 /* If the linked insn has been replaced by a note, then there
1296 is no point in pursuing this chain any further. */
1297 if (NOTE_P (link))
1298 continue;
1300 FOR_EACH_LOG_LINK (nextlinks, link)
1301 if ((next = try_combine (insn, link, nextlinks->insn,
1302 NULL, &new_direct_jump_p,
1303 last_combined_insn)) != 0)
1305 statistics_counter_event (cfun, "three-insn combine", 1);
1306 goto retry;
1310 #ifdef HAVE_cc0
1311 /* Try to combine a jump insn that uses CC0
1312 with a preceding insn that sets CC0, and maybe with its
1313 logical predecessor as well.
1314 This is how we make decrement-and-branch insns.
1315 We need this special code because data flow connections
1316 via CC0 do not get entered in LOG_LINKS. */
1318 if (JUMP_P (insn)
1319 && (prev = prev_nonnote_insn (insn)) != 0
1320 && NONJUMP_INSN_P (prev)
1321 && sets_cc0_p (PATTERN (prev)))
1323 if ((next = try_combine (insn, prev, NULL, NULL,
1324 &new_direct_jump_p,
1325 last_combined_insn)) != 0)
1326 goto retry;
1328 FOR_EACH_LOG_LINK (nextlinks, prev)
1329 if ((next = try_combine (insn, prev, nextlinks->insn,
1330 NULL, &new_direct_jump_p,
1331 last_combined_insn)) != 0)
1332 goto retry;
1335 /* Do the same for an insn that explicitly references CC0. */
1336 if (NONJUMP_INSN_P (insn)
1337 && (prev = prev_nonnote_insn (insn)) != 0
1338 && NONJUMP_INSN_P (prev)
1339 && sets_cc0_p (PATTERN (prev))
1340 && GET_CODE (PATTERN (insn)) == SET
1341 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1343 if ((next = try_combine (insn, prev, NULL, NULL,
1344 &new_direct_jump_p,
1345 last_combined_insn)) != 0)
1346 goto retry;
1348 FOR_EACH_LOG_LINK (nextlinks, prev)
1349 if ((next = try_combine (insn, prev, nextlinks->insn,
1350 NULL, &new_direct_jump_p,
1351 last_combined_insn)) != 0)
1352 goto retry;
1355 /* Finally, see if any of the insns that this insn links to
1356 explicitly references CC0. If so, try this insn, that insn,
1357 and its predecessor if it sets CC0. */
1358 FOR_EACH_LOG_LINK (links, insn)
1359 if (NONJUMP_INSN_P (links->insn)
1360 && GET_CODE (PATTERN (links->insn)) == SET
1361 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1362 && (prev = prev_nonnote_insn (links->insn)) != 0
1363 && NONJUMP_INSN_P (prev)
1364 && sets_cc0_p (PATTERN (prev))
1365 && (next = try_combine (insn, links->insn,
1366 prev, NULL, &new_direct_jump_p,
1367 last_combined_insn)) != 0)
1368 goto retry;
1369 #endif
1371 /* Try combining an insn with two different insns whose results it
1372 uses. */
1373 if (max_combine >= 3)
1374 FOR_EACH_LOG_LINK (links, insn)
1375 for (nextlinks = links->next; nextlinks;
1376 nextlinks = nextlinks->next)
1377 if ((next = try_combine (insn, links->insn,
1378 nextlinks->insn, NULL,
1379 &new_direct_jump_p,
1380 last_combined_insn)) != 0)
1383 statistics_counter_event (cfun, "three-insn combine", 1);
1384 goto retry;
1387 /* Try four-instruction combinations. */
1388 if (max_combine >= 4)
1389 FOR_EACH_LOG_LINK (links, insn)
1391 struct insn_link *next1;
1392 rtx_insn *link = links->insn;
1394 /* If the linked insn has been replaced by a note, then there
1395 is no point in pursuing this chain any further. */
1396 if (NOTE_P (link))
1397 continue;
1399 FOR_EACH_LOG_LINK (next1, link)
1401 rtx_insn *link1 = next1->insn;
1402 if (NOTE_P (link1))
1403 continue;
1404 /* I0 -> I1 -> I2 -> I3. */
1405 FOR_EACH_LOG_LINK (nextlinks, link1)
1406 if ((next = try_combine (insn, link, link1,
1407 nextlinks->insn,
1408 &new_direct_jump_p,
1409 last_combined_insn)) != 0)
1411 statistics_counter_event (cfun, "four-insn combine", 1);
1412 goto retry;
1414 /* I0, I1 -> I2, I2 -> I3. */
1415 for (nextlinks = next1->next; nextlinks;
1416 nextlinks = nextlinks->next)
1417 if ((next = try_combine (insn, link, link1,
1418 nextlinks->insn,
1419 &new_direct_jump_p,
1420 last_combined_insn)) != 0)
1422 statistics_counter_event (cfun, "four-insn combine", 1);
1423 goto retry;
1427 for (next1 = links->next; next1; next1 = next1->next)
1429 rtx_insn *link1 = next1->insn;
1430 if (NOTE_P (link1))
1431 continue;
1432 /* I0 -> I2; I1, I2 -> I3. */
1433 FOR_EACH_LOG_LINK (nextlinks, link)
1434 if ((next = try_combine (insn, link, link1,
1435 nextlinks->insn,
1436 &new_direct_jump_p,
1437 last_combined_insn)) != 0)
1439 statistics_counter_event (cfun, "four-insn combine", 1);
1440 goto retry;
1442 /* I0 -> I1; I1, I2 -> I3. */
1443 FOR_EACH_LOG_LINK (nextlinks, link1)
1444 if ((next = try_combine (insn, link, link1,
1445 nextlinks->insn,
1446 &new_direct_jump_p,
1447 last_combined_insn)) != 0)
1449 statistics_counter_event (cfun, "four-insn combine", 1);
1450 goto retry;
1455 /* Try this insn with each REG_EQUAL note it links back to. */
1456 FOR_EACH_LOG_LINK (links, insn)
1458 rtx set, note;
1459 rtx_insn *temp = links->insn;
1460 if ((set = single_set (temp)) != 0
1461 && (note = find_reg_equal_equiv_note (temp)) != 0
1462 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1463 /* Avoid using a register that may already have been marked
1464 dead by an earlier instruction. */
1465 && ! unmentioned_reg_p (note, SET_SRC (set))
1466 && (GET_MODE (note) == VOIDmode
1467 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1468 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1470 /* Temporarily replace the set's source with the
1471 contents of the REG_EQUAL note. The insn will
1472 be deleted or recognized by try_combine. */
1473 rtx orig = SET_SRC (set);
1474 SET_SRC (set) = note;
1475 i2mod = temp;
1476 i2mod_old_rhs = copy_rtx (orig);
1477 i2mod_new_rhs = copy_rtx (note);
1478 next = try_combine (insn, i2mod, NULL, NULL,
1479 &new_direct_jump_p,
1480 last_combined_insn);
1481 i2mod = NULL;
1482 if (next)
1484 statistics_counter_event (cfun, "insn-with-note combine", 1);
1485 goto retry;
1487 SET_SRC (set) = orig;
1491 if (!NOTE_P (insn))
1492 record_dead_and_set_regs (insn);
1494 retry:
1499 default_rtl_profile ();
1500 clear_bb_flags ();
1501 new_direct_jump_p |= purge_all_dead_edges ();
1502 delete_noop_moves ();
1504 /* Clean up. */
1505 obstack_free (&insn_link_obstack, NULL);
1506 free (uid_log_links);
1507 free (uid_insn_cost);
1508 reg_stat.release ();
1511 struct undo *undo, *next;
1512 for (undo = undobuf.frees; undo; undo = next)
1514 next = undo->next;
1515 free (undo);
1517 undobuf.frees = 0;
1520 total_attempts += combine_attempts;
1521 total_merges += combine_merges;
1522 total_extras += combine_extras;
1523 total_successes += combine_successes;
1525 nonzero_sign_valid = 0;
1526 rtl_hooks = general_rtl_hooks;
1528 /* Make recognizer allow volatile MEMs again. */
1529 init_recog ();
1531 return new_direct_jump_p;
1534 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1536 static void
1537 init_reg_last (void)
1539 unsigned int i;
1540 reg_stat_type *p;
1542 FOR_EACH_VEC_ELT (reg_stat, i, p)
1543 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1546 /* Set up any promoted values for incoming argument registers. */
1548 static void
1549 setup_incoming_promotions (rtx_insn *first)
1551 tree arg;
1552 bool strictly_local = false;
1554 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1555 arg = DECL_CHAIN (arg))
1557 rtx x, reg = DECL_INCOMING_RTL (arg);
1558 int uns1, uns3;
1559 machine_mode mode1, mode2, mode3, mode4;
1561 /* Only continue if the incoming argument is in a register. */
1562 if (!REG_P (reg))
1563 continue;
1565 /* Determine, if possible, whether all call sites of the current
1566 function lie within the current compilation unit. (This does
1567 take into account the exporting of a function via taking its
1568 address, and so forth.) */
1569 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1571 /* The mode and signedness of the argument before any promotions happen
1572 (equal to the mode of the pseudo holding it at that stage). */
1573 mode1 = TYPE_MODE (TREE_TYPE (arg));
1574 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1576 /* The mode and signedness of the argument after any source language and
1577 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1578 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1579 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1581 /* The mode and signedness of the argument as it is actually passed,
1582 see assign_parm_setup_reg in function.c. */
1583 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns1,
1584 TREE_TYPE (cfun->decl), 0);
1586 /* The mode of the register in which the argument is being passed. */
1587 mode4 = GET_MODE (reg);
1589 /* Eliminate sign extensions in the callee when:
1590 (a) A mode promotion has occurred; */
1591 if (mode1 == mode3)
1592 continue;
1593 /* (b) The mode of the register is the same as the mode of
1594 the argument as it is passed; */
1595 if (mode3 != mode4)
1596 continue;
1597 /* (c) There's no language level extension; */
1598 if (mode1 == mode2)
1600 /* (c.1) All callers are from the current compilation unit. If that's
1601 the case we don't have to rely on an ABI, we only have to know
1602 what we're generating right now, and we know that we will do the
1603 mode1 to mode2 promotion with the given sign. */
1604 else if (!strictly_local)
1605 continue;
1606 /* (c.2) The combination of the two promotions is useful. This is
1607 true when the signs match, or if the first promotion is unsigned.
1608 In the latter case, (sign_extend (zero_extend x)) is the same as
1609 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1610 else if (uns1)
1611 uns3 = true;
1612 else if (uns3)
1613 continue;
1615 /* Record that the value was promoted from mode1 to mode3,
1616 so that any sign extension at the head of the current
1617 function may be eliminated. */
1618 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1619 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1620 record_value_for_reg (reg, first, x);
1624 /* Called via note_stores. If X is a pseudo that is narrower than
1625 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1627 If we are setting only a portion of X and we can't figure out what
1628 portion, assume all bits will be used since we don't know what will
1629 be happening.
1631 Similarly, set how many bits of X are known to be copies of the sign bit
1632 at all locations in the function. This is the smallest number implied
1633 by any set of X. */
1635 static void
1636 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1638 rtx_insn *insn = (rtx_insn *) data;
1639 unsigned int num;
1641 if (REG_P (x)
1642 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1643 /* If this register is undefined at the start of the function, we can't
1644 say what its contents were. */
1645 && ! REGNO_REG_SET_P
1646 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1647 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1649 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1651 if (set == 0 || GET_CODE (set) == CLOBBER)
1653 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1654 rsp->sign_bit_copies = 1;
1655 return;
1658 /* If this register is being initialized using itself, and the
1659 register is uninitialized in this basic block, and there are
1660 no LOG_LINKS which set the register, then part of the
1661 register is uninitialized. In that case we can't assume
1662 anything about the number of nonzero bits.
1664 ??? We could do better if we checked this in
1665 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1666 could avoid making assumptions about the insn which initially
1667 sets the register, while still using the information in other
1668 insns. We would have to be careful to check every insn
1669 involved in the combination. */
1671 if (insn
1672 && reg_referenced_p (x, PATTERN (insn))
1673 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1674 REGNO (x)))
1676 struct insn_link *link;
1678 FOR_EACH_LOG_LINK (link, insn)
1679 if (dead_or_set_p (link->insn, x))
1680 break;
1681 if (!link)
1683 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1684 rsp->sign_bit_copies = 1;
1685 return;
1689 /* If this is a complex assignment, see if we can convert it into a
1690 simple assignment. */
1691 set = expand_field_assignment (set);
1693 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1694 set what we know about X. */
1696 if (SET_DEST (set) == x
1697 || (paradoxical_subreg_p (SET_DEST (set))
1698 && SUBREG_REG (SET_DEST (set)) == x))
1700 rtx src = SET_SRC (set);
1702 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1703 /* If X is narrower than a word and SRC is a non-negative
1704 constant that would appear negative in the mode of X,
1705 sign-extend it for use in reg_stat[].nonzero_bits because some
1706 machines (maybe most) will actually do the sign-extension
1707 and this is the conservative approach.
1709 ??? For 2.5, try to tighten up the MD files in this regard
1710 instead of this kludge. */
1712 if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
1713 && CONST_INT_P (src)
1714 && INTVAL (src) > 0
1715 && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
1716 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
1717 #endif
1719 /* Don't call nonzero_bits if it cannot change anything. */
1720 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1721 rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1722 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1723 if (rsp->sign_bit_copies == 0
1724 || rsp->sign_bit_copies > num)
1725 rsp->sign_bit_copies = num;
1727 else
1729 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1730 rsp->sign_bit_copies = 1;
1735 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1736 optionally insns that were previously combined into I3 or that will be
1737 combined into the merger of INSN and I3. The order is PRED, PRED2,
1738 INSN, SUCC, SUCC2, I3.
1740 Return 0 if the combination is not allowed for any reason.
1742 If the combination is allowed, *PDEST will be set to the single
1743 destination of INSN and *PSRC to the single source, and this function
1744 will return 1. */
1746 static int
1747 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1748 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1749 rtx *pdest, rtx *psrc)
1751 int i;
1752 const_rtx set = 0;
1753 rtx src, dest;
1754 rtx_insn *p;
1755 #ifdef AUTO_INC_DEC
1756 rtx link;
1757 #endif
1758 bool all_adjacent = true;
1759 int (*is_volatile_p) (const_rtx);
1761 if (succ)
1763 if (succ2)
1765 if (next_active_insn (succ2) != i3)
1766 all_adjacent = false;
1767 if (next_active_insn (succ) != succ2)
1768 all_adjacent = false;
1770 else if (next_active_insn (succ) != i3)
1771 all_adjacent = false;
1772 if (next_active_insn (insn) != succ)
1773 all_adjacent = false;
1775 else if (next_active_insn (insn) != i3)
1776 all_adjacent = false;
1778 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1779 or a PARALLEL consisting of such a SET and CLOBBERs.
1781 If INSN has CLOBBER parallel parts, ignore them for our processing.
1782 By definition, these happen during the execution of the insn. When it
1783 is merged with another insn, all bets are off. If they are, in fact,
1784 needed and aren't also supplied in I3, they may be added by
1785 recog_for_combine. Otherwise, it won't match.
1787 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1788 note.
1790 Get the source and destination of INSN. If more than one, can't
1791 combine. */
1793 if (GET_CODE (PATTERN (insn)) == SET)
1794 set = PATTERN (insn);
1795 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1796 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1798 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1800 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1802 switch (GET_CODE (elt))
1804 /* This is important to combine floating point insns
1805 for the SH4 port. */
1806 case USE:
1807 /* Combining an isolated USE doesn't make sense.
1808 We depend here on combinable_i3pat to reject them. */
1809 /* The code below this loop only verifies that the inputs of
1810 the SET in INSN do not change. We call reg_set_between_p
1811 to verify that the REG in the USE does not change between
1812 I3 and INSN.
1813 If the USE in INSN was for a pseudo register, the matching
1814 insn pattern will likely match any register; combining this
1815 with any other USE would only be safe if we knew that the
1816 used registers have identical values, or if there was
1817 something to tell them apart, e.g. different modes. For
1818 now, we forgo such complicated tests and simply disallow
1819 combining of USES of pseudo registers with any other USE. */
1820 if (REG_P (XEXP (elt, 0))
1821 && GET_CODE (PATTERN (i3)) == PARALLEL)
1823 rtx i3pat = PATTERN (i3);
1824 int i = XVECLEN (i3pat, 0) - 1;
1825 unsigned int regno = REGNO (XEXP (elt, 0));
1829 rtx i3elt = XVECEXP (i3pat, 0, i);
1831 if (GET_CODE (i3elt) == USE
1832 && REG_P (XEXP (i3elt, 0))
1833 && (REGNO (XEXP (i3elt, 0)) == regno
1834 ? reg_set_between_p (XEXP (elt, 0),
1835 PREV_INSN (insn), i3)
1836 : regno >= FIRST_PSEUDO_REGISTER))
1837 return 0;
1839 while (--i >= 0);
1841 break;
1843 /* We can ignore CLOBBERs. */
1844 case CLOBBER:
1845 break;
1847 case SET:
1848 /* Ignore SETs whose result isn't used but not those that
1849 have side-effects. */
1850 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1851 && insn_nothrow_p (insn)
1852 && !side_effects_p (elt))
1853 break;
1855 /* If we have already found a SET, this is a second one and
1856 so we cannot combine with this insn. */
1857 if (set)
1858 return 0;
1860 set = elt;
1861 break;
1863 default:
1864 /* Anything else means we can't combine. */
1865 return 0;
1869 if (set == 0
1870 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1871 so don't do anything with it. */
1872 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1873 return 0;
1875 else
1876 return 0;
1878 if (set == 0)
1879 return 0;
1881 /* The simplification in expand_field_assignment may call back to
1882 get_last_value, so set safe guard here. */
1883 subst_low_luid = DF_INSN_LUID (insn);
1885 set = expand_field_assignment (set);
1886 src = SET_SRC (set), dest = SET_DEST (set);
1888 /* Don't eliminate a store in the stack pointer. */
1889 if (dest == stack_pointer_rtx
1890 /* Don't combine with an insn that sets a register to itself if it has
1891 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1892 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1893 /* Can't merge an ASM_OPERANDS. */
1894 || GET_CODE (src) == ASM_OPERANDS
1895 /* Can't merge a function call. */
1896 || GET_CODE (src) == CALL
1897 /* Don't eliminate a function call argument. */
1898 || (CALL_P (i3)
1899 && (find_reg_fusage (i3, USE, dest)
1900 || (REG_P (dest)
1901 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1902 && global_regs[REGNO (dest)])))
1903 /* Don't substitute into an incremented register. */
1904 || FIND_REG_INC_NOTE (i3, dest)
1905 || (succ && FIND_REG_INC_NOTE (succ, dest))
1906 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1907 /* Don't substitute into a non-local goto, this confuses CFG. */
1908 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1909 /* Make sure that DEST is not used after SUCC but before I3. */
1910 || (!all_adjacent
1911 && ((succ2
1912 && (reg_used_between_p (dest, succ2, i3)
1913 || reg_used_between_p (dest, succ, succ2)))
1914 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1915 /* Make sure that the value that is to be substituted for the register
1916 does not use any registers whose values alter in between. However,
1917 if the insns are adjacent, a use can't cross a set even though we
1918 think it might (this can happen for a sequence of insns each setting
1919 the same destination; last_set of that register might point to
1920 a NOTE). If INSN has a REG_EQUIV note, the register is always
1921 equivalent to the memory so the substitution is valid even if there
1922 are intervening stores. Also, don't move a volatile asm or
1923 UNSPEC_VOLATILE across any other insns. */
1924 || (! all_adjacent
1925 && (((!MEM_P (src)
1926 || ! find_reg_note (insn, REG_EQUIV, src))
1927 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1928 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1929 || GET_CODE (src) == UNSPEC_VOLATILE))
1930 /* Don't combine across a CALL_INSN, because that would possibly
1931 change whether the life span of some REGs crosses calls or not,
1932 and it is a pain to update that information.
1933 Exception: if source is a constant, moving it later can't hurt.
1934 Accept that as a special case. */
1935 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1936 return 0;
1938 /* DEST must either be a REG or CC0. */
1939 if (REG_P (dest))
1941 /* If register alignment is being enforced for multi-word items in all
1942 cases except for parameters, it is possible to have a register copy
1943 insn referencing a hard register that is not allowed to contain the
1944 mode being copied and which would not be valid as an operand of most
1945 insns. Eliminate this problem by not combining with such an insn.
1947 Also, on some machines we don't want to extend the life of a hard
1948 register. */
1950 if (REG_P (src)
1951 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1952 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1953 /* Don't extend the life of a hard register unless it is
1954 user variable (if we have few registers) or it can't
1955 fit into the desired register (meaning something special
1956 is going on).
1957 Also avoid substituting a return register into I3, because
1958 reload can't handle a conflict with constraints of other
1959 inputs. */
1960 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1961 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1962 return 0;
1964 else if (GET_CODE (dest) != CC0)
1965 return 0;
1968 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1969 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1970 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1972 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1974 /* If the clobber represents an earlyclobber operand, we must not
1975 substitute an expression containing the clobbered register.
1976 As we do not analyze the constraint strings here, we have to
1977 make the conservative assumption. However, if the register is
1978 a fixed hard reg, the clobber cannot represent any operand;
1979 we leave it up to the machine description to either accept or
1980 reject use-and-clobber patterns. */
1981 if (!REG_P (reg)
1982 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1983 || !fixed_regs[REGNO (reg)])
1984 if (reg_overlap_mentioned_p (reg, src))
1985 return 0;
1988 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1989 or not), reject, unless nothing volatile comes between it and I3.  */
1991 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1993 /* Make sure neither succ nor succ2 contains a volatile reference. */
1994 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1995 return 0;
1996 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1997 return 0;
1998 /* We'll check insns between INSN and I3 below. */
2001 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2002 to be an explicit register variable, and was chosen for a reason. */
2004 if (GET_CODE (src) == ASM_OPERANDS
2005 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2006 return 0;
2008 /* If INSN contains volatile references (specifically volatile MEMs),
2009 we cannot combine across any other volatile references.
2010 Even if INSN doesn't contain volatile references, any intervening
2011 volatile insn might affect machine state. */
2013 is_volatile_p = volatile_refs_p (PATTERN (insn))
2014 ? volatile_refs_p
2015 : volatile_insn_p;
2017 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2018 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2019 return 0;
2021 /* If INSN contains an autoincrement or autodecrement, make sure that
2022 register is not used between there and I3, and not already used in
2023 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2024 Also insist that I3 not be a jump; if it were one
2025 and the incremented register were spilled, we would lose. */
2027 #ifdef AUTO_INC_DEC
2028 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2029 if (REG_NOTE_KIND (link) == REG_INC
2030 && (JUMP_P (i3)
2031 || reg_used_between_p (XEXP (link, 0), insn, i3)
2032 || (pred != NULL_RTX
2033 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2034 || (pred2 != NULL_RTX
2035 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2036 || (succ != NULL_RTX
2037 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2038 || (succ2 != NULL_RTX
2039 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2040 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2041 return 0;
2042 #endif
2044 #ifdef HAVE_cc0
2045 /* Don't combine an insn that follows a CC0-setting insn.
2046 An insn that uses CC0 must not be separated from the one that sets it.
2047 We do, however, allow I2 to follow a CC0-setting insn if that insn
2048 is passed as I1; in that case it will be deleted also.
2049 We also allow combining in this case if all the insns are adjacent
2050 because that would leave the two CC0 insns adjacent as well.
2051 It would be more logical to test whether CC0 occurs inside I1 or I2,
2052 but that would be much slower, and this ought to be equivalent. */
2054 p = prev_nonnote_insn (insn);
2055 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2056 && ! all_adjacent)
2057 return 0;
2058 #endif
2060 /* If we get here, we have passed all the tests and the combination is
2061 to be allowed. */
2063 *pdest = dest;
2064 *psrc = src;
2066 return 1;
2069 /* LOC is the location within I3 that contains its pattern or the component
2070 of a PARALLEL of the pattern. We validate that it is valid for combining.
2072 One problem is if I3 modifies its output, as opposed to replacing it
2073 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2074 doing so would produce an insn that is not equivalent to the original insns.
2076 Consider:
2078 (set (reg:DI 101) (reg:DI 100))
2079 (set (subreg:SI (reg:DI 101) 0) <foo>)
2081 This is NOT equivalent to:
2083 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2084 (set (reg:DI 101) (reg:DI 100))])
2086 Not only does this modify 100 (in which case it might still be valid
2087 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2089 We can also run into a problem if I2 sets a register that I1
2090 uses and I1 gets directly substituted into I3 (not via I2). In that
2091 case, we would be getting the wrong value of I2DEST into I3, so we
2092 must reject the combination. This case occurs when I2 and I1 both
2093 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2094 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2095 of a SET must prevent combination from occurring. The same situation
2096 can occur for I0, in which case I0_NOT_IN_SRC is set.
2098 Before doing the above check, we first try to expand a field assignment
2099 into a set of logical operations.
2101 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2102 we place a register that is both set and used within I3. If more than one
2103 such register is detected, we fail.
2105 Return 1 if the combination is valid, zero otherwise. */
2107 static int
2108 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2109 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2111 rtx x = *loc;
2113 if (GET_CODE (x) == SET)
2115 rtx set = x;
2116 rtx dest = SET_DEST (set);
2117 rtx src = SET_SRC (set);
2118 rtx inner_dest = dest;
2119 rtx subdest;
2121 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2122 || GET_CODE (inner_dest) == SUBREG
2123 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2124 inner_dest = XEXP (inner_dest, 0);
2126 /* Check for the case where I3 modifies its output, as discussed
2127 above. We don't want to prevent pseudos from being combined
2128 into the address of a MEM, so only prevent the combination if
2129 i1 or i2 set the same MEM. */
2130 if ((inner_dest != dest &&
2131 (!MEM_P (inner_dest)
2132 || rtx_equal_p (i2dest, inner_dest)
2133 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2134 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2135 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2136 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2137 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2139 /* This is the same test done in can_combine_p except we can't test
2140 all_adjacent; we don't have to, since this instruction will stay
2141 in place, thus we are not considering increasing the lifetime of
2142 INNER_DEST.
2144 Also, if this insn sets a function argument, combining it with
2145 something that might need a spill could clobber a previous
2146 function argument; the all_adjacent test in can_combine_p also
2147 checks this; here, we do a more specific test for this case. */
2149 || (REG_P (inner_dest)
2150 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2151 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2152 GET_MODE (inner_dest))))
2153 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2154 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2155 return 0;
2157 /* If DEST is used in I3, it is being killed in this insn, so
2158 record that for later. We have to consider paradoxical
2159 subregs here, since they kill the whole register, but we
2160 ignore partial subregs, STRICT_LOW_PART, etc.
2161 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2162 STACK_POINTER_REGNUM, since these are always considered to be
2163 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2164 subdest = dest;
2165 if (GET_CODE (subdest) == SUBREG
2166 && (GET_MODE_SIZE (GET_MODE (subdest))
2167 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2168 subdest = SUBREG_REG (subdest);
2169 if (pi3dest_killed
2170 && REG_P (subdest)
2171 && reg_referenced_p (subdest, PATTERN (i3))
2172 && REGNO (subdest) != FRAME_POINTER_REGNUM
2173 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2174 && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2175 #endif
2176 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2177 && (REGNO (subdest) != ARG_POINTER_REGNUM
2178 || ! fixed_regs [REGNO (subdest)])
2179 #endif
2180 && REGNO (subdest) != STACK_POINTER_REGNUM)
2182 if (*pi3dest_killed)
2183 return 0;
2185 *pi3dest_killed = subdest;
2189 else if (GET_CODE (x) == PARALLEL)
2191 int i;
2193 for (i = 0; i < XVECLEN (x, 0); i++)
2194 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2195 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2196 return 0;
2199 return 1;
2202 /* Return 1 if X is an arithmetic expression that contains a multiplication
2203 or division. We don't count multiplications by powers of two here. */
2205 static int
2206 contains_muldiv (rtx x)
2208 switch (GET_CODE (x))
2210 case MOD: case DIV: case UMOD: case UDIV:
2211 return 1;
2213 case MULT:
2214 return ! (CONST_INT_P (XEXP (x, 1))
2215 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2216 default:
2217 if (BINARY_P (x))
2218 return contains_muldiv (XEXP (x, 0))
2219 || contains_muldiv (XEXP (x, 1));
2221 if (UNARY_P (x))
2222 return contains_muldiv (XEXP (x, 0));
2224 return 0;
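/* For example (hypothetical operands): contains_muldiv on
   (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101)) returns 0,
   since 4 is a power of two, whereas replacing the constant with 3, or
   using any DIV/MOD/UDIV/UMOD, makes it return 1.  */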
2228 /* Determine whether INSN can be used in a combination. Return nonzero if
2229 not. This is used in try_combine to detect early some cases where we
2230 can't perform combinations. */
2232 static int
2233 cant_combine_insn_p (rtx_insn *insn)
2235 rtx set;
2236 rtx src, dest;
2238 /* If this isn't really an insn, we can't do anything.
2239 This can occur when flow deletes an insn that it has merged into an
2240 auto-increment address. */
2241 if (! INSN_P (insn))
2242 return 1;
2244 /* Never combine loads and stores involving hard regs that are likely
2245 to be spilled. The register allocator can usually handle such
2246 reg-reg moves by tying. If we allow the combiner to make
2247 substitutions of likely-spilled regs, reload might die.
2248 As an exception, we allow combinations involving fixed regs; these are
2249 not available to the register allocator so there's no risk involved. */
2251 set = single_set (insn);
2252 if (! set)
2253 return 0;
2254 src = SET_SRC (set);
2255 dest = SET_DEST (set);
2256 if (GET_CODE (src) == SUBREG)
2257 src = SUBREG_REG (src);
2258 if (GET_CODE (dest) == SUBREG)
2259 dest = SUBREG_REG (dest);
2260 if (REG_P (src) && REG_P (dest)
2261 && ((HARD_REGISTER_P (src)
2262 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2263 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2264 || (HARD_REGISTER_P (dest)
2265 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2266 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2267 return 1;
2269 return 0;
2272 struct likely_spilled_retval_info
2274 unsigned regno, nregs;
2275 unsigned mask;
2278 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2279 hard registers that are known to be written to / clobbered in full. */
2280 static void
2281 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2283 struct likely_spilled_retval_info *const info =
2284 (struct likely_spilled_retval_info *) data;
2285 unsigned regno, nregs;
2286 unsigned new_mask;
2288 if (!REG_P (XEXP (set, 0)))
2289 return;
2290 regno = REGNO (x);
2291 if (regno >= info->regno + info->nregs)
2292 return;
2293 nregs = hard_regno_nregs[regno][GET_MODE (x)];
2294 if (regno + nregs <= info->regno)
2295 return;
2296 new_mask = (2U << (nregs - 1)) - 1;
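/* For example, nregs == 3 gives new_mask == (2U << 2) - 1 == 0b111;
   the shifts below then line that bit range up with INFO->regno so the
   covered registers can be cleared from INFO->mask.  */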
2297 if (regno < info->regno)
2298 new_mask >>= info->regno - regno;
2299 else
2300 new_mask <<= regno - info->regno;
2301 info->mask &= ~new_mask;
2304 /* Return nonzero iff part of the return value is live during INSN, and
2305 it is likely spilled. This can happen when more than one insn is needed
2306 to copy the return value, e.g. when we consider combining into the
2307 second copy insn for a complex value. */
2309 static int
2310 likely_spilled_retval_p (rtx_insn *insn)
2312 rtx_insn *use = BB_END (this_basic_block);
2313 rtx reg;
2314 rtx_insn *p;
2315 unsigned regno, nregs;
2316 /* We assume here that no machine mode needs more than
2317 32 hard registers when the value overlaps with a register
2318 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2319 unsigned mask;
2320 struct likely_spilled_retval_info info;
2322 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2323 return 0;
2324 reg = XEXP (PATTERN (use), 0);
2325 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2326 return 0;
2327 regno = REGNO (reg);
2328 nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2329 if (nregs == 1)
2330 return 0;
2331 mask = (2U << (nregs - 1)) - 1;
2333 /* Disregard parts of the return value that are set later. */
2334 info.regno = regno;
2335 info.nregs = nregs;
2336 info.mask = mask;
2337 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2338 if (INSN_P (p))
2339 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2340 mask = info.mask;
2342 /* Check if any of the (probably) live return value registers is
2343 likely spilled. */
2344 nregs --;
2347 if ((mask & 1 << nregs)
2348 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2349 return 1;
2350 } while (nregs--);
2351 return 0;
2354 /* Adjust INSN after we made a change to its destination.
2356 Changing the destination can invalidate notes that say something about
2357 the results of the insn and a LOG_LINK pointing to the insn. */
2359 static void
2360 adjust_for_new_dest (rtx_insn *insn)
2362 /* For notes, be conservative and simply remove them. */
2363 remove_reg_equal_equiv_notes (insn);
2365 /* The new insn will have a destination that was previously the destination
2366 of an insn just above it. Call distribute_links to make a LOG_LINK from
2367 the next use of that destination. */
2369 rtx set = single_set (insn);
2370 gcc_assert (set);
2372 rtx reg = SET_DEST (set);
2374 while (GET_CODE (reg) == ZERO_EXTRACT
2375 || GET_CODE (reg) == STRICT_LOW_PART
2376 || GET_CODE (reg) == SUBREG)
2377 reg = XEXP (reg, 0);
2378 gcc_assert (REG_P (reg));
2380 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2382 df_insn_rescan (insn);
2385 /* Return TRUE if combine can reuse reg X in mode MODE.
2386 ADDED_SETS is nonzero if the original set is still required. */
2387 static bool
2388 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2390 unsigned int regno;
2392 if (!REG_P (x))
2393 return false;
2395 regno = REGNO (x);
2396 /* Allow hard registers if the new mode is legal, and occupies no more
2397 registers than the old mode. */
2398 if (regno < FIRST_PSEUDO_REGISTER)
2399 return (HARD_REGNO_MODE_OK (regno, mode)
2400 && (hard_regno_nregs[regno][GET_MODE (x)]
2401 >= hard_regno_nregs[regno][mode]));
2403 /* Or a pseudo that is only used once. */
2404 return (REG_N_SETS (regno) == 1 && !added_sets
2405 && !REG_USERVAR_P (x));
2409 /* Check whether X, the destination of a set, refers to part of
2410 the register specified by REG. */
2412 static bool
2413 reg_subword_p (rtx x, rtx reg)
2415 /* Check that reg is an integer mode register. */
2416 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2417 return false;
2419 if (GET_CODE (x) == STRICT_LOW_PART
2420 || GET_CODE (x) == ZERO_EXTRACT)
2421 x = XEXP (x, 0);
2423 return GET_CODE (x) == SUBREG
2424 && SUBREG_REG (x) == reg
2425 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
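/* For example (register numbers are illustrative): with REG ==
   (reg:DI 100), both (subreg:SI (reg:DI 100) 0) and
   (strict_low_part (subreg:SI (reg:DI 100) 0)) satisfy reg_subword_p,
   while (reg:SI 101) or a float-mode subreg does not.  */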
2428 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2429 Note that the INSN should be deleted *after* removing dead edges, so
2430 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2431 but not for a (set (pc) (label_ref FOO)). */
2433 static void
2434 update_cfg_for_uncondjump (rtx_insn *insn)
2436 basic_block bb = BLOCK_FOR_INSN (insn);
2437 gcc_assert (BB_END (bb) == insn);
2439 purge_dead_edges (bb);
2441 delete_insn (insn);
2442 if (EDGE_COUNT (bb->succs) == 1)
2444 rtx_insn *insn;
2446 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2448 /* Remove barriers from the footer if there are any. */
2449 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2450 if (BARRIER_P (insn))
2452 if (PREV_INSN (insn))
2453 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2454 else
2455 BB_FOOTER (bb) = NEXT_INSN (insn);
2456 if (NEXT_INSN (insn))
2457 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2459 else if (LABEL_P (insn))
2460 break;
2464 #ifndef HAVE_cc0
2465 /* Return whether INSN is a PARALLEL of exactly N register SETs followed
2466 by an arbitrary number of CLOBBERs. */
2467 static bool
2468 is_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2470 rtx pat = PATTERN (insn);
2472 if (GET_CODE (pat) != PARALLEL)
2473 return false;
2475 int len = XVECLEN (pat, 0);
2476 if (len < n)
2477 return false;
2479 int i;
2480 for (i = 0; i < n; i++)
2481 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2482 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2483 return false;
2484 for ( ; i < len; i++)
2485 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
2486 return false;
2488 return true;
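/* For example, with N == 2 the following (hypothetical) pattern matches:
     (parallel [(set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
                (set (reg:SI 101) (reg:SI 100))
                (clobber (scratch:SI))])
   since the first two elements are SETs of REGs and the trailing element
   is a CLOBBER.  */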
2491 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2492 CLOBBERs), can be split into individual SETs in that order, without
2493 changing semantics. */
2494 static bool
2495 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2497 if (!insn_nothrow_p (insn))
2498 return false;
2500 rtx pat = PATTERN (insn);
2502 int i, j;
2503 for (i = 0; i < n; i++)
2505 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2506 return false;
2508 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2510 for (j = i + 1; j < n; j++)
2511 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2512 return false;
2515 return true;
2517 #endif
2519 /* Try to combine the insns I0, I1 and I2 into I3.
2520 Here I0, I1 and I2 appear earlier than I3.
2521 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into I3.
2524 If we are combining more than two insns and the resulting insn is not
2525 recognized, try splitting it into two insns. If that happens, I2 and I3
2526 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2527 Otherwise, I0, I1 and I2 are pseudo-deleted.
2529 Return 0 if the combination does not work. Then nothing is changed.
2530 If we did the combination, return the insn at which combine should
2531 resume scanning.
2533 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2534 new direct jump instruction.
2536 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2537 been I3 passed to an earlier try_combine within the same basic
2538 block. */
2540 static rtx_insn *
2541 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2542 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2544 /* New patterns for I3 and I2, respectively. */
2545 rtx newpat, newi2pat = 0;
2546 rtvec newpat_vec_with_clobbers = 0;
2547 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2548 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2549 dead. */
2550 int added_sets_0, added_sets_1, added_sets_2;
2551 /* Total number of SETs to put into I3. */
2552 int total_sets;
2553 /* Nonzero if I2's or I1's body now appears in I3. */
2554 int i2_is_used = 0, i1_is_used = 0;
2555 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2556 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2557 /* Contains I3 if the destination of I3 is used in its source, which means
2558 that the old life of I3 is being killed. If that usage is placed into
2559 I2 and not in I3, a REG_DEAD note must be made. */
2560 rtx i3dest_killed = 0;
2561 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2562 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2563 /* Copy of SET_SRC of I1 and I0, if needed. */
2564 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2565 /* Set if I2DEST was reused as a scratch register. */
2566 bool i2scratch = false;
2567 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2568 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2569 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2570 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2571 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2572 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2573 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2574 /* Notes that must be added to REG_NOTES in I3 and I2. */
2575 rtx new_i3_notes, new_i2_notes;
2576 /* Notes that we substituted I3 into I2 instead of the normal case. */
2577 int i3_subst_into_i2 = 0;
2578 /* Notes that I1, I2 or I3 is a MULT operation. */
2579 int have_mult = 0;
2580 int swap_i2i3 = 0;
2581 int changed_i3_dest = 0;
2583 int maxreg;
2584 rtx_insn *temp_insn;
2585 rtx temp_expr;
2586 struct insn_link *link;
2587 rtx other_pat = 0;
2588 rtx new_other_notes;
2589 int i;
2591 /* Only try four-insn combinations when there's high likelihood of
2592 success. Look for simple insns, such as loads of constants or
2593 binary operations involving a constant. */
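/* For instance, a constant load such as (set (reg:SI 100) (const_int 4))
   contributes ngood == 2 on its own below, and two shift insns among
   I0..I3 (nshift >= 2) likewise allow the attempt to proceed.  */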
2594 if (i0)
2596 int i;
2597 int ngood = 0;
2598 int nshift = 0;
2600 if (!flag_expensive_optimizations)
2601 return 0;
2603 for (i = 0; i < 4; i++)
2605 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2606 rtx set = single_set (insn);
2607 rtx src;
2608 if (!set)
2609 continue;
2610 src = SET_SRC (set);
2611 if (CONSTANT_P (src))
2613 ngood += 2;
2614 break;
2616 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2617 ngood++;
2618 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2619 || GET_CODE (src) == LSHIFTRT)
2620 nshift++;
2622 if (ngood < 2 && nshift < 2)
2623 return 0;
2626 /* Exit early if one of the insns involved can't be used for
2627 combinations. */
2628 if (CALL_P (i2)
2629 || (i1 && CALL_P (i1))
2630 || (i0 && CALL_P (i0))
2631 || cant_combine_insn_p (i3)
2632 || cant_combine_insn_p (i2)
2633 || (i1 && cant_combine_insn_p (i1))
2634 || (i0 && cant_combine_insn_p (i0))
2635 || likely_spilled_retval_p (i3))
2636 return 0;
2638 combine_attempts++;
2639 undobuf.other_insn = 0;
2641 /* Reset the hard register usage information. */
2642 CLEAR_HARD_REG_SET (newpat_used_regs);
2644 if (dump_file && (dump_flags & TDF_DETAILS))
2646 if (i0)
2647 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2648 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2649 else if (i1)
2650 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2651 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2652 else
2653 fprintf (dump_file, "\nTrying %d -> %d:\n",
2654 INSN_UID (i2), INSN_UID (i3));
2657 /* If multiple insns feed into one of I2 or I3, they can be in any
2658 order. To simplify the code below, reorder them in sequence. */
2659 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2660 temp_insn = i2, i2 = i0, i0 = temp_insn;
2661 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2662 temp_insn = i1, i1 = i0, i0 = temp_insn;
2663 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2664 temp_insn = i1, i1 = i2, i2 = temp_insn;
2666 added_links_insn = 0;
2668 /* First check for one important special case that the code below will
2669 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2670 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2671 we may be able to replace that destination with the destination of I3.
2672 This occurs in the common code where we compute both a quotient and
2673 remainder into a structure, in which case we want to do the computation
2674 directly into the structure to avoid register-register copies.
2676 Note that this case handles both multiple sets in I2 and also cases
2677 where I2 has a number of CLOBBERs inside the PARALLEL.
2679 We make very conservative checks below and only try to handle the
2680 most common cases of this. For example, we only handle the case
2681 where I2 and I3 are adjacent to avoid making difficult register
2682 usage tests. */
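/* A typical (hypothetical) instance of this case:
     I2: (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                    (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
     I3: (set (mem:SI (reg:SI 104)) (reg:SI 101))
   If reg 101 dies in I3, its SET_DEST inside I2 can be replaced by the MEM,
   so the remainder is computed directly into the structure slot.  */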
2684 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2685 && REG_P (SET_SRC (PATTERN (i3)))
2686 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2687 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2688 && GET_CODE (PATTERN (i2)) == PARALLEL
2689 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2690 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2691 below would need to check what is inside (and reg_overlap_mentioned_p
2692 doesn't support those codes anyway). Don't allow those destinations;
2693 the resulting insn isn't likely to be recognized anyway. */
2694 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2695 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2696 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2697 SET_DEST (PATTERN (i3)))
2698 && next_active_insn (i2) == i3)
2700 rtx p2 = PATTERN (i2);
2702 /* Make sure that the destination of I3,
2703 which we are going to substitute into one output of I2,
2704 is not used within another output of I2. We must avoid making this:
2705 (parallel [(set (mem (reg 69)) ...)
2706 (set (reg 69) ...)])
2707 which is not well-defined as to order of actions.
2708 (Besides, reload can't handle output reloads for this.)
2710 The problem can also happen if the dest of I3 is a memory ref,
2711 if another dest in I2 is an indirect memory ref. */
2712 for (i = 0; i < XVECLEN (p2, 0); i++)
2713 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2714 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2715 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2716 SET_DEST (XVECEXP (p2, 0, i))))
2717 break;
2719 if (i == XVECLEN (p2, 0))
2720 for (i = 0; i < XVECLEN (p2, 0); i++)
2721 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2722 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2724 combine_merges++;
2726 subst_insn = i3;
2727 subst_low_luid = DF_INSN_LUID (i2);
2729 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2730 i2src = SET_SRC (XVECEXP (p2, 0, i));
2731 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2732 i2dest_killed = dead_or_set_p (i2, i2dest);
2734 /* Replace the dest in I2 with our dest and make the resulting
2735 insn the new pattern for I3. Then skip to where we validate
2736 the pattern. Everything was set up above. */
2737 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2738 newpat = p2;
2739 i3_subst_into_i2 = 1;
2740 goto validate_replacement;
2744 /* If I2 is setting a pseudo to a constant and I3 is setting some
2745 sub-part of it to another constant, merge them by making a new
2746 constant. */
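/* For example (hypothetical values): if I2 is
     (set (reg:SI 100) (const_int 0x11223344))
   and I3 stores (const_int 0x5566) into a lowpart HImode piece of
   reg 100, the two are merged by rewriting I2's source as
   (const_int 0x11225566).  */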
2747 if (i1 == 0
2748 && (temp_expr = single_set (i2)) != 0
2749 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2750 && GET_CODE (PATTERN (i3)) == SET
2751 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2752 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2754 rtx dest = SET_DEST (PATTERN (i3));
2755 int offset = -1;
2756 int width = 0;
2758 if (GET_CODE (dest) == ZERO_EXTRACT)
2760 if (CONST_INT_P (XEXP (dest, 1))
2761 && CONST_INT_P (XEXP (dest, 2)))
2763 width = INTVAL (XEXP (dest, 1));
2764 offset = INTVAL (XEXP (dest, 2));
2765 dest = XEXP (dest, 0);
2766 if (BITS_BIG_ENDIAN)
2767 offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2770 else
2772 if (GET_CODE (dest) == STRICT_LOW_PART)
2773 dest = XEXP (dest, 0);
2774 width = GET_MODE_PRECISION (GET_MODE (dest));
2775 offset = 0;
2778 if (offset >= 0)
2780 /* If this is the low part, we're done. */
2781 if (subreg_lowpart_p (dest))
2783 /* Handle the case where inner is twice the size of outer. */
2784 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
2785 == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2786 offset += GET_MODE_PRECISION (GET_MODE (dest));
2787 /* Otherwise give up for now. */
2788 else
2789 offset = -1;
2792 if (offset >= 0)
2794 rtx inner = SET_SRC (PATTERN (i3));
2795 rtx outer = SET_SRC (temp_expr);
2797 wide_int o
2798 = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp_expr))),
2799 std::make_pair (inner, GET_MODE (dest)),
2800 offset, width);
2802 combine_merges++;
2803 subst_insn = i3;
2804 subst_low_luid = DF_INSN_LUID (i2);
2805 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2806 i2dest = SET_DEST (temp_expr);
2807 i2dest_killed = dead_or_set_p (i2, i2dest);
2809 /* Replace the source in I2 with the new constant and make the
2810 resulting insn the new pattern for I3. Then skip to where we
2811 validate the pattern. Everything was set up above. */
2812 SUBST (SET_SRC (temp_expr),
2813 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));
2815 newpat = PATTERN (i2);
2817 /* The dest of I3 has been replaced with the dest of I2. */
2818 changed_i3_dest = 1;
2819 goto validate_replacement;
2823 #ifndef HAVE_cc0
2824 /* If we have no I1 and I2 looks like:
2825 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2826 (set Y OP)])
2827 make up a dummy I1 that is
2828 (set Y OP)
2829 and change I2 to be
2830 (set (reg:CC X) (compare:CC Y (const_int 0)))
2832 (We can ignore any trailing CLOBBERs.)
2834 This undoes a previous combination and allows us to match a branch-and-
2835 decrement insn. */
2837 if (i1 == 0
2838 && is_parallel_of_n_reg_sets (i2, 2)
2839 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2840 == MODE_CC)
2841 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2842 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2843 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2844 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2845 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2846 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2848 /* We make I1 with the same INSN_UID as I2. This gives it
2849 the same DF_INSN_LUID for value tracking. Our fake I1 will
2850 never appear in the insn stream so giving it the same INSN_UID
2851 as I2 will not cause a problem. */
2853 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2854 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2855 -1, NULL_RTX);
2856 INSN_UID (i1) = INSN_UID (i2);
2858 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2859 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2860 SET_DEST (PATTERN (i1)));
2861 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2862 SUBST_LINK (LOG_LINKS (i2),
2863 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2866 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2867 make those two SETs separate I1 and I2 insns, and make an I0 that is
2868 the original I1. */
2869 if (i0 == 0
2870 && is_parallel_of_n_reg_sets (i2, 2)
2871 && can_split_parallel_of_n_reg_sets (i2, 2)
2872 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2873 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2875 /* If there is no I1, there is no I0 either. */
2876 i0 = i1;
2878 /* We make I1 with the same INSN_UID as I2. This gives it
2879 the same DF_INSN_LUID for value tracking. Our fake I1 will
2880 never appear in the insn stream so giving it the same INSN_UID
2881 as I2 will not cause a problem. */
2883 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2884 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2885 -1, NULL_RTX);
2886 INSN_UID (i1) = INSN_UID (i2);
2888 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2890 #endif
2892 /* Verify that I2 and I1 are valid for combining. */
2893 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2894 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
2895 &i1dest, &i1src))
2896 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
2897 &i0dest, &i0src)))
2899 undo_all ();
2900 return 0;
2903 /* Record whether I2DEST is used in I2SRC and similarly for the other
2904 cases. Knowing this will help in register status updating below. */
2905 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2906 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2907 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2908 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2909 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2910 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2911 i2dest_killed = dead_or_set_p (i2, i2dest);
2912 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2913 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2915 /* For the earlier insns, determine which of the subsequent ones they
2916 feed. */
2917 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2918 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2919 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2920 : (!reg_overlap_mentioned_p (i1dest, i0dest)
2921 && reg_overlap_mentioned_p (i0dest, i2src))));
2923 /* Ensure that I3's pattern can be the destination of combines. */
2924 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2925 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2926 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2927 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2928 &i3dest_killed))
2930 undo_all ();
2931 return 0;
2934 /* See if any of the insns is a MULT operation. Unless one is, we will
2935 reject a combination that is, since it must be slower. Be conservative
2936 here. */
2937 if (GET_CODE (i2src) == MULT
2938 || (i1 != 0 && GET_CODE (i1src) == MULT)
2939 || (i0 != 0 && GET_CODE (i0src) == MULT)
2940 || (GET_CODE (PATTERN (i3)) == SET
2941 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2942 have_mult = 1;
2944 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2945 We used to do this EXCEPT in one case: I3 has a post-inc in an
2946 output operand. However, that exception can give rise to insns like
2947 mov r3,(r3)+
2948 which is a famous insn on the PDP-11 where the value of r3 used as the
2949 source was model-dependent. Avoid this sort of thing. */
2951 #if 0
2952 if (!(GET_CODE (PATTERN (i3)) == SET
2953 && REG_P (SET_SRC (PATTERN (i3)))
2954 && MEM_P (SET_DEST (PATTERN (i3)))
2955 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2956 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2957 /* It's not the exception. */
2958 #endif
2959 #ifdef AUTO_INC_DEC
2961 rtx link;
2962 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2963 if (REG_NOTE_KIND (link) == REG_INC
2964 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2965 || (i1 != 0
2966 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2968 undo_all ();
2969 return 0;
2972 #endif
2974 /* See if the SETs in I1 or I2 need to be kept around in the merged
2975 instruction: whenever the value set there is still needed past I3.
2976 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
2978 For the SET in I1, we have two cases: if I1 and I2 independently feed
2979 into I3, the set in I1 needs to be kept around unless I1DEST dies
2980 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
2981 in I1 needs to be kept around unless I1DEST dies or is set in either
2982 I2 or I3. The same considerations apply to I0. */
2984 added_sets_2 = !dead_or_set_p (i3, i2dest);
2986 if (i1)
2987 added_sets_1 = !(dead_or_set_p (i3, i1dest)
2988 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
2989 else
2990 added_sets_1 = 0;
2992 if (i0)
2993 added_sets_0 = !(dead_or_set_p (i3, i0dest)
2994 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
2995 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
2996 && dead_or_set_p (i2, i0dest)));
2997 else
2998 added_sets_0 = 0;
3000 /* We are about to copy insns for the case where they need to be kept
3001 around. Check that they can be copied in the merged instruction. */
3003 if (targetm.cannot_copy_insn_p
3004 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3005 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3006 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3008 undo_all ();
3009 return 0;
3012 /* If the set in I2 needs to be kept around, we must make a copy of
3013 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3014 PATTERN (I2), we are only substituting for the original I1DEST, not into
3015 an already-substituted copy. This also prevents making self-referential
3016 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3017 I2DEST. */
3019 if (added_sets_2)
3021 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3022 i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
3023 else
3024 i2pat = copy_rtx (PATTERN (i2));
3027 if (added_sets_1)
3029 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3030 i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
3031 else
3032 i1pat = copy_rtx (PATTERN (i1));
3035 if (added_sets_0)
3037 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3038 i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
3039 else
3040 i0pat = copy_rtx (PATTERN (i0));
3043 combine_merges++;
3045 /* Substitute in the latest insn for the regs set by the earlier ones. */
3047 maxreg = max_reg_num ();
3049 subst_insn = i3;
3051 #ifndef HAVE_cc0
3052 /* Many machines that don't use CC0 have insns that can both perform an
3053 arithmetic operation and set the condition code. These operations will
3054 be represented as a PARALLEL with the first element of the vector
3055 being a COMPARE of an arithmetic operation with the constant zero.
3056 The second element of the vector will set some pseudo to the result
3057 of the same arithmetic operation. If we simplify the COMPARE, we won't
3058 match such a pattern and so will generate an extra insn. Here we test
3059 for this case, where both the comparison and the operation result are
3060 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3061 I2SRC. Later we will make the PARALLEL that contains I2. */
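/* For example (register numbers are illustrative): with
     I2: (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
     I3: (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
   NEWPAT's source becomes (compare:CC (plus:SI (reg:SI 101) (reg:SI 102))
   (const_int 0)), and the SET of reg 100 is re-added later as the other
   element of the PARALLEL when its value is still needed.  */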
3063 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3064 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3065 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3066 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3068 rtx newpat_dest;
3069 rtx *cc_use_loc = NULL;
3070 rtx_insn *cc_use_insn = NULL;
3071 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3072 machine_mode compare_mode, orig_compare_mode;
3073 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3075 newpat = PATTERN (i3);
3076 newpat_dest = SET_DEST (newpat);
3077 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3079 if (undobuf.other_insn == 0
3080 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3081 &cc_use_insn)))
3083 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3084 compare_code = simplify_compare_const (compare_code,
3085 GET_MODE (i2dest), op0, &op1);
3086 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3089 /* Do the rest only if op1 is const0_rtx, which may be the
3090 result of simplification. */
3091 if (op1 == const0_rtx)
3093 /* If a single use of the CC is found, prepare to modify it
3094 when SELECT_CC_MODE returns a new CC-class mode, or when
3095 the above simplify_compare_const() returned a new comparison
3096 operator. undobuf.other_insn is assigned the CC use insn
3097 when modifying it. */
3098 if (cc_use_loc)
3100 #ifdef SELECT_CC_MODE
3101 machine_mode new_mode
3102 = SELECT_CC_MODE (compare_code, op0, op1);
3103 if (new_mode != orig_compare_mode
3104 && can_change_dest_mode (SET_DEST (newpat),
3105 added_sets_2, new_mode))
3107 unsigned int regno = REGNO (newpat_dest);
3108 compare_mode = new_mode;
3109 if (regno < FIRST_PSEUDO_REGISTER)
3110 newpat_dest = gen_rtx_REG (compare_mode, regno);
3111 else
3113 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3114 newpat_dest = regno_reg_rtx[regno];
3117 #endif
3118 /* Cases for modifying the CC-using comparison. */
3119 if (compare_code != orig_compare_code
3120 /* ??? Do we need to verify the zero rtx? */
3121 && XEXP (*cc_use_loc, 1) == const0_rtx)
3123 /* Replace cc_use_loc with entire new RTX. */
3124 SUBST (*cc_use_loc,
3125 gen_rtx_fmt_ee (compare_code, compare_mode,
3126 newpat_dest, const0_rtx));
3127 undobuf.other_insn = cc_use_insn;
3129 else if (compare_mode != orig_compare_mode)
3131 /* Just replace the CC reg with a new mode. */
3132 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3133 undobuf.other_insn = cc_use_insn;
3137 /* Now we modify the current newpat:
3138 First, SET_DEST(newpat) is updated if the CC mode has been
3139 altered. For targets without SELECT_CC_MODE, this should be
3140 optimized away. */
3141 if (compare_mode != orig_compare_mode)
3142 SUBST (SET_DEST (newpat), newpat_dest);
3143 /* This is always done to propagate i2src into newpat. */
3144 SUBST (SET_SRC (newpat),
3145 gen_rtx_COMPARE (compare_mode, op0, op1));
3146 /* Create new version of i2pat if needed; the below PARALLEL
3147 creation needs this to work correctly. */
3148 if (! rtx_equal_p (i2src, op0))
3149 i2pat = gen_rtx_SET (VOIDmode, i2dest, op0);
3150 i2_is_used = 1;
3153 #endif
3155 if (i2_is_used == 0)
3157 /* It is possible that the source of I2 or I1 may be performing
3158 an unneeded operation, such as a ZERO_EXTEND of something
3159 that is known to have the high part zero. Handle that case
3160 by letting subst look at the inner insns.
3162 Another way to do this would be to have a function that tries
3163 to simplify a single insn instead of merging two or more
3164 insns. We don't do this because of the potential of infinite
3165 loops and because of the potential extra memory required.
3166 However, doing it the way we are is a bit of a kludge and
3167 doesn't catch all cases.
3169 But only do this if -fexpensive-optimizations since it slows
3170 things down and doesn't usually win.
3172 This is not done in the COMPARE case above because the
3173 unmodified I2PAT is used in the PARALLEL and so a pattern
3174 with a modified I2SRC would not match. */
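/* For instance, if I2SRC were (zero_extend:SI (subreg:QI (reg:SI 100) 0))
   and the nonzero bits of reg 100 already fit in QImode, the call to
   subst below (with pc_rtx arguments, so only simplifications are done)
   may remove the redundant extension.  */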
3176 if (flag_expensive_optimizations)
3178 /* Pass pc_rtx so no substitutions are done, just
3179 simplifications. */
3180 if (i1)
3182 subst_low_luid = DF_INSN_LUID (i1);
3183 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3186 subst_low_luid = DF_INSN_LUID (i2);
3187 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3190 n_occurrences = 0; /* `subst' counts here */
3191 subst_low_luid = DF_INSN_LUID (i2);
3193 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3194 copy of I2SRC each time we substitute it, in order to avoid creating
3195 self-referential RTL when we will be substituting I1SRC for I1DEST
3196 later. Likewise if I0 feeds into I2, either directly or indirectly
3197 through I1, and I0DEST is in I0SRC. */
3198 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3199 (i1_feeds_i2_n && i1dest_in_i1src)
3200 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3201 && i0dest_in_i0src));
3202 substed_i2 = 1;
3204 /* Record whether I2's body now appears within I3's body. */
3205 i2_is_used = n_occurrences;
3208 /* If we already got a failure, don't try to do more. Otherwise, try to
3209 substitute I1 if we have it. */
3211 if (i1 && GET_CODE (newpat) != CLOBBER)
3213 /* Check that an autoincrement side-effect on I1 has not been lost.
3214 This happens if I1DEST is mentioned in I2 and dies there, and
3215 has disappeared from the new pattern. */
3216 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3217 && i1_feeds_i2_n
3218 && dead_or_set_p (i2, i1dest)
3219 && !reg_overlap_mentioned_p (i1dest, newpat))
3220 /* Before we can do this substitution, we must redo the test done
3221 above (see detailed comments there) that ensures I1DEST isn't
3222 mentioned in any SETs in NEWPAT that are field assignments. */
3223 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3224 0, 0, 0))
3226 undo_all ();
3227 return 0;
3230 n_occurrences = 0;
3231 subst_low_luid = DF_INSN_LUID (i1);
3233 /* If the following substitution will modify I1SRC, make a copy of it
3234 for the case where it is substituted for I1DEST in I2PAT later. */
3235 if (added_sets_2 && i1_feeds_i2_n)
3236 i1src_copy = copy_rtx (i1src);
3238 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3239 copy of I1SRC each time we substitute it, in order to avoid creating
3240 self-referential RTL when we will be substituting I0SRC for I0DEST
3241 later. */
3242 newpat = subst (newpat, i1dest, i1src, 0, 0,
3243 i0_feeds_i1_n && i0dest_in_i0src);
3244 substed_i1 = 1;
3246 /* Record whether I1's body now appears within I3's body. */
3247 i1_is_used = n_occurrences;
3250 /* Likewise for I0 if we have it. */
3252 if (i0 && GET_CODE (newpat) != CLOBBER)
3254 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3255 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3256 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3257 && !reg_overlap_mentioned_p (i0dest, newpat))
3258 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3259 0, 0, 0))
3261 undo_all ();
3262 return 0;
3265 /* If the following substitution will modify I0SRC, make a copy of it
3266 for the case where it is substituted for I0DEST in I1PAT later. */
3267 if (added_sets_1 && i0_feeds_i1_n)
3268 i0src_copy = copy_rtx (i0src);
3269 /* And a copy for I0DEST in I2PAT substitution. */
3270 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3271 || (i0_feeds_i2_n)))
3272 i0src_copy2 = copy_rtx (i0src);
3274 n_occurrences = 0;
3275 subst_low_luid = DF_INSN_LUID (i0);
3276 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3277 substed_i0 = 1;
3280 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3281 to count all the ways that I2SRC and I1SRC can be used. */
3282 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3283 && i2_is_used + added_sets_2 > 1)
3284 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3285 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3286 > 1))
3287 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3288 && (n_occurrences + added_sets_0
3289 + (added_sets_1 && i0_feeds_i1_n)
3290 + (added_sets_2 && i0_feeds_i2_n)
3291 > 1))
3292 /* Fail if we tried to make a new register. */
3293 || max_reg_num () != maxreg
3294 /* Fail if we couldn't do something and have a CLOBBER. */
3295 || GET_CODE (newpat) == CLOBBER
3296 /* Fail if this new pattern is a MULT and we didn't have one before
3297 at the outer level. */
3298 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3299 && ! have_mult))
3301 undo_all ();
3302 return 0;
3305 /* If the actions of the earlier insns must be kept
3306 in addition to substituting them into the latest one,
3307 we must make a new PARALLEL for the latest insn
3308 to hold the additional SETs. */
3310 if (added_sets_0 || added_sets_1 || added_sets_2)
3312 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3313 combine_extras++;
3315 if (GET_CODE (newpat) == PARALLEL)
3317 rtvec old = XVEC (newpat, 0);
3318 total_sets = XVECLEN (newpat, 0) + extra_sets;
3319 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3320 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3321 sizeof (old->elem[0]) * old->num_elem);
3323 else
3325 rtx old = newpat;
3326 total_sets = 1 + extra_sets;
3327 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3328 XVECEXP (newpat, 0, 0) = old;
3331 if (added_sets_0)
3332 XVECEXP (newpat, 0, --total_sets) = i0pat;
3334 if (added_sets_1)
3336 rtx t = i1pat;
3337 if (i0_feeds_i1_n)
3338 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3340 XVECEXP (newpat, 0, --total_sets) = t;
3342 if (added_sets_2)
3344 rtx t = i2pat;
3345 if (i1_feeds_i2_n)
3346 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3347 i0_feeds_i1_n && i0dest_in_i0src);
3348 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3349 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3351 XVECEXP (newpat, 0, --total_sets) = t;
3355 validate_replacement:
3357 /* Note which hard regs this insn has as inputs. */
3358 mark_used_regs_combine (newpat);
3360 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3361 consider splitting this pattern, we might need these clobbers. */
3362 if (i1 && GET_CODE (newpat) == PARALLEL
3363 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3365 int len = XVECLEN (newpat, 0);
3367 newpat_vec_with_clobbers = rtvec_alloc (len);
3368 for (i = 0; i < len; i++)
3369 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3372 /* We have recognized nothing yet. */
3373 insn_code_number = -1;
3375 /* See if this is a PARALLEL of two SETs where one SET's destination is
3376 a register that is unused and this isn't marked as an instruction that
3377 might trap in an EH region. In that case, we just need the other SET.
3378 We prefer this over the PARALLEL.
3380 This can occur when simplifying a divmod insn. We *must* test for this
3381 case here because the code below that splits two independent SETs doesn't
3382 handle this case correctly when it updates the register status.
3384 It's pointless doing this if we originally had two sets, one from
3385 i3, and one from i2. Combining then splitting the parallel results
3386 in the original i2 again plus an invalid insn (which we delete).
3387 The net effect is only to move instructions around, which makes
3388 debug info less accurate. */
3390 if (!(added_sets_2 && i1 == 0)
3391 && GET_CODE (newpat) == PARALLEL
3392 && XVECLEN (newpat, 0) == 2
3393 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3394 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3395 && asm_noperands (newpat) < 0)
3397 rtx set0 = XVECEXP (newpat, 0, 0);
3398 rtx set1 = XVECEXP (newpat, 0, 1);
3399 rtx oldpat = newpat;
3401 if (((REG_P (SET_DEST (set1))
3402 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3403 || (GET_CODE (SET_DEST (set1)) == SUBREG
3404 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3405 && insn_nothrow_p (i3)
3406 && !side_effects_p (SET_SRC (set1)))
3408 newpat = set0;
3409 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3412 else if (((REG_P (SET_DEST (set0))
3413 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3414 || (GET_CODE (SET_DEST (set0)) == SUBREG
3415 && find_reg_note (i3, REG_UNUSED,
3416 SUBREG_REG (SET_DEST (set0)))))
3417 && insn_nothrow_p (i3)
3418 && !side_effects_p (SET_SRC (set0)))
3420 newpat = set1;
3421 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3423 if (insn_code_number >= 0)
3424 changed_i3_dest = 1;
3427 if (insn_code_number < 0)
3428 newpat = oldpat;
3431 /* Is the result of combination a valid instruction? */
3432 if (insn_code_number < 0)
3433 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3435 /* If we were combining three insns and the result is a simple SET
3436 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3437 insns. There are two ways to do this. It can be split using a
3438 machine-specific method (like when you have an addition of a large
3439 constant) or by combine in the function find_split_point. */
3441 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3442 && asm_noperands (newpat) < 0)
3444 rtx parallel, *split;
3445 rtx_insn *m_split_insn;
3447 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3448 use I2DEST as a scratch register will help. In the latter case,
3449 convert I2DEST to the mode of the source of NEWPAT if we can. */
3451 m_split_insn = combine_split_insns (newpat, i3);
3453 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3454 inputs of NEWPAT. */
3456 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3457 possible to try that as a scratch reg. This would require adding
3458 more code to make it work though. */
3460 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3462 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3464 /* First try to split using the original register as a
3465 scratch register. */
3466 parallel = gen_rtx_PARALLEL (VOIDmode,
3467 gen_rtvec (2, newpat,
3468 gen_rtx_CLOBBER (VOIDmode,
3469 i2dest)));
3470 m_split_insn = combine_split_insns (parallel, i3);
3472 /* If that didn't work, try changing the mode of I2DEST if
3473 we can. */
3474 if (m_split_insn == 0
3475 && new_mode != GET_MODE (i2dest)
3476 && new_mode != VOIDmode
3477 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3479 machine_mode old_mode = GET_MODE (i2dest);
3480 rtx ni2dest;
3482 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3483 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3484 else
3486 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3487 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3490 parallel = (gen_rtx_PARALLEL
3491 (VOIDmode,
3492 gen_rtvec (2, newpat,
3493 gen_rtx_CLOBBER (VOIDmode,
3494 ni2dest))));
3495 m_split_insn = combine_split_insns (parallel, i3);
3497 if (m_split_insn == 0
3498 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3500 struct undo *buf;
3502 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3503 buf = undobuf.undos;
3504 undobuf.undos = buf->next;
3505 buf->next = undobuf.frees;
3506 undobuf.frees = buf;
3510 i2scratch = m_split_insn != 0;
3513 /* If recog_for_combine has discarded clobbers, try to use them
3514 again for the split. */
3515 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3517 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3518 m_split_insn = combine_split_insns (parallel, i3);
3521 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3523 rtx m_split_pat = PATTERN (m_split_insn);
3524 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3525 if (insn_code_number >= 0)
3526 newpat = m_split_pat;
3528 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3529 && (next_nonnote_nondebug_insn (i2) == i3
3530 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3532 rtx i2set, i3set;
3533 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3534 newi2pat = PATTERN (m_split_insn);
3536 i3set = single_set (NEXT_INSN (m_split_insn));
3537 i2set = single_set (m_split_insn);
3539 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3541 /* If I2 or I3 has multiple SETs, we won't know how to track
3542 register status, so don't use these insns. If I2's destination
3543 is used between I2 and I3, we also can't use these insns. */
3545 if (i2_code_number >= 0 && i2set && i3set
3546 && (next_nonnote_nondebug_insn (i2) == i3
3547 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3548 insn_code_number = recog_for_combine (&newi3pat, i3,
3549 &new_i3_notes);
3550 if (insn_code_number >= 0)
3551 newpat = newi3pat;
3553 /* It is possible that both insns now set the destination of I3.
3554 If so, we must show an extra use of it. */
3556 if (insn_code_number >= 0)
3558 rtx new_i3_dest = SET_DEST (i3set);
3559 rtx new_i2_dest = SET_DEST (i2set);
3561 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3562 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3563 || GET_CODE (new_i3_dest) == SUBREG)
3564 new_i3_dest = XEXP (new_i3_dest, 0);
3566 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3567 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3568 || GET_CODE (new_i2_dest) == SUBREG)
3569 new_i2_dest = XEXP (new_i2_dest, 0);
3571 if (REG_P (new_i3_dest)
3572 && REG_P (new_i2_dest)
3573 && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3574 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3578 /* If we can split it and use I2DEST, go ahead and see if that
3579 helps things be recognized. Verify that none of the registers
3580 are set between I2 and I3. */
3581 if (insn_code_number < 0
3582 && (split = find_split_point (&newpat, i3, false)) != 0
3583 #ifdef HAVE_cc0
3584 && REG_P (i2dest)
3585 #endif
3586 /* We need I2DEST in the proper mode. If it is a hard register
3587 or the only use of a pseudo, we can change its mode.
3588 Make sure we don't change a hard register to have a mode that
3589 isn't valid for it, or change the number of registers. */
3590 && (GET_MODE (*split) == GET_MODE (i2dest)
3591 || GET_MODE (*split) == VOIDmode
3592 || can_change_dest_mode (i2dest, added_sets_2,
3593 GET_MODE (*split)))
3594 && (next_nonnote_nondebug_insn (i2) == i3
3595 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3596 /* We can't overwrite I2DEST if its value is still used by
3597 NEWPAT. */
3598 && ! reg_referenced_p (i2dest, newpat))
3600 rtx newdest = i2dest;
3601 enum rtx_code split_code = GET_CODE (*split);
3602 machine_mode split_mode = GET_MODE (*split);
3603 bool subst_done = false;
3604 newi2pat = NULL_RTX;
3606 i2scratch = true;
3608 /* *SPLIT may be part of I2SRC, so make sure we have the
3609 original expression around for later debug processing.
3610 We should not need I2SRC any more in other cases. */
3611 if (MAY_HAVE_DEBUG_INSNS)
3612 i2src = copy_rtx (i2src);
3613 else
3614 i2src = NULL;
3616 /* Get NEWDEST as a register in the proper mode. We have already
3617 validated that we can do this. */
3618 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3620 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3621 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3622 else
3624 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3625 newdest = regno_reg_rtx[REGNO (i2dest)];
3629 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3630 an ASHIFT. This can occur if it was inside a PLUS and hence
3631 appeared to be a memory address. This is a kludge. */
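/* For example, (mult (reg:SI 100) (const_int 8)) is rewritten here as
   (ashift (reg:SI 100) (const_int 3)); exact_log2 of the constant
   supplies the shift count.  (Register number and mode are only
   illustrative.)  */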
3632 if (split_code == MULT
3633 && CONST_INT_P (XEXP (*split, 1))
3634 && INTVAL (XEXP (*split, 1)) > 0
3635 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3637 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3638 XEXP (*split, 0), GEN_INT (i)));
3639 /* Update split_code because we may not have a multiply
3640 anymore. */
3641 split_code = GET_CODE (*split);
3644 #ifdef INSN_SCHEDULING
3645 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3646 be written as a ZERO_EXTEND. */
3647 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3649 #ifdef LOAD_EXTEND_OP
3650 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3651 what it really is. */
3652 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3653 == SIGN_EXTEND)
3654 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3655 SUBREG_REG (*split)));
3656 else
3657 #endif
3658 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3659 SUBREG_REG (*split)));
3661 #endif
3663 /* Attempt to split binary operators using arithmetic identities. */
3664 if (BINARY_P (SET_SRC (newpat))
3665 && split_mode == GET_MODE (SET_SRC (newpat))
3666 && ! side_effects_p (SET_SRC (newpat)))
3668 rtx setsrc = SET_SRC (newpat);
3669 machine_mode mode = GET_MODE (setsrc);
3670 enum rtx_code code = GET_CODE (setsrc);
3671 rtx src_op0 = XEXP (setsrc, 0);
3672 rtx src_op1 = XEXP (setsrc, 1);
3674 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
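/* For instance, if SET_SRC is (mult (plus A B) (plus A B)), NEWI2PAT
   becomes (set NEWDEST (plus A B)) and SET_SRC becomes
   (mult NEWDEST NEWDEST).  */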
3675 if (rtx_equal_p (src_op0, src_op1))
3677 newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3678 SUBST (XEXP (setsrc, 0), newdest);
3679 SUBST (XEXP (setsrc, 1), newdest);
3680 subst_done = true;
3682 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3683 else if ((code == PLUS || code == MULT)
3684 && GET_CODE (src_op0) == code
3685 && GET_CODE (XEXP (src_op0, 0)) == code
3686 && (INTEGRAL_MODE_P (mode)
3687 || (FLOAT_MODE_P (mode)
3688 && flag_unsafe_math_optimizations)))
3690 rtx p = XEXP (XEXP (src_op0, 0), 0);
3691 rtx q = XEXP (XEXP (src_op0, 0), 1);
3692 rtx r = XEXP (src_op0, 1);
3693 rtx s = src_op1;
3695 /* Split both "((X op Y) op X) op Y" and
3696 "((X op Y) op Y) op X" as "T op T" where T is
3697 "X op Y". */
3698 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3699 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3701 newi2pat = gen_rtx_SET (VOIDmode, newdest,
3702 XEXP (src_op0, 0));
3703 SUBST (XEXP (setsrc, 0), newdest);
3704 SUBST (XEXP (setsrc, 1), newdest);
3705 subst_done = true;
3707 /* Split "((X op X) op Y) op Y" as "T op T" where
3708 T is "X op Y". */
3709 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3711 rtx tmp = simplify_gen_binary (code, mode, p, r);
3712 newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3713 SUBST (XEXP (setsrc, 0), newdest);
3714 SUBST (XEXP (setsrc, 1), newdest);
3715 subst_done = true;
3720 if (!subst_done)
3722 newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3723 SUBST (*split, newdest);
3726 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3728 /* recog_for_combine might have added CLOBBERs to newi2pat.
3729 Make sure NEWPAT does not depend on the clobbered regs. */
3730 if (GET_CODE (newi2pat) == PARALLEL)
3731 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3732 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3734 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3735 if (reg_overlap_mentioned_p (reg, newpat))
3737 undo_all ();
3738 return 0;
3742 /* If the split point was a MULT and we didn't have one before,
3743 don't use one now. */
3744 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3745 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3749 /* Check for a case where we loaded from memory in a narrow mode and
3750 then sign extended it, but we need both registers. In that case,
3751 we have a PARALLEL with both loads from the same memory location.
3752 We can split this into a load from memory followed by a register-register
3753 copy. This saves at least one insn, more if register allocation can
3754 eliminate the copy.
3756 We cannot do this if the destination of the first assignment is a
3757 condition code register or cc0. We eliminate this case by making sure
3758 the SET_DEST and SET_SRC have the same mode.
3760 We cannot do this if the destination of the second assignment is
3761 a register that we have already assumed is zero-extended. Similarly
3762 for a SUBREG of such a register. */
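/* Illustrative RTL for this case (modes chosen arbitrarily):
     (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI addr)))
                (set (reg:HI 101) (mem:HI addr))])
   which is rewritten as the extending load in NEWI2PAT followed by a
   copy of the low part of (reg:SI 100) into (reg:HI 101) in NEWPAT.  */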
3764 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3765 && GET_CODE (newpat) == PARALLEL
3766 && XVECLEN (newpat, 0) == 2
3767 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3768 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3769 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3770 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3771 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3772 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3773 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3774 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3775 DF_INSN_LUID (i2))
3776 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3777 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3778 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3779 (REG_P (temp_expr)
3780 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3781 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3782 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3783 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3784 != GET_MODE_MASK (word_mode))))
3785 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3786 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3787 (REG_P (temp_expr)
3788 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3789 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3790 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3791 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3792 != GET_MODE_MASK (word_mode)))))
3793 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3794 SET_SRC (XVECEXP (newpat, 0, 1)))
3795 && ! find_reg_note (i3, REG_UNUSED,
3796 SET_DEST (XVECEXP (newpat, 0, 0))))
3798 rtx ni2dest;
3800 newi2pat = XVECEXP (newpat, 0, 0);
3801 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3802 newpat = XVECEXP (newpat, 0, 1);
3803 SUBST (SET_SRC (newpat),
3804 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3805 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3807 if (i2_code_number >= 0)
3808 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3810 if (insn_code_number >= 0)
3811 swap_i2i3 = 1;
3814 /* Similarly, check for a case where we have a PARALLEL of two independent
3815 SETs but we started with three insns. In this case, we can do the sets
3816 as two separate insns. This case occurs when some SET allows two
3817 other insns to combine, but the destination of that SET is still live.
3819 Also do this if we started with two insns and (at least) one of the
3820 resulting sets is a noop; this noop will be deleted later. */
3822 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3823 && GET_CODE (newpat) == PARALLEL
3824 && XVECLEN (newpat, 0) == 2
3825 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3826 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3827 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3828 || set_noop_p (XVECEXP (newpat, 0, 1)))
3829 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3830 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3831 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3832 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3833 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3834 XVECEXP (newpat, 0, 0))
3835 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3836 XVECEXP (newpat, 0, 1))
3837 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3838 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3840 rtx set0 = XVECEXP (newpat, 0, 0);
3841 rtx set1 = XVECEXP (newpat, 0, 1);
3843 /* Normally, it doesn't matter which of the two is done first,
3844 but the one that references cc0 can't be the second, and
3845 one which uses any regs/memory set in between i2 and i3 can't
3846 be first. The PARALLEL might also have been pre-existing in i3,
3847 so we need to make sure that we won't wrongly hoist a SET to i2
3848 that would conflict with a death note present in there. */
3849 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3850 && !(REG_P (SET_DEST (set1))
3851 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3852 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3853 && find_reg_note (i2, REG_DEAD,
3854 SUBREG_REG (SET_DEST (set1))))
3855 #ifdef HAVE_cc0
3856 && !reg_referenced_p (cc0_rtx, set0)
3857 #endif
3858 /* If I3 is a jump, ensure that set0 is a jump so that
3859 we do not create invalid RTL. */
3860 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3863 newi2pat = set1;
3864 newpat = set0;
3866 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3867 && !(REG_P (SET_DEST (set0))
3868 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3869 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3870 && find_reg_note (i2, REG_DEAD,
3871 SUBREG_REG (SET_DEST (set0))))
3872 #ifdef HAVE_cc0
3873 && !reg_referenced_p (cc0_rtx, set1)
3874 #endif
3875 /* If I3 is a jump, ensure that set1 is a jump so that
3876 we do not create invalid RTL. */
3877 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3880 newi2pat = set0;
3881 newpat = set1;
3883 else
3885 undo_all ();
3886 return 0;
3889 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3891 if (i2_code_number >= 0)
3893 /* recog_for_combine might have added CLOBBERs to newi2pat.
3894 Make sure NEWPAT does not depend on the clobbered regs. */
3895 if (GET_CODE (newi2pat) == PARALLEL)
3897 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3898 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3900 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3901 if (reg_overlap_mentioned_p (reg, newpat))
3903 undo_all ();
3904 return 0;
3909 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3913 /* If it still isn't recognized, fail and change things back the way they
3914 were. */
3915 if ((insn_code_number < 0
3916 /* Is the result a reasonable ASM_OPERANDS? */
3917 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3919 undo_all ();
3920 return 0;
3923 /* If we had to change another insn, make sure it is valid also. */
3924 if (undobuf.other_insn)
3926 CLEAR_HARD_REG_SET (newpat_used_regs);
3928 other_pat = PATTERN (undobuf.other_insn);
3929 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3930 &new_other_notes);
3932 if (other_code_number < 0 && ! check_asm_operands (other_pat))
3934 undo_all ();
3935 return 0;
3939 #ifdef HAVE_cc0
3940 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3941 they are adjacent to each other or not. */
3943 rtx_insn *p = prev_nonnote_insn (i3);
3944 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3945 && sets_cc0_p (newi2pat))
3947 undo_all ();
3948 return 0;
3951 #endif
3953 /* Only allow this combination if insn_rtx_costs reports that the
3954 replacement instructions are cheaper than the originals. */
3955 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3957 undo_all ();
3958 return 0;
3961 if (MAY_HAVE_DEBUG_INSNS)
3963 struct undo *undo;
3965 for (undo = undobuf.undos; undo; undo = undo->next)
3966 if (undo->kind == UNDO_MODE)
3968 rtx reg = *undo->where.r;
3969 machine_mode new_mode = GET_MODE (reg);
3970 machine_mode old_mode = undo->old_contents.m;
3972 /* Temporarily revert mode back. */
3973 adjust_reg_mode (reg, old_mode);
3975 if (reg == i2dest && i2scratch)
3977 /* If we used i2dest as a scratch register with a
3978 different mode, substitute it for the original
3979 i2src while its original mode is temporarily
3980 restored, and then clear i2scratch so that we don't
3981 do it again later. */
3982 propagate_for_debug (i2, last_combined_insn, reg, i2src,
3983 this_basic_block);
3984 i2scratch = false;
3985 /* Put back the new mode. */
3986 adjust_reg_mode (reg, new_mode);
3988 else
3990 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3991 rtx_insn *first, *last;
3993 if (reg == i2dest)
3995 first = i2;
3996 last = last_combined_insn;
3998 else
4000 first = i3;
4001 last = undobuf.other_insn;
4002 gcc_assert (last);
4003 if (DF_INSN_LUID (last)
4004 < DF_INSN_LUID (last_combined_insn))
4005 last = last_combined_insn;
4008 /* We're dealing with a reg that changed mode but not
4009 meaning, so we want to turn it into a subreg for
4010 the new mode. However, because of REG sharing and
4011 because its mode had already changed, we have to do
4012 it in two steps. First, replace any debug uses of
4013 reg, with its original mode temporarily restored,
4014 with this copy we have created; then, replace the
4015 copy with the SUBREG of the original shared reg,
4016 once again changed to the new mode. */
4017 propagate_for_debug (first, last, reg, tempreg,
4018 this_basic_block);
4019 adjust_reg_mode (reg, new_mode);
4020 propagate_for_debug (first, last, tempreg,
4021 lowpart_subreg (old_mode, reg, new_mode),
4022 this_basic_block);
4027 /* If we will be able to accept this, we have made a
4028 change to the destination of I3. This requires us to
4029 do a few adjustments. */
4031 if (changed_i3_dest)
4033 PATTERN (i3) = newpat;
4034 adjust_for_new_dest (i3);
4037 /* We now know that we can do this combination. Merge the insns and
4038 update the status of registers and LOG_LINKS. */
4040 if (undobuf.other_insn)
4042 rtx note, next;
4044 PATTERN (undobuf.other_insn) = other_pat;
4046 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4047 ensure that they are still valid. Then add any non-duplicate
4048 notes added by recog_for_combine. */
4049 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4051 next = XEXP (note, 1);
4053 if ((REG_NOTE_KIND (note) == REG_DEAD
4054 && !reg_referenced_p (XEXP (note, 0),
4055 PATTERN (undobuf.other_insn)))
4056 || (REG_NOTE_KIND (note) == REG_UNUSED
4057 && !reg_set_p (XEXP (note, 0),
4058 PATTERN (undobuf.other_insn))))
4059 remove_note (undobuf.other_insn, note);
4062 distribute_notes (new_other_notes, undobuf.other_insn,
4063 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4064 NULL_RTX);
4067 if (swap_i2i3)
4069 rtx_insn *insn;
4070 struct insn_link *link;
4071 rtx ni2dest;
4073 /* I3 now uses what used to be its destination and which is now
4074 I2's destination. This requires us to do a few adjustments. */
4075 PATTERN (i3) = newpat;
4076 adjust_for_new_dest (i3);
4078 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4079 so we still will.
4081 However, some later insn might be using I2's dest and have
4082 a LOG_LINK pointing at I3. We must remove this link.
4083 The simplest way to remove the link is to point it at I1,
4084 which we know will be a NOTE. */
4086 /* newi2pat is usually a SET here; however, recog_for_combine might
4087 have added some clobbers. */
4088 if (GET_CODE (newi2pat) == PARALLEL)
4089 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4090 else
4091 ni2dest = SET_DEST (newi2pat);
4093 for (insn = NEXT_INSN (i3);
4094 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4095 || insn != BB_HEAD (this_basic_block->next_bb));
4096 insn = NEXT_INSN (insn))
4098 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
4100 FOR_EACH_LOG_LINK (link, insn)
4101 if (link->insn == i3)
4102 link->insn = i1;
4104 break;
4110 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4111 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4112 rtx midnotes = 0;
4113 int from_luid;
4114 /* Compute which registers we expect to eliminate. newi2pat may be setting
4115 either i3dest or i2dest, so we must check it. Also, i1dest may be the
4116 same as i3dest, in which case newi2pat may be setting i1dest. */
4117 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4118 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4119 || !i2dest_killed
4120 ? 0 : i2dest);
4121 rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4122 || (newi2pat && reg_set_p (i1dest, newi2pat))
4123 || !i1dest_killed
4124 ? 0 : i1dest);
4125 rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
4126 || (newi2pat && reg_set_p (i0dest, newi2pat))
4127 || !i0dest_killed
4128 ? 0 : i0dest);
4130 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4131 clear them. */
4132 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4133 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4134 if (i1)
4135 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4136 if (i0)
4137 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4139 /* Ensure that we do not have something that should not be shared but
4140 occurs multiple times in the new insns. Check this by first
4141 resetting all the `used' flags and then copying anything that is shared.
4143 reset_used_flags (i3notes);
4144 reset_used_flags (i2notes);
4145 reset_used_flags (i1notes);
4146 reset_used_flags (i0notes);
4147 reset_used_flags (newpat);
4148 reset_used_flags (newi2pat);
4149 if (undobuf.other_insn)
4150 reset_used_flags (PATTERN (undobuf.other_insn));
4152 i3notes = copy_rtx_if_shared (i3notes);
4153 i2notes = copy_rtx_if_shared (i2notes);
4154 i1notes = copy_rtx_if_shared (i1notes);
4155 i0notes = copy_rtx_if_shared (i0notes);
4156 newpat = copy_rtx_if_shared (newpat);
4157 newi2pat = copy_rtx_if_shared (newi2pat);
4158 if (undobuf.other_insn)
4159 reset_used_flags (PATTERN (undobuf.other_insn));
4161 INSN_CODE (i3) = insn_code_number;
4162 PATTERN (i3) = newpat;
4164 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4166 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4168 reset_used_flags (call_usage);
4169 call_usage = copy_rtx (call_usage);
4171 if (substed_i2)
4173 /* I2SRC must still be meaningful at this point. Some splitting
4174 operations can invalidate I2SRC, but those operations do not
4175 apply to calls. */
4176 gcc_assert (i2src);
4177 replace_rtx (call_usage, i2dest, i2src);
4180 if (substed_i1)
4181 replace_rtx (call_usage, i1dest, i1src);
4182 if (substed_i0)
4183 replace_rtx (call_usage, i0dest, i0src);
4185 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4188 if (undobuf.other_insn)
4189 INSN_CODE (undobuf.other_insn) = other_code_number;
4191 /* We had one special case above where I2 had more than one set and
4192 we replaced a destination of one of those sets with the destination
4193 of I3. In that case, we have to update LOG_LINKS of insns later
4194 in this basic block. Note that this (expensive) case is rare.
4196 Also, in this case, we must pretend that all REG_NOTEs for I2
4197 actually came from I3, so that REG_UNUSED notes from I2 will be
4198 properly handled. */
4200 if (i3_subst_into_i2)
4202 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4203 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4204 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4205 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4206 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4207 && ! find_reg_note (i2, REG_UNUSED,
4208 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4209 for (temp_insn = NEXT_INSN (i2);
4210 temp_insn
4211 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4212 || BB_HEAD (this_basic_block) != temp_insn);
4213 temp_insn = NEXT_INSN (temp_insn))
4214 if (temp_insn != i3 && INSN_P (temp_insn))
4215 FOR_EACH_LOG_LINK (link, temp_insn)
4216 if (link->insn == i2)
4217 link->insn = i3;
4219 if (i3notes)
4221 rtx link = i3notes;
4222 while (XEXP (link, 1))
4223 link = XEXP (link, 1);
4224 XEXP (link, 1) = i2notes;
4226 else
4227 i3notes = i2notes;
4228 i2notes = 0;
4231 LOG_LINKS (i3) = NULL;
4232 REG_NOTES (i3) = 0;
4233 LOG_LINKS (i2) = NULL;
4234 REG_NOTES (i2) = 0;
4236 if (newi2pat)
4238 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4239 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4240 this_basic_block);
4241 INSN_CODE (i2) = i2_code_number;
4242 PATTERN (i2) = newi2pat;
4244 else
4246 if (MAY_HAVE_DEBUG_INSNS && i2src)
4247 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4248 this_basic_block);
4249 SET_INSN_DELETED (i2);
4252 if (i1)
4254 LOG_LINKS (i1) = NULL;
4255 REG_NOTES (i1) = 0;
4256 if (MAY_HAVE_DEBUG_INSNS)
4257 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4258 this_basic_block);
4259 SET_INSN_DELETED (i1);
4262 if (i0)
4264 LOG_LINKS (i0) = NULL;
4265 REG_NOTES (i0) = 0;
4266 if (MAY_HAVE_DEBUG_INSNS)
4267 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4268 this_basic_block);
4269 SET_INSN_DELETED (i0);
4272 /* Get death notes for everything that is now used in either I3 or
4273 I2 and used to die in a previous insn. If we built two new
4274 patterns, move from I1 to I2 then I2 to I3 so that we get the
4275 proper movement on registers that I2 modifies. */
4277 if (i0)
4278 from_luid = DF_INSN_LUID (i0);
4279 else if (i1)
4280 from_luid = DF_INSN_LUID (i1);
4281 else
4282 from_luid = DF_INSN_LUID (i2);
4283 if (newi2pat)
4284 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4285 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4287 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4288 if (i3notes)
4289 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4290 elim_i2, elim_i1, elim_i0);
4291 if (i2notes)
4292 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4293 elim_i2, elim_i1, elim_i0);
4294 if (i1notes)
4295 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4296 elim_i2, elim_i1, elim_i0);
4297 if (i0notes)
4298 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4299 elim_i2, elim_i1, elim_i0);
4300 if (midnotes)
4301 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4302 elim_i2, elim_i1, elim_i0);
4304 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4305 know these are REG_UNUSED and want them to go to the desired insn,
4306 so we always pass it as i3. */
4308 if (newi2pat && new_i2_notes)
4309 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4310 NULL_RTX);
4312 if (new_i3_notes)
4313 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4314 NULL_RTX);
4316 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4317 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4318 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4319 in that case, it might delete I2. Similarly for I2 and I1.
4320 Show an additional death due to the REG_DEAD note we make here. If
4321 we discard it in distribute_notes, we will decrement it again. */
4323 if (i3dest_killed)
4325 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4326 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4327 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4328 elim_i1, elim_i0);
4329 else
4330 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4331 elim_i2, elim_i1, elim_i0);
4334 if (i2dest_in_i2src)
4336 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4337 if (newi2pat && reg_set_p (i2dest, newi2pat))
4338 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4339 NULL_RTX, NULL_RTX);
4340 else
4341 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4342 NULL_RTX, NULL_RTX, NULL_RTX);
4345 if (i1dest_in_i1src)
4347 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4348 if (newi2pat && reg_set_p (i1dest, newi2pat))
4349 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4350 NULL_RTX, NULL_RTX);
4351 else
4352 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4353 NULL_RTX, NULL_RTX, NULL_RTX);
4356 if (i0dest_in_i0src)
4358 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4359 if (newi2pat && reg_set_p (i0dest, newi2pat))
4360 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4361 NULL_RTX, NULL_RTX);
4362 else
4363 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4364 NULL_RTX, NULL_RTX, NULL_RTX);
4367 distribute_links (i3links);
4368 distribute_links (i2links);
4369 distribute_links (i1links);
4370 distribute_links (i0links);
4372 if (REG_P (i2dest))
4374 struct insn_link *link;
4375 rtx_insn *i2_insn = 0;
4376 rtx i2_val = 0, set;
4378 /* The insn that used to set this register doesn't exist, and
4379 this life of the register may not exist either. See if one of
4380 I3's links points to an insn that sets I2DEST. If it does,
4381 that is now the last known value for I2DEST. If we don't update
4382 this and I2 set the register to a value that depended on its old
4383 contents, we will get confused. If this insn is used, things
4384 will be set correctly in combine_instructions. */
4385 FOR_EACH_LOG_LINK (link, i3)
4386 if ((set = single_set (link->insn)) != 0
4387 && rtx_equal_p (i2dest, SET_DEST (set)))
4388 i2_insn = link->insn, i2_val = SET_SRC (set);
4390 record_value_for_reg (i2dest, i2_insn, i2_val);
4392 /* If the reg formerly set in I2 died only once and that was in I3,
4393 zero its use count so it won't make `reload' do any work. */
4394 if (! added_sets_2
4395 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4396 && ! i2dest_in_i2src)
4397 INC_REG_N_SETS (REGNO (i2dest), -1);
4400 if (i1 && REG_P (i1dest))
4402 struct insn_link *link;
4403 rtx_insn *i1_insn = 0;
4404 rtx i1_val = 0, set;
4406 FOR_EACH_LOG_LINK (link, i3)
4407 if ((set = single_set (link->insn)) != 0
4408 && rtx_equal_p (i1dest, SET_DEST (set)))
4409 i1_insn = link->insn, i1_val = SET_SRC (set);
4411 record_value_for_reg (i1dest, i1_insn, i1_val);
4413 if (! added_sets_1 && ! i1dest_in_i1src)
4414 INC_REG_N_SETS (REGNO (i1dest), -1);
4417 if (i0 && REG_P (i0dest))
4419 struct insn_link *link;
4420 rtx_insn *i0_insn = 0;
4421 rtx i0_val = 0, set;
4423 FOR_EACH_LOG_LINK (link, i3)
4424 if ((set = single_set (link->insn)) != 0
4425 && rtx_equal_p (i0dest, SET_DEST (set)))
4426 i0_insn = link->insn, i0_val = SET_SRC (set);
4428 record_value_for_reg (i0dest, i0_insn, i0_val);
4430 if (! added_sets_0 && ! i0dest_in_i0src)
4431 INC_REG_N_SETS (REGNO (i0dest), -1);
4434 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4435 been made to this insn. The order is important, because newi2pat
4436 can affect nonzero_bits of newpat. */
4437 if (newi2pat)
4438 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4439 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4442 if (undobuf.other_insn != NULL_RTX)
4444 if (dump_file)
4446 fprintf (dump_file, "modifying other_insn ");
4447 dump_insn_slim (dump_file, undobuf.other_insn);
4449 df_insn_rescan (undobuf.other_insn);
4452 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4454 if (dump_file)
4456 fprintf (dump_file, "modifying insn i0 ");
4457 dump_insn_slim (dump_file, i0);
4459 df_insn_rescan (i0);
4462 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4464 if (dump_file)
4466 fprintf (dump_file, "modifying insn i1 ");
4467 dump_insn_slim (dump_file, i1);
4469 df_insn_rescan (i1);
4472 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4474 if (dump_file)
4476 fprintf (dump_file, "modifying insn i2 ");
4477 dump_insn_slim (dump_file, i2);
4479 df_insn_rescan (i2);
4482 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4484 if (dump_file)
4486 fprintf (dump_file, "modifying insn i3 ");
4487 dump_insn_slim (dump_file, i3);
4489 df_insn_rescan (i3);
4492 /* Set new_direct_jump_p if a new return or simple jump instruction
4493 has been created. Adjust the CFG accordingly. */
4494 if (returnjump_p (i3) || any_uncondjump_p (i3))
4496 *new_direct_jump_p = 1;
4497 mark_jump_label (PATTERN (i3), i3, 0);
4498 update_cfg_for_uncondjump (i3);
4501 if (undobuf.other_insn != NULL_RTX
4502 && (returnjump_p (undobuf.other_insn)
4503 || any_uncondjump_p (undobuf.other_insn)))
4505 *new_direct_jump_p = 1;
4506 update_cfg_for_uncondjump (undobuf.other_insn);
4509 /* A noop might also need cleaning up of the CFG, if it comes from the
4510 simplification of a jump. */
4511 if (JUMP_P (i3)
4512 && GET_CODE (newpat) == SET
4513 && SET_SRC (newpat) == pc_rtx
4514 && SET_DEST (newpat) == pc_rtx)
4516 *new_direct_jump_p = 1;
4517 update_cfg_for_uncondjump (i3);
4520 if (undobuf.other_insn != NULL_RTX
4521 && JUMP_P (undobuf.other_insn)
4522 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4523 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4524 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4526 *new_direct_jump_p = 1;
4527 update_cfg_for_uncondjump (undobuf.other_insn);
4530 combine_successes++;
4531 undo_commit ();
4533 if (added_links_insn
4534 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4535 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4536 return added_links_insn;
4537 else
4538 return newi2pat ? i2 : i3;
4541 /* Undo all the modifications recorded in undobuf. */
4543 static void
4544 undo_all (void)
4546 struct undo *undo, *next;
4548 for (undo = undobuf.undos; undo; undo = next)
4550 next = undo->next;
4551 switch (undo->kind)
4553 case UNDO_RTX:
4554 *undo->where.r = undo->old_contents.r;
4555 break;
4556 case UNDO_INT:
4557 *undo->where.i = undo->old_contents.i;
4558 break;
4559 case UNDO_MODE:
4560 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4561 break;
4562 case UNDO_LINKS:
4563 *undo->where.l = undo->old_contents.l;
4564 break;
4565 default:
4566 gcc_unreachable ();
4569 undo->next = undobuf.frees;
4570 undobuf.frees = undo;
4573 undobuf.undos = 0;
4576 /* We've committed to accepting the changes we made. Move all
4577 of the undos to the free list. */
4579 static void
4580 undo_commit (void)
4582 struct undo *undo, *next;
4584 for (undo = undobuf.undos; undo; undo = next)
4586 next = undo->next;
4587 undo->next = undobuf.frees;
4588 undobuf.frees = undo;
4590 undobuf.undos = 0;
4593 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4594 where we have an arithmetic expression and return that point. LOC will
4595 be inside INSN.
4597 try_combine will call this function to see if an insn can be split into
4598 two insns. */
4600 static rtx *
4601 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4603 rtx x = *loc;
4604 enum rtx_code code = GET_CODE (x);
4605 rtx *split;
4606 unsigned HOST_WIDE_INT len = 0;
4607 HOST_WIDE_INT pos = 0;
4608 int unsignedp = 0;
4609 rtx inner = NULL_RTX;
4611 /* First special-case some codes. */
4612 switch (code)
4614 case SUBREG:
4615 #ifdef INSN_SCHEDULING
4616 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4617 point. */
4618 if (MEM_P (SUBREG_REG (x)))
4619 return loc;
4620 #endif
4621 return find_split_point (&SUBREG_REG (x), insn, false);
4623 case MEM:
4624 #ifdef HAVE_lo_sum
4625 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4626 using LO_SUM and HIGH. */
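/* E.g. (mem (symbol_ref "x")) becomes
   (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))
   and the HIGH sub-expression is returned as the split point.  */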
4627 if (GET_CODE (XEXP (x, 0)) == CONST
4628 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4630 machine_mode address_mode = get_address_mode (x);
4632 SUBST (XEXP (x, 0),
4633 gen_rtx_LO_SUM (address_mode,
4634 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4635 XEXP (x, 0)));
4636 return &XEXP (XEXP (x, 0), 0);
4638 #endif
4640 /* If we have a PLUS whose second operand is a constant and the
4641 address is not valid, perhaps we can split it up using
4642 the machine-specific way to split large constants. We use
4643 the first pseudo-reg (one of the virtual regs) as a placeholder;
4644 it will not remain in the result. */
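/* For example, on a target whose addressing accepts only small
   offsets, (plus (reg) (const_int 0x12345)) might be split by the
   machine description into a load of the high part of the constant
   into the placeholder register followed by an add of the low part;
   the split point then falls inside the second insn's source.
   (Purely illustrative; the exact split is target-defined.)  */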
4645 if (GET_CODE (XEXP (x, 0)) == PLUS
4646 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4647 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4648 MEM_ADDR_SPACE (x)))
4650 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4651 rtx_insn *seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4652 XEXP (x, 0)),
4653 subst_insn);
4655 /* This should have produced two insns, each of which sets our
4656 placeholder. If the source of the second is a valid address,
4657 we can put both sources together and make a split point
4658 in the middle. */
4660 if (seq
4661 && NEXT_INSN (seq) != NULL_RTX
4662 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4663 && NONJUMP_INSN_P (seq)
4664 && GET_CODE (PATTERN (seq)) == SET
4665 && SET_DEST (PATTERN (seq)) == reg
4666 && ! reg_mentioned_p (reg,
4667 SET_SRC (PATTERN (seq)))
4668 && NONJUMP_INSN_P (NEXT_INSN (seq))
4669 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4670 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4671 && memory_address_addr_space_p
4672 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4673 MEM_ADDR_SPACE (x)))
4675 rtx src1 = SET_SRC (PATTERN (seq));
4676 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4678 /* Replace the placeholder in SRC2 with SRC1. If we can
4679 find where in SRC2 it was placed, that can become our
4680 split point and we can replace this address with SRC2.
4681 Just try two obvious places. */
4683 src2 = replace_rtx (src2, reg, src1);
4684 split = 0;
4685 if (XEXP (src2, 0) == src1)
4686 split = &XEXP (src2, 0);
4687 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4688 && XEXP (XEXP (src2, 0), 0) == src1)
4689 split = &XEXP (XEXP (src2, 0), 0);
4691 if (split)
4693 SUBST (XEXP (x, 0), src2);
4694 return split;
4698 /* If that didn't work, perhaps the first operand is complex and
4699 needs to be computed separately, so make a split point there.
4700 This will occur on machines that just support REG + CONST
4701 and have a constant moved through some previous computation. */
4703 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4704 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4705 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4706 return &XEXP (XEXP (x, 0), 0);
4709 /* If we have a PLUS whose first operand is complex, try computing it
4710 separately by making a split there. */
4711 if (GET_CODE (XEXP (x, 0)) == PLUS
4712 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4713 MEM_ADDR_SPACE (x))
4714 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4715 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4716 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4717 return &XEXP (XEXP (x, 0), 0);
4718 break;
4720 case SET:
4721 #ifdef HAVE_cc0
4722 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4723 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4724 we need to put the operand into a register. So split at that
4725 point. */
4727 if (SET_DEST (x) == cc0_rtx
4728 && GET_CODE (SET_SRC (x)) != COMPARE
4729 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4730 && !OBJECT_P (SET_SRC (x))
4731 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4732 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4733 return &SET_SRC (x);
4734 #endif
4736 /* See if we can split SET_SRC as it stands. */
4737 split = find_split_point (&SET_SRC (x), insn, true);
4738 if (split && split != &SET_SRC (x))
4739 return split;
4741 /* See if we can split SET_DEST as it stands. */
4742 split = find_split_point (&SET_DEST (x), insn, false);
4743 if (split && split != &SET_DEST (x))
4744 return split;
4746 /* See if this is a bitfield assignment with everything constant. If
4747 so, this is an IOR of an AND, so split it into that. */
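/* That is, (set (zero_extract DEST LEN POS) (const_int VAL)) is
   rewritten as DEST = (DEST & ~(MASK << POS)) | (VAL << POS), where
   MASK is the LEN-bit mask and POS is adjusted for BITS_BIG_ENDIAN,
   or simply DEST = DEST | (VAL << POS) when VAL sets every bit of
   the field.  */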
4748 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4749 && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4750 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4751 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4752 && CONST_INT_P (SET_SRC (x))
4753 && ((INTVAL (XEXP (SET_DEST (x), 1))
4754 + INTVAL (XEXP (SET_DEST (x), 2)))
4755 <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4756 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4758 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4759 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4760 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4761 rtx dest = XEXP (SET_DEST (x), 0);
4762 machine_mode mode = GET_MODE (dest);
4763 unsigned HOST_WIDE_INT mask
4764 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4765 rtx or_mask;
4767 if (BITS_BIG_ENDIAN)
4768 pos = GET_MODE_PRECISION (mode) - len - pos;
4770 or_mask = gen_int_mode (src << pos, mode);
4771 if (src == mask)
4772 SUBST (SET_SRC (x),
4773 simplify_gen_binary (IOR, mode, dest, or_mask));
4774 else
4776 rtx negmask = gen_int_mode (~(mask << pos), mode);
4777 SUBST (SET_SRC (x),
4778 simplify_gen_binary (IOR, mode,
4779 simplify_gen_binary (AND, mode,
4780 dest, negmask),
4781 or_mask));
4784 SUBST (SET_DEST (x), dest);
4786 split = find_split_point (&SET_SRC (x), insn, true);
4787 if (split && split != &SET_SRC (x))
4788 return split;
4791 /* Otherwise, see if this is an operation that we can split into two.
4792 If so, try to split that. */
4793 code = GET_CODE (SET_SRC (x));
4795 switch (code)
4797 case AND:
4798 /* If we are AND'ing with a large constant that is only a single
4799 bit and the result is only being used in a context where we
4800 need to know if it is zero or nonzero, replace it with a bit
4801 extraction. This will avoid the large constant, which might
4802 have taken more than one insn to make. If the constant were
4803 not a valid argument to the AND but took only one insn to make,
4804 this is no worse, but if it took more than one insn, it will
4805 be better. */
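/* For example, (set D (and X (const_int 0x1000))) whose only use of D
   is a comparison against zero can be replaced by a one-bit
   ZERO_EXTRACT of bit 12 of X, avoiding the large constant.  */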
4807 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4808 && REG_P (XEXP (SET_SRC (x), 0))
4809 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4810 && REG_P (SET_DEST (x))
4811 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
4812 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4813 && XEXP (*split, 0) == SET_DEST (x)
4814 && XEXP (*split, 1) == const0_rtx)
4816 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4817 XEXP (SET_SRC (x), 0),
4818 pos, NULL_RTX, 1, 1, 0, 0);
4819 if (extraction != 0)
4821 SUBST (SET_SRC (x), extraction);
4822 return find_split_point (loc, insn, false);
4825 break;
4827 case NE:
4828 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4829 is known to be on, this can be converted into a NEG of a shift. */
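/* E.g. when only bit 3 of X can be nonzero, (ne X (const_int 0))
   becomes (neg (lshiftrt X (const_int 3))), which evaluates to 0 or
   -1 as STORE_FLAG_VALUE == -1 requires.  */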
4830 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4831 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4832 && 1 <= (pos = exact_log2
4833 (nonzero_bits (XEXP (SET_SRC (x), 0),
4834 GET_MODE (XEXP (SET_SRC (x), 0))))))
4836 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4838 SUBST (SET_SRC (x),
4839 gen_rtx_NEG (mode,
4840 gen_rtx_LSHIFTRT (mode,
4841 XEXP (SET_SRC (x), 0),
4842 GEN_INT (pos))));
4844 split = find_split_point (&SET_SRC (x), insn, true);
4845 if (split && split != &SET_SRC (x))
4846 return split;
4848 break;
4850 case SIGN_EXTEND:
4851 inner = XEXP (SET_SRC (x), 0);
4853 /* We can't optimize if either mode is a partial integer
4854 mode as we don't know how many bits are significant
4855 in those modes. */
4856 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4857 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4858 break;
4860 pos = 0;
4861 len = GET_MODE_PRECISION (GET_MODE (inner));
4862 unsignedp = 0;
4863 break;
4865 case SIGN_EXTRACT:
4866 case ZERO_EXTRACT:
4867 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4868 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4870 inner = XEXP (SET_SRC (x), 0);
4871 len = INTVAL (XEXP (SET_SRC (x), 1));
4872 pos = INTVAL (XEXP (SET_SRC (x), 2));
4874 if (BITS_BIG_ENDIAN)
4875 pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
4876 unsignedp = (code == ZERO_EXTRACT);
4878 break;
4880 default:
4881 break;
4884 if (len && pos >= 0
4885 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
4887 machine_mode mode = GET_MODE (SET_SRC (x));
4889 /* For unsigned, we have a choice of a shift followed by an
4890 AND or two shifts. Use two shifts for field sizes where the
4891 constant might be too large. We assume here that we can
4892 always at least get 8-bit constants in an AND insn, which is
4893 true for every current RISC. */
4895 if (unsignedp && len <= 8)
4897 unsigned HOST_WIDE_INT mask
4898 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4899 SUBST (SET_SRC (x),
4900 gen_rtx_AND (mode,
4901 gen_rtx_LSHIFTRT
4902 (mode, gen_lowpart (mode, inner),
4903 GEN_INT (pos)),
4904 gen_int_mode (mask, mode)));
4906 split = find_split_point (&SET_SRC (x), insn, true);
4907 if (split && split != &SET_SRC (x))
4908 return split;
4910 else
4912 SUBST (SET_SRC (x),
4913 gen_rtx_fmt_ee
4914 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4915 gen_rtx_ASHIFT (mode,
4916 gen_lowpart (mode, inner),
4917 GEN_INT (GET_MODE_PRECISION (mode)
4918 - len - pos)),
4919 GEN_INT (GET_MODE_PRECISION (mode) - len)));
4921 split = find_split_point (&SET_SRC (x), insn, true);
4922 if (split && split != &SET_SRC (x))
4923 return split;
4927 /* See if this is a simple operation with a constant as the second
4928 operand. It might be that this constant is out of range and hence
4929 could be used as a split point. */
4930 if (BINARY_P (SET_SRC (x))
4931 && CONSTANT_P (XEXP (SET_SRC (x), 1))
4932 && (OBJECT_P (XEXP (SET_SRC (x), 0))
4933 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4934 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4935 return &XEXP (SET_SRC (x), 1);
4937 /* Finally, see if this is a simple operation with its first operand
4938 not in a register. The operation might require this operand in a
4939 register, so return it as a split point. We can always do this
4940 because if the first operand were another operation, we would have
4941 already found it as a split point. */
4942 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4943 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4944 return &XEXP (SET_SRC (x), 0);
4946 return 0;
4948 case AND:
4949 case IOR:
4950 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4951 it is better to write this as (not (ior A B)) so we can split it.
4952 Similarly for IOR. */
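/* I.e. by De Morgan, (and (not A) (not B)) is rewritten as
   (not (ior A B)) and (ior (not A) (not B)) as (not (and A B)).  */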
4953 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4955 SUBST (*loc,
4956 gen_rtx_NOT (GET_MODE (x),
4957 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4958 GET_MODE (x),
4959 XEXP (XEXP (x, 0), 0),
4960 XEXP (XEXP (x, 1), 0))));
4961 return find_split_point (loc, insn, set_src);
4964 /* Many RISC machines have a large set of logical insns. If the
4965 second operand is a NOT, put it first so we will try to split the
4966 other operand first. */
4967 if (GET_CODE (XEXP (x, 1)) == NOT)
4969 rtx tem = XEXP (x, 0);
4970 SUBST (XEXP (x, 0), XEXP (x, 1));
4971 SUBST (XEXP (x, 1), tem);
4973 break;
4975 case PLUS:
4976 case MINUS:
4977 /* Canonicalization can produce (minus A (mult B C)), where C is a
4978 constant. It may be better to try splitting (plus (mult B -C) A)
4979 instead if this isn't a multiply by a power of two. */
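/* E.g. (minus A (mult B (const_int 3))) is rewritten as
   (plus (mult B (const_int -3)) A) before looking for a split point.  */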
4980 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4981 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4982 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4984 machine_mode mode = GET_MODE (x);
4985 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4986 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4987 SUBST (*loc, gen_rtx_PLUS (mode,
4988 gen_rtx_MULT (mode,
4989 XEXP (XEXP (x, 1), 0),
4990 gen_int_mode (other_int,
4991 mode)),
4992 XEXP (x, 0)));
4993 return find_split_point (loc, insn, set_src);
4996 /* Split at a multiply-accumulate instruction. However if this is
4997 the SET_SRC, we likely do not have such an instruction and it's
4998 worthless to try this split. */
4999 if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
5000 return loc;
5002 default:
5003 break;
5006 /* Otherwise, select our actions depending on our rtx class. */
5007 switch (GET_RTX_CLASS (code))
5009 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5010 case RTX_TERNARY:
5011 split = find_split_point (&XEXP (x, 2), insn, false);
5012 if (split)
5013 return split;
5014 /* ... fall through ... */
5015 case RTX_BIN_ARITH:
5016 case RTX_COMM_ARITH:
5017 case RTX_COMPARE:
5018 case RTX_COMM_COMPARE:
5019 split = find_split_point (&XEXP (x, 1), insn, false);
5020 if (split)
5021 return split;
5022 /* ... fall through ... */
5023 case RTX_UNARY:
5024 /* Some machines have (and (shift ...) ...) insns. If X is not
5025 an AND, but XEXP (X, 0) is, use it as our split point. */
5026 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5027 return &XEXP (x, 0);
5029 split = find_split_point (&XEXP (x, 0), insn, false);
5030 if (split)
5031 return split;
5032 return loc;
5034 default:
5035 /* Otherwise, we don't have a split point. */
5036 return 0;
5040 /* Throughout X, replace FROM with TO, and return the result.
5041 The result is TO if X is FROM;
5042 otherwise the result is X, but its contents may have been modified.
5043 If they were modified, a record was made in undobuf so that
5044 undo_all will (among other things) return X to its original state.
5046 If the number of changes necessary is too much to record to undo,
5047 the excess changes are not made, so the result is invalid.
5048 The changes already made can still be undone.
5049 undobuf.num_undo is incremented for such changes, so by testing that,
5050 the caller can tell whether the result is valid.
5052 `n_occurrences' is incremented each time FROM is replaced.
5054 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5056 IN_COND is nonzero if we are at the top level of a condition.
5058 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5059 by copying if `n_occurrences' is nonzero. */
5061 static rtx
5062 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5064 enum rtx_code code = GET_CODE (x);
5065 machine_mode op0_mode = VOIDmode;
5066 const char *fmt;
5067 int len, i;
5068 rtx new_rtx;
5070 /* Two expressions are equal if they are identical copies of a shared
5071 RTX or if they are both registers with the same register number
5072 and mode. */
5074 #define COMBINE_RTX_EQUAL_P(X,Y) \
5075 ((X) == (Y) \
5076 || (REG_P (X) && REG_P (Y) \
5077 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5079 /* Do not substitute into clobbers of regs -- this will never result in
5080 valid RTL. */
5081 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5082 return x;
5084 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5086 n_occurrences++;
5087 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5090 /* If X and FROM are the same register but different modes, they
5091 will not have been seen as equal above. However, the log links code
5092 will make a LOG_LINKS entry for that case. If we do nothing, we
5093 will try to rerecognize our original insn and, when it succeeds,
5094 we will delete the feeding insn, which is incorrect.
5096 So force this insn not to match in this (rare) case. */
5097 if (! in_dest && code == REG && REG_P (from)
5098 && reg_overlap_mentioned_p (x, from))
5099 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5101 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5102 of which may contain things that can be combined. */
5103 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5104 return x;
5106 /* It is possible to have a subexpression appear twice in the insn.
5107 Suppose that FROM is a register that appears within TO.
5108 Then, after that subexpression has been scanned once by `subst',
5109 the second time it is scanned, TO may be found. If we were
5110 to scan TO here, we would find FROM within it and create a
5111 self-referent rtl structure which is completely wrong. */
5112 if (COMBINE_RTX_EQUAL_P (x, to))
5113 return to;
5115 /* Parallel asm_operands need special attention because all of the
5116 inputs are shared across the arms. Furthermore, unsharing the
5117 rtl results in recognition failures. Failure to handle this case
5118 specially can result in circular rtl.
5120 Solve this by doing a normal pass across the first entry of the
5121 parallel, and only processing the SET_DESTs of the subsequent
5122 entries. Ug. */
5124 if (code == PARALLEL
5125 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5126 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5128 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5130 /* If this substitution failed, this whole thing fails. */
5131 if (GET_CODE (new_rtx) == CLOBBER
5132 && XEXP (new_rtx, 0) == const0_rtx)
5133 return new_rtx;
5135 SUBST (XVECEXP (x, 0, 0), new_rtx);
5137 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5139 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5141 if (!REG_P (dest)
5142 && GET_CODE (dest) != CC0
5143 && GET_CODE (dest) != PC)
5145 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5147 /* If this substitution failed, this whole thing fails. */
5148 if (GET_CODE (new_rtx) == CLOBBER
5149 && XEXP (new_rtx, 0) == const0_rtx)
5150 return new_rtx;
5152 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5156 else
5158 len = GET_RTX_LENGTH (code);
5159 fmt = GET_RTX_FORMAT (code);
5161 /* We don't need to process a SET_DEST that is a register, CC0,
5162 or PC, so set up to skip this common case. All other cases
5163 where we want to suppress replacing something inside a
5164 SET_SRC are handled via the IN_DEST operand. */
5165 if (code == SET
5166 && (REG_P (SET_DEST (x))
5167 || GET_CODE (SET_DEST (x)) == CC0
5168 || GET_CODE (SET_DEST (x)) == PC))
5169 fmt = "ie";
5171 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5172 constant. */
5173 if (fmt[0] == 'e')
5174 op0_mode = GET_MODE (XEXP (x, 0));
5176 for (i = 0; i < len; i++)
5178 if (fmt[i] == 'E')
5180 int j;
5181 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5183 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5185 new_rtx = (unique_copy && n_occurrences
5186 ? copy_rtx (to) : to);
5187 n_occurrences++;
5189 else
5191 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5192 unique_copy);
5194 /* If this substitution failed, this whole thing
5195 fails. */
5196 if (GET_CODE (new_rtx) == CLOBBER
5197 && XEXP (new_rtx, 0) == const0_rtx)
5198 return new_rtx;
5201 SUBST (XVECEXP (x, i, j), new_rtx);
5204 else if (fmt[i] == 'e')
5206 /* If this is a register being set, ignore it. */
5207 new_rtx = XEXP (x, i);
5208 if (in_dest
5209 && i == 0
5210 && (((code == SUBREG || code == ZERO_EXTRACT)
5211 && REG_P (new_rtx))
5212 || code == STRICT_LOW_PART))
5215 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5217 /* In general, don't install a subreg involving two
5218 modes that are not tieable. It can worsen register
5219 allocation, and can even make invalid reload
5220 insns, since the reg inside may need to be copied
5221 from in the outside mode, and that may be invalid
5222 if it is an fp reg copied in integer mode.
5224 We allow two exceptions to this: It is valid if
5225 it is inside another SUBREG and the mode of that
5226 SUBREG and the mode of the inside of TO is
5227 tieable and it is valid if X is a SET that copies
5228 FROM to CC0. */
5230 if (GET_CODE (to) == SUBREG
5231 && ! MODES_TIEABLE_P (GET_MODE (to),
5232 GET_MODE (SUBREG_REG (to)))
5233 && ! (code == SUBREG
5234 && MODES_TIEABLE_P (GET_MODE (x),
5235 GET_MODE (SUBREG_REG (to))))
5236 #ifdef HAVE_cc0
5237 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5238 #endif
5240 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5242 if (code == SUBREG
5243 && REG_P (to)
5244 && REGNO (to) < FIRST_PSEUDO_REGISTER
5245 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5246 SUBREG_BYTE (x),
5247 GET_MODE (x)) < 0)
5248 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5250 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5251 n_occurrences++;
5253 else
5254 /* If we are in a SET_DEST, suppress most cases unless we
5255 have gone inside a MEM, in which case we want to
5256 simplify the address. We assume here that things that
5257 are actually part of the destination have their inner
5258 parts in the first expression. This is true for SUBREG,
5259 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5260 things aside from REG and MEM that should appear in a
5261 SET_DEST. */
5262 new_rtx = subst (XEXP (x, i), from, to,
5263 (((in_dest
5264 && (code == SUBREG || code == STRICT_LOW_PART
5265 || code == ZERO_EXTRACT))
5266 || code == SET)
5267 && i == 0),
5268 code == IF_THEN_ELSE && i == 0,
5269 unique_copy);
5271 /* If we found that we will have to reject this combination,
5272 indicate that by returning the CLOBBER ourselves, rather than
5273 an expression containing it. This will speed things up as
5274 well as prevent accidents where two CLOBBERs are considered
5275 to be equal, thus producing an incorrect simplification. */
5277 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5278 return new_rtx;
5280 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5282 machine_mode mode = GET_MODE (x);
5284 x = simplify_subreg (GET_MODE (x), new_rtx,
5285 GET_MODE (SUBREG_REG (x)),
5286 SUBREG_BYTE (x));
5287 if (! x)
5288 x = gen_rtx_CLOBBER (mode, const0_rtx);
5290 else if (CONST_SCALAR_INT_P (new_rtx)
5291 && GET_CODE (x) == ZERO_EXTEND)
5293 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5294 new_rtx, GET_MODE (XEXP (x, 0)));
5295 gcc_assert (x);
5297 else
5298 SUBST (XEXP (x, i), new_rtx);
5303 /* Check if we are loading something from the constant pool via float
5304 extension; in this case we would undo the compress_float_constant
5305 optimization and degrade the constant load to an immediate value. */
5306 if (GET_CODE (x) == FLOAT_EXTEND
5307 && MEM_P (XEXP (x, 0))
5308 && MEM_READONLY_P (XEXP (x, 0)))
5310 rtx tmp = avoid_constant_pool_reference (x);
5311 if (x != tmp)
5312 return x;
5315 /* Try to simplify X. If the simplification changed the code, it is likely
5316 that further simplification will help, so loop, but limit the number
5317 of repetitions that will be performed. */
5319 for (i = 0; i < 4; i++)
5321 /* If X is sufficiently simple, don't bother trying to do anything
5322 with it. */
5323 if (code != CONST_INT && code != REG && code != CLOBBER)
5324 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5326 if (GET_CODE (x) == code)
5327 break;
5329 code = GET_CODE (x);
5331 /* We no longer know the original mode of operand 0 since we
5332 have changed the form of X. */
5333 op0_mode = VOIDmode;
5336 return x;
5339 /* Simplify X, a piece of RTL. We just operate on the expression at the
5340 outer level; call `subst' to simplify recursively. Return the new
5341 expression.
5343 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5344 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5345 of a condition. */
5347 static rtx
5348 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5349 int in_cond)
5351 enum rtx_code code = GET_CODE (x);
5352 machine_mode mode = GET_MODE (x);
5353 rtx temp;
5354 int i;
5356 /* If this is a commutative operation, put a constant last and a complex
5357 expression first. We don't need to do this for comparisons here. */
5358 if (COMMUTATIVE_ARITH_P (x)
5359 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5361 temp = XEXP (x, 0);
5362 SUBST (XEXP (x, 0), XEXP (x, 1));
5363 SUBST (XEXP (x, 1), temp);
5366 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5367 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5368 things. Check for cases where both arms are testing the same
5369 condition.
5371 Don't do anything if all operands are very simple. */
5373 if ((BINARY_P (x)
5374 && ((!OBJECT_P (XEXP (x, 0))
5375 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5376 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5377 || (!OBJECT_P (XEXP (x, 1))
5378 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5379 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5380 || (UNARY_P (x)
5381 && (!OBJECT_P (XEXP (x, 0))
5382 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5383 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5385 rtx cond, true_rtx, false_rtx;
5387 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5388 if (cond != 0
5389 /* If everything is a comparison, what we have is highly unlikely
5390 to be simpler, so don't use it. */
5391 && ! (COMPARISON_P (x)
5392 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5394 rtx cop1 = const0_rtx;
5395 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5397 if (cond_code == NE && COMPARISON_P (cond))
5398 return x;
5400 /* Simplify the alternative arms; this may collapse the true and
5401 false arms to store-flag values. Be careful to use copy_rtx
5402 here since true_rtx or false_rtx might share RTL with x as a
5403 result of the if_then_else_cond call above. */
5404 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5405 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5407 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5408 is unlikely to be simpler. */
5409 if (general_operand (true_rtx, VOIDmode)
5410 && general_operand (false_rtx, VOIDmode))
5412 enum rtx_code reversed;
5414 /* Restarting if we generate a store-flag expression will cause
5415 us to loop. Just drop through in this case. */
5417 /* If the result values are STORE_FLAG_VALUE and zero, we can
5418 just make the comparison operation. */
5419 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5420 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5421 cond, cop1);
5422 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5423 && ((reversed = reversed_comparison_code_parts
5424 (cond_code, cond, cop1, NULL))
5425 != UNKNOWN))
5426 x = simplify_gen_relational (reversed, mode, VOIDmode,
5427 cond, cop1);
5429 /* Likewise, we can make the negate of a comparison operation
5430 if the result values are - STORE_FLAG_VALUE and zero. */
5431 else if (CONST_INT_P (true_rtx)
5432 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5433 && false_rtx == const0_rtx)
5434 x = simplify_gen_unary (NEG, mode,
5435 simplify_gen_relational (cond_code,
5436 mode, VOIDmode,
5437 cond, cop1),
5438 mode);
5439 else if (CONST_INT_P (false_rtx)
5440 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5441 && true_rtx == const0_rtx
5442 && ((reversed = reversed_comparison_code_parts
5443 (cond_code, cond, cop1, NULL))
5444 != UNKNOWN))
5445 x = simplify_gen_unary (NEG, mode,
5446 simplify_gen_relational (reversed,
5447 mode, VOIDmode,
5448 cond, cop1),
5449 mode);
5450 else
5451 return gen_rtx_IF_THEN_ELSE (mode,
5452 simplify_gen_relational (cond_code,
5453 mode,
5454 VOIDmode,
5455 cond,
5456 cop1),
5457 true_rtx, false_rtx);
5459 code = GET_CODE (x);
5460 op0_mode = VOIDmode;
5465 /* Try to fold this expression in case we have constants that weren't
5466 present before. */
5467 temp = 0;
5468 switch (GET_RTX_CLASS (code))
5470 case RTX_UNARY:
5471 if (op0_mode == VOIDmode)
5472 op0_mode = GET_MODE (XEXP (x, 0));
5473 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5474 break;
5475 case RTX_COMPARE:
5476 case RTX_COMM_COMPARE:
5478 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5479 if (cmp_mode == VOIDmode)
5481 cmp_mode = GET_MODE (XEXP (x, 1));
5482 if (cmp_mode == VOIDmode)
5483 cmp_mode = op0_mode;
5485 temp = simplify_relational_operation (code, mode, cmp_mode,
5486 XEXP (x, 0), XEXP (x, 1));
5488 break;
5489 case RTX_COMM_ARITH:
5490 case RTX_BIN_ARITH:
5491 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5492 break;
5493 case RTX_BITFIELD_OPS:
5494 case RTX_TERNARY:
5495 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5496 XEXP (x, 1), XEXP (x, 2));
5497 break;
5498 default:
5499 break;
5502 if (temp)
5504 x = temp;
5505 code = GET_CODE (temp);
5506 op0_mode = VOIDmode;
5507 mode = GET_MODE (temp);
5510 /* First see if we can apply the inverse distributive law. */
5511 if (code == PLUS || code == MINUS
5512 || code == AND || code == IOR || code == XOR)
5514 x = apply_distributive_law (x);
5515 code = GET_CODE (x);
5516 op0_mode = VOIDmode;
5519 /* If CODE is an associative operation not otherwise handled, see if we
5520 can associate some operands. This can win if they are constants or
5521 if they are logically related (i.e. (a & b) & a). */
5522 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5523 || code == AND || code == IOR || code == XOR
5524 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5525 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5526 || (flag_associative_math && FLOAT_MODE_P (mode))))
5528 if (GET_CODE (XEXP (x, 0)) == code)
5530 rtx other = XEXP (XEXP (x, 0), 0);
5531 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5532 rtx inner_op1 = XEXP (x, 1);
5533 rtx inner;
5535 /* Make sure we pass the constant operand if any as the second
5536 one if this is a commutative operation. */
5537 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5539 rtx tem = inner_op0;
5540 inner_op0 = inner_op1;
5541 inner_op1 = tem;
5543 inner = simplify_binary_operation (code == MINUS ? PLUS
5544 : code == DIV ? MULT
5545 : code,
5546 mode, inner_op0, inner_op1);
5548 /* For commutative operations, try the other pair if that one
5549 didn't simplify. */
5550 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5552 other = XEXP (XEXP (x, 0), 1);
5553 inner = simplify_binary_operation (code, mode,
5554 XEXP (XEXP (x, 0), 0),
5555 XEXP (x, 1));
5558 if (inner)
5559 return simplify_gen_binary (code, mode, other, inner);
5563 /* A little bit of algebraic simplification here. */
5564 switch (code)
5566 case MEM:
5567 /* Ensure that our address has any ASHIFTs converted to MULT in case
5568 address-recognizing predicates are called later. */
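/* For instance, an address computed as (plus (ashift R 2) BASE) is
   rewritten here as (plus (mult R 4) BASE): a shift by N is the same as
   a multiplication by 2**N, and the MULT form is what the
   address-recognizing predicates expect to see.  */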
5569 temp = make_compound_operation (XEXP (x, 0), MEM);
5570 SUBST (XEXP (x, 0), temp);
5571 break;
5573 case SUBREG:
5574 if (op0_mode == VOIDmode)
5575 op0_mode = GET_MODE (SUBREG_REG (x));
5577 /* See if this can be moved to simplify_subreg. */
5578 if (CONSTANT_P (SUBREG_REG (x))
5579 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5580 /* Don't call gen_lowpart if the inner mode
5581 is VOIDmode and we cannot simplify it, as SUBREG without
5582 inner mode is invalid. */
5583 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5584 || gen_lowpart_common (mode, SUBREG_REG (x))))
5585 return gen_lowpart (mode, SUBREG_REG (x));
5587 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5588 break;
5590 rtx temp;
5591 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5592 SUBREG_BYTE (x));
5593 if (temp)
5594 return temp;
5596 /* If op is known to have all lower bits zero, the result is zero. */
5597 if (!in_dest
5598 && SCALAR_INT_MODE_P (mode)
5599 && SCALAR_INT_MODE_P (op0_mode)
5600 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
5601 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5602 && HWI_COMPUTABLE_MODE_P (op0_mode)
5603 && (nonzero_bits (SUBREG_REG (x), op0_mode)
5604 & GET_MODE_MASK (mode)) == 0)
5605 return CONST0_RTX (mode);
5608 /* Don't change the mode of the MEM if that would change the meaning
5609 of the address. */
5610 if (MEM_P (SUBREG_REG (x))
5611 && (MEM_VOLATILE_P (SUBREG_REG (x))
5612 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5613 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5614 return gen_rtx_CLOBBER (mode, const0_rtx);
5616 /* Note that we cannot do any narrowing for non-constants since
5617 we might have been counting on using the fact that some bits were
5618 zero. We now do this in the SET. */
5620 break;
5622 case NEG:
5623 temp = expand_compound_operation (XEXP (x, 0));
5625 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5626 replaced by (lshiftrt X C). This will convert
5627 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
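/* For instance, in SImode (neg (ashiftrt X 31)) yields 0 when the sign
   bit of X is clear and 1 when it is set, which is exactly
   (lshiftrt X 31).  */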
5629 if (GET_CODE (temp) == ASHIFTRT
5630 && CONST_INT_P (XEXP (temp, 1))
5631 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5632 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5633 INTVAL (XEXP (temp, 1)));
5635 /* If X has only a single bit that might be nonzero, say, bit I, convert
5636 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5637 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5638 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5639 or a SUBREG of one since we'd be making the expression more
5640 complex if it was just a register. */
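/* For instance, if X is known to be either 0 or 8 (only bit 3 possibly
   nonzero), then in SImode (neg X) is (ashiftrt (ashift X 28) 28):
   moving bit 3 into the sign position and back reproduces 0 or -8.  */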
5642 if (!REG_P (temp)
5643 && ! (GET_CODE (temp) == SUBREG
5644 && REG_P (SUBREG_REG (temp)))
5645 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5647 rtx temp1 = simplify_shift_const
5648 (NULL_RTX, ASHIFTRT, mode,
5649 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5650 GET_MODE_PRECISION (mode) - 1 - i),
5651 GET_MODE_PRECISION (mode) - 1 - i);
5653 /* If all we did was surround TEMP with the two shifts, we
5654 haven't improved anything, so don't use it. Otherwise,
5655 we are better off with TEMP1. */
5656 if (GET_CODE (temp1) != ASHIFTRT
5657 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5658 || XEXP (XEXP (temp1, 0), 0) != temp)
5659 return temp1;
5661 break;
5663 case TRUNCATE:
5664 /* We can't handle truncation to a partial integer mode here
5665 because we don't know the real bitsize of the partial
5666 integer mode. */
5667 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5668 break;
5670 if (HWI_COMPUTABLE_MODE_P (mode))
5671 SUBST (XEXP (x, 0),
5672 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5673 GET_MODE_MASK (mode), 0));
5675 /* We can truncate a constant value and return it. */
5676 if (CONST_INT_P (XEXP (x, 0)))
5677 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5679 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5680 whose value is a comparison can be replaced with a subreg if
5681 STORE_FLAG_VALUE permits. */
5682 if (HWI_COMPUTABLE_MODE_P (mode)
5683 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5684 && (temp = get_last_value (XEXP (x, 0)))
5685 && COMPARISON_P (temp))
5686 return gen_lowpart (mode, XEXP (x, 0));
5687 break;
5689 case CONST:
5690 /* (const (const X)) can become (const X). Do it this way rather than
5691 returning the inner CONST since CONST can be shared with a
5692 REG_EQUAL note. */
5693 if (GET_CODE (XEXP (x, 0)) == CONST)
5694 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5695 break;
5697 #ifdef HAVE_lo_sum
5698 case LO_SUM:
5699 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5700 can add in an offset. find_split_point will split this address up
5701 again if it doesn't match. */
5702 if (GET_CODE (XEXP (x, 0)) == HIGH
5703 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5704 return XEXP (x, 1);
5705 break;
5706 #endif
5708 case PLUS:
5709 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5710 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5711 bit-field and can be replaced by either a sign_extend or a
5712 sign_extract. The `and' may be a zero_extend and the two
5713 <c>, -<c> constants may be reversed. */
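/* For instance, with i == 7 this matches
   (plus (xor (and X 255) 128) -128), the classic idiom for sign
   extending the low byte of X, and rewrites it as a left shift by
   precision - 8 followed by an arithmetic right shift by the same
   amount.  */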
5714 if (GET_CODE (XEXP (x, 0)) == XOR
5715 && CONST_INT_P (XEXP (x, 1))
5716 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5717 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5718 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5719 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5720 && HWI_COMPUTABLE_MODE_P (mode)
5721 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5722 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5723 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5724 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5725 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5726 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5727 == (unsigned int) i + 1))))
5728 return simplify_shift_const
5729 (NULL_RTX, ASHIFTRT, mode,
5730 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5731 XEXP (XEXP (XEXP (x, 0), 0), 0),
5732 GET_MODE_PRECISION (mode) - (i + 1)),
5733 GET_MODE_PRECISION (mode) - (i + 1));
5735 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5736 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5737 the bitsize of the mode - 1. This allows simplification of
5738 "a = (b & 8) == 0;" */
5739 if (XEXP (x, 1) == constm1_rtx
5740 && !REG_P (XEXP (x, 0))
5741 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5742 && REG_P (SUBREG_REG (XEXP (x, 0))))
5743 && nonzero_bits (XEXP (x, 0), mode) == 1)
5744 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5745 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5746 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5747 GET_MODE_PRECISION (mode) - 1),
5748 GET_MODE_PRECISION (mode) - 1);
5750 /* If we are adding two things that have no bits in common, convert
5751 the addition into an IOR. This will often be further simplified,
5752 for example in cases like ((a & 1) + (a & 2)), which can
5753 become a & 3. */
5755 if (HWI_COMPUTABLE_MODE_P (mode)
5756 && (nonzero_bits (XEXP (x, 0), mode)
5757 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5759 /* Try to simplify the expression further. */
5760 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5761 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5763 /* If we could, great. If not, do not go ahead with the IOR
5764 replacement, since PLUS appears in many special purpose
5765 address arithmetic instructions. */
5766 if (GET_CODE (temp) != CLOBBER
5767 && (GET_CODE (temp) != IOR
5768 || ((XEXP (temp, 0) != XEXP (x, 0)
5769 || XEXP (temp, 1) != XEXP (x, 1))
5770 && (XEXP (temp, 0) != XEXP (x, 1)
5771 || XEXP (temp, 1) != XEXP (x, 0)))))
5772 return temp;
5774 break;
5776 case MINUS:
5777 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5778 (and <foo> (const_int pow2-1)) */
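/* For instance, (minus FOO (and FOO (const_int -8))) becomes
   (and FOO (const_int 7)): subtracting FOO with its low three bits
   cleared leaves exactly those low bits.  */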
5779 if (GET_CODE (XEXP (x, 1)) == AND
5780 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5781 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5782 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5783 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5784 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5785 break;
5787 case MULT:
5788 /* If we have (mult (plus A B) C), apply the distributive law and then
5789 the inverse distributive law to see if things simplify. This
5790 occurs mostly in addresses, often when unrolling loops. */
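/* For instance, (mult (plus R (const_int 4)) (const_int 8)) distributes
   to (plus (mult R (const_int 8)) (const_int 32)), a form that address
   arithmetic and further simplification handle more easily.  */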
5792 if (GET_CODE (XEXP (x, 0)) == PLUS)
5794 rtx result = distribute_and_simplify_rtx (x, 0);
5795 if (result)
5796 return result;
5799 /* Try to simplify a*(b/c) as (a*b)/c. */
5800 if (FLOAT_MODE_P (mode) && flag_associative_math
5801 && GET_CODE (XEXP (x, 0)) == DIV)
5803 rtx tem = simplify_binary_operation (MULT, mode,
5804 XEXP (XEXP (x, 0), 0),
5805 XEXP (x, 1));
5806 if (tem)
5807 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5809 break;
5811 case UDIV:
5812 /* If this is a divide by a power of two, treat it as a shift if
5813 its first operand is a shift. */
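/* For instance, (udiv (lshiftrt X 2) (const_int 4)) is rewritten as a
   logical right shift by two more bits, i.e. (lshiftrt X 4).  */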
5814 if (CONST_INT_P (XEXP (x, 1))
5815 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5816 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5817 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5818 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5819 || GET_CODE (XEXP (x, 0)) == ROTATE
5820 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5821 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5822 break;
5824 case EQ: case NE:
5825 case GT: case GTU: case GE: case GEU:
5826 case LT: case LTU: case LE: case LEU:
5827 case UNEQ: case LTGT:
5828 case UNGT: case UNGE:
5829 case UNLT: case UNLE:
5830 case UNORDERED: case ORDERED:
5831 /* If the first operand is a condition code, we can't do anything
5832 with it. */
5833 if (GET_CODE (XEXP (x, 0)) == COMPARE
5834 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5835 && ! CC0_P (XEXP (x, 0))))
5837 rtx op0 = XEXP (x, 0);
5838 rtx op1 = XEXP (x, 1);
5839 enum rtx_code new_code;
5841 if (GET_CODE (op0) == COMPARE)
5842 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5844 /* Simplify our comparison, if possible. */
5845 new_code = simplify_comparison (code, &op0, &op1);
5847 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5848 if only the low-order bit is possibly nonzero in X (such as when
5849 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5850 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5851 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5852 (plus X 1).
5854 Remove any ZERO_EXTRACT we made when thinking this was a
5855 comparison. It may now be simpler to use, e.g., an AND. If a
5856 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5857 the call to make_compound_operation in the SET case.
5859 Don't apply these optimizations if the caller would
5860 prefer a comparison rather than a value.
5861 E.g., for the condition in an IF_THEN_ELSE most targets need
5862 an explicit comparison. */
5864 if (in_cond)
5867 else if (STORE_FLAG_VALUE == 1
5868 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5869 && op1 == const0_rtx
5870 && mode == GET_MODE (op0)
5871 && nonzero_bits (op0, mode) == 1)
5872 return gen_lowpart (mode,
5873 expand_compound_operation (op0));
5875 else if (STORE_FLAG_VALUE == 1
5876 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5877 && op1 == const0_rtx
5878 && mode == GET_MODE (op0)
5879 && (num_sign_bit_copies (op0, mode)
5880 == GET_MODE_PRECISION (mode)))
5882 op0 = expand_compound_operation (op0);
5883 return simplify_gen_unary (NEG, mode,
5884 gen_lowpart (mode, op0),
5885 mode);
5888 else if (STORE_FLAG_VALUE == 1
5889 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5890 && op1 == const0_rtx
5891 && mode == GET_MODE (op0)
5892 && nonzero_bits (op0, mode) == 1)
5894 op0 = expand_compound_operation (op0);
5895 return simplify_gen_binary (XOR, mode,
5896 gen_lowpart (mode, op0),
5897 const1_rtx);
5900 else if (STORE_FLAG_VALUE == 1
5901 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5902 && op1 == const0_rtx
5903 && mode == GET_MODE (op0)
5904 && (num_sign_bit_copies (op0, mode)
5905 == GET_MODE_PRECISION (mode)))
5907 op0 = expand_compound_operation (op0);
5908 return plus_constant (mode, gen_lowpart (mode, op0), 1);
5911 /* If STORE_FLAG_VALUE is -1, we have cases similar to
5912 those above. */
5913 if (in_cond)
5916 else if (STORE_FLAG_VALUE == -1
5917 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5918 && op1 == const0_rtx
5919 && mode == GET_MODE (op0)
5920 && (num_sign_bit_copies (op0, mode)
5921 == GET_MODE_PRECISION (mode)))
5922 return gen_lowpart (mode,
5923 expand_compound_operation (op0));
5925 else if (STORE_FLAG_VALUE == -1
5926 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5927 && op1 == const0_rtx
5928 && mode == GET_MODE (op0)
5929 && nonzero_bits (op0, mode) == 1)
5931 op0 = expand_compound_operation (op0);
5932 return simplify_gen_unary (NEG, mode,
5933 gen_lowpart (mode, op0),
5934 mode);
5937 else if (STORE_FLAG_VALUE == -1
5938 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5939 && op1 == const0_rtx
5940 && mode == GET_MODE (op0)
5941 && (num_sign_bit_copies (op0, mode)
5942 == GET_MODE_PRECISION (mode)))
5944 op0 = expand_compound_operation (op0);
5945 return simplify_gen_unary (NOT, mode,
5946 gen_lowpart (mode, op0),
5947 mode);
5950 /* If X is 0/1, (eq X 0) is X-1. */
5951 else if (STORE_FLAG_VALUE == -1
5952 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5953 && op1 == const0_rtx
5954 && mode == GET_MODE (op0)
5955 && nonzero_bits (op0, mode) == 1)
5957 op0 = expand_compound_operation (op0);
5958 return plus_constant (mode, gen_lowpart (mode, op0), -1);
5961 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5962 one bit that might be nonzero, we can convert (ne x 0) to
5963 (ashift x c) where C puts the bit in the sign bit. Remove any
5964 AND with STORE_FLAG_VALUE when we are done, since we are only
5965 going to test the sign bit. */
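/* For instance, if STORE_FLAG_VALUE is the SImode sign bit and OP0 can
   only have bit 3 nonzero, (ne OP0 0) becomes (ashift OP0 28), which
   moves the interesting bit into the sign position.  */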
5966 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5967 && HWI_COMPUTABLE_MODE_P (mode)
5968 && val_signbit_p (mode, STORE_FLAG_VALUE)
5969 && op1 == const0_rtx
5970 && mode == GET_MODE (op0)
5971 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5973 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5974 expand_compound_operation (op0),
5975 GET_MODE_PRECISION (mode) - 1 - i);
5976 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5977 return XEXP (x, 0);
5978 else
5979 return x;
5982 /* If the code changed, return a whole new comparison.
5983 We also need to avoid using SUBST in cases where
5984 simplify_comparison has widened a comparison with a CONST_INT,
5985 since in that case the wider CONST_INT may fail the sanity
5986 checks in do_SUBST. */
5987 if (new_code != code
5988 || (CONST_INT_P (op1)
5989 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
5990 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
5991 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5993 /* Otherwise, keep this operation, but maybe change its operands.
5994 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
5995 SUBST (XEXP (x, 0), op0);
5996 SUBST (XEXP (x, 1), op1);
5998 break;
6000 case IF_THEN_ELSE:
6001 return simplify_if_then_else (x);
6003 case ZERO_EXTRACT:
6004 case SIGN_EXTRACT:
6005 case ZERO_EXTEND:
6006 case SIGN_EXTEND:
6007 /* If we are processing SET_DEST, we are done. */
6008 if (in_dest)
6009 return x;
6011 return expand_compound_operation (x);
6013 case SET:
6014 return simplify_set (x);
6016 case AND:
6017 case IOR:
6018 return simplify_logical (x);
6020 case ASHIFT:
6021 case LSHIFTRT:
6022 case ASHIFTRT:
6023 case ROTATE:
6024 case ROTATERT:
6025 /* If this is a shift by a constant amount, simplify it. */
6026 if (CONST_INT_P (XEXP (x, 1)))
6027 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6028 INTVAL (XEXP (x, 1)));
6030 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6031 SUBST (XEXP (x, 1),
6032 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6033 ((unsigned HOST_WIDE_INT) 1
6034 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6035 - 1,
6036 0));
6037 break;
6039 default:
6040 break;
6043 return x;
6046 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6048 static rtx
6049 simplify_if_then_else (rtx x)
6051 machine_mode mode = GET_MODE (x);
6052 rtx cond = XEXP (x, 0);
6053 rtx true_rtx = XEXP (x, 1);
6054 rtx false_rtx = XEXP (x, 2);
6055 enum rtx_code true_code = GET_CODE (cond);
6056 int comparison_p = COMPARISON_P (cond);
6057 rtx temp;
6058 int i;
6059 enum rtx_code false_code;
6060 rtx reversed;
6062 /* Simplify storing of the truth value. */
6063 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6064 return simplify_gen_relational (true_code, mode, VOIDmode,
6065 XEXP (cond, 0), XEXP (cond, 1));
6067 /* Also when the truth value has to be reversed. */
6068 if (comparison_p
6069 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6070 && (reversed = reversed_comparison (cond, mode)))
6071 return reversed;
6073 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6074 in it is being compared against certain values. Get the true and false
6075 comparisons and see if that says anything about the value of each arm. */
6077 if (comparison_p
6078 && ((false_code = reversed_comparison_code (cond, NULL))
6079 != UNKNOWN)
6080 && REG_P (XEXP (cond, 0)))
6082 HOST_WIDE_INT nzb;
6083 rtx from = XEXP (cond, 0);
6084 rtx true_val = XEXP (cond, 1);
6085 rtx false_val = true_val;
6086 int swapped = 0;
6088 /* If FALSE_CODE is EQ, swap the codes and arms. */
6090 if (false_code == EQ)
6092 swapped = 1, true_code = EQ, false_code = NE;
6093 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
6096 /* If we are comparing against zero and the expression being tested has
6097 only a single bit that might be nonzero, that is its value when it is
6098 not equal to zero. Similarly if it is known to be -1 or 0. */
6100 if (true_code == EQ && true_val == const0_rtx
6101 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
6103 false_code = EQ;
6104 false_val = gen_int_mode (nzb, GET_MODE (from));
6106 else if (true_code == EQ && true_val == const0_rtx
6107 && (num_sign_bit_copies (from, GET_MODE (from))
6108 == GET_MODE_PRECISION (GET_MODE (from))))
6110 false_code = EQ;
6111 false_val = constm1_rtx;
6114 /* Now simplify an arm if we know the value of the register in the
6115 branch and it is used in the arm. Be careful due to the potential
6116 of locally-shared RTL. */
6118 if (reg_mentioned_p (from, true_rtx))
6119 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6120 from, true_val),
6121 pc_rtx, pc_rtx, 0, 0, 0);
6122 if (reg_mentioned_p (from, false_rtx))
6123 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6124 from, false_val),
6125 pc_rtx, pc_rtx, 0, 0, 0);
6127 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6128 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6130 true_rtx = XEXP (x, 1);
6131 false_rtx = XEXP (x, 2);
6132 true_code = GET_CODE (cond);
6135 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6136 reversed, do so to avoid needing two sets of patterns for
6137 subtract-and-branch insns. Similarly if we have a constant in the true
6138 arm, the false arm is the same as the first operand of the comparison, or
6139 the false arm is more complicated than the true arm. */
6141 if (comparison_p
6142 && reversed_comparison_code (cond, NULL) != UNKNOWN
6143 && (true_rtx == pc_rtx
6144 || (CONSTANT_P (true_rtx)
6145 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6146 || true_rtx == const0_rtx
6147 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6148 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6149 && !OBJECT_P (false_rtx))
6150 || reg_mentioned_p (true_rtx, false_rtx)
6151 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6153 true_code = reversed_comparison_code (cond, NULL);
6154 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6155 SUBST (XEXP (x, 1), false_rtx);
6156 SUBST (XEXP (x, 2), true_rtx);
6158 temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
6159 cond = XEXP (x, 0);
6161 /* It is possible that the conditional has been simplified out. */
6162 true_code = GET_CODE (cond);
6163 comparison_p = COMPARISON_P (cond);
6166 /* If the two arms are identical, we don't need the comparison. */
6168 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6169 return true_rtx;
6171 /* Convert a == b ? b : a to "a". */
6172 if (true_code == EQ && ! side_effects_p (cond)
6173 && !HONOR_NANS (mode)
6174 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6175 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6176 return false_rtx;
6177 else if (true_code == NE && ! side_effects_p (cond)
6178 && !HONOR_NANS (mode)
6179 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6180 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6181 return true_rtx;
6183 /* Look for cases where we have (abs x) or (neg (abs X)). */
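/* For instance, (if_then_else (ge X 0) X (neg X)) is (abs X), and the
   LT/LE forms below give (neg (abs X)).  */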
6185 if (GET_MODE_CLASS (mode) == MODE_INT
6186 && comparison_p
6187 && XEXP (cond, 1) == const0_rtx
6188 && GET_CODE (false_rtx) == NEG
6189 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6190 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6191 && ! side_effects_p (true_rtx))
6192 switch (true_code)
6194 case GT:
6195 case GE:
6196 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6197 case LT:
6198 case LE:
6199 return
6200 simplify_gen_unary (NEG, mode,
6201 simplify_gen_unary (ABS, mode, true_rtx, mode),
6202 mode);
6203 default:
6204 break;
6207 /* Look for MIN or MAX. */
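/* For instance, (if_then_else (gt A B) A B) is (smax A B) and
   (if_then_else (ltu A B) A B) is (umin A B).  */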
6209 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6210 && comparison_p
6211 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6212 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6213 && ! side_effects_p (cond))
6214 switch (true_code)
6216 case GE:
6217 case GT:
6218 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6219 case LE:
6220 case LT:
6221 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6222 case GEU:
6223 case GTU:
6224 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6225 case LEU:
6226 case LTU:
6227 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6228 default:
6229 break;
6232 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6233 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6234 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6235 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6236 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6237 neither 1 nor -1, but it isn't worth checking for. */
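/* For instance, with STORE_FLAG_VALUE == 1,
   (if_then_else (ne A 0) (plus Z (const_int 4)) Z) can be rewritten as
   (plus Z (mult (ne A 0) (const_int 4))): the multiplication contributes
   4 when the condition holds and 0 otherwise.  */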
6239 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6240 && comparison_p
6241 && GET_MODE_CLASS (mode) == MODE_INT
6242 && ! side_effects_p (x))
6244 rtx t = make_compound_operation (true_rtx, SET);
6245 rtx f = make_compound_operation (false_rtx, SET);
6246 rtx cond_op0 = XEXP (cond, 0);
6247 rtx cond_op1 = XEXP (cond, 1);
6248 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6249 machine_mode m = mode;
6250 rtx z = 0, c1 = NULL_RTX;
6252 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6253 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6254 || GET_CODE (t) == ASHIFT
6255 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6256 && rtx_equal_p (XEXP (t, 0), f))
6257 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6259 /* If an identity-zero op is commutative, check whether there
6260 would be a match if we swapped the operands. */
6261 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6262 || GET_CODE (t) == XOR)
6263 && rtx_equal_p (XEXP (t, 1), f))
6264 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6265 else if (GET_CODE (t) == SIGN_EXTEND
6266 && (GET_CODE (XEXP (t, 0)) == PLUS
6267 || GET_CODE (XEXP (t, 0)) == MINUS
6268 || GET_CODE (XEXP (t, 0)) == IOR
6269 || GET_CODE (XEXP (t, 0)) == XOR
6270 || GET_CODE (XEXP (t, 0)) == ASHIFT
6271 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6272 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6273 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6274 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6275 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6276 && (num_sign_bit_copies (f, GET_MODE (f))
6277 > (unsigned int)
6278 (GET_MODE_PRECISION (mode)
6279 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6281 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6282 extend_op = SIGN_EXTEND;
6283 m = GET_MODE (XEXP (t, 0));
6285 else if (GET_CODE (t) == SIGN_EXTEND
6286 && (GET_CODE (XEXP (t, 0)) == PLUS
6287 || GET_CODE (XEXP (t, 0)) == IOR
6288 || GET_CODE (XEXP (t, 0)) == XOR)
6289 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6290 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6291 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6292 && (num_sign_bit_copies (f, GET_MODE (f))
6293 > (unsigned int)
6294 (GET_MODE_PRECISION (mode)
6295 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6297 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6298 extend_op = SIGN_EXTEND;
6299 m = GET_MODE (XEXP (t, 0));
6301 else if (GET_CODE (t) == ZERO_EXTEND
6302 && (GET_CODE (XEXP (t, 0)) == PLUS
6303 || GET_CODE (XEXP (t, 0)) == MINUS
6304 || GET_CODE (XEXP (t, 0)) == IOR
6305 || GET_CODE (XEXP (t, 0)) == XOR
6306 || GET_CODE (XEXP (t, 0)) == ASHIFT
6307 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6308 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6309 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6310 && HWI_COMPUTABLE_MODE_P (mode)
6311 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6312 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6313 && ((nonzero_bits (f, GET_MODE (f))
6314 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6315 == 0))
6317 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6318 extend_op = ZERO_EXTEND;
6319 m = GET_MODE (XEXP (t, 0));
6321 else if (GET_CODE (t) == ZERO_EXTEND
6322 && (GET_CODE (XEXP (t, 0)) == PLUS
6323 || GET_CODE (XEXP (t, 0)) == IOR
6324 || GET_CODE (XEXP (t, 0)) == XOR)
6325 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6326 && HWI_COMPUTABLE_MODE_P (mode)
6327 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6328 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6329 && ((nonzero_bits (f, GET_MODE (f))
6330 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6331 == 0))
6333 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6334 extend_op = ZERO_EXTEND;
6335 m = GET_MODE (XEXP (t, 0));
6338 if (z)
6340 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6341 cond_op0, cond_op1),
6342 pc_rtx, pc_rtx, 0, 0, 0);
6343 temp = simplify_gen_binary (MULT, m, temp,
6344 simplify_gen_binary (MULT, m, c1,
6345 const_true_rtx));
6346 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6347 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6349 if (extend_op != UNKNOWN)
6350 temp = simplify_gen_unary (extend_op, mode, temp, m);
6352 return temp;
6356 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6357 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6358 negation of a single bit, we can convert this operation to a shift. We
6359 can actually do this more generally, but it doesn't seem worth it. */
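/* For instance, if A is known to be 0 or 1,
   (if_then_else (ne A 0) (const_int 8) (const_int 0)) is simply
   (ashift A 3).  */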
6361 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6362 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6363 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6364 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6365 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6366 == GET_MODE_PRECISION (mode))
6367 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6368 return
6369 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6370 gen_lowpart (mode, XEXP (cond, 0)), i);
6372 /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG when nonzero_bits (REG) == 8. */
6373 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6374 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6375 && GET_MODE (XEXP (cond, 0)) == mode
6376 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6377 == nonzero_bits (XEXP (cond, 0), mode)
6378 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6379 return XEXP (cond, 0);
6381 return x;
6384 /* Simplify X, a SET expression. Return the new expression. */
6386 static rtx
6387 simplify_set (rtx x)
6389 rtx src = SET_SRC (x);
6390 rtx dest = SET_DEST (x);
6391 machine_mode mode
6392 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6393 rtx_insn *other_insn;
6394 rtx *cc_use;
6396 /* (set (pc) (return)) gets written as (return). */
6397 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6398 return src;
6400 /* Now that we know for sure which bits of SRC we are using, see if we can
6401 simplify the expression for the object knowing that we only need the
6402 low-order bits. */
6404 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6406 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6407 SUBST (SET_SRC (x), src);
6410 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6411 the comparison result and try to simplify it unless we already have used
6412 undobuf.other_insn. */
6413 if ((GET_MODE_CLASS (mode) == MODE_CC
6414 || GET_CODE (src) == COMPARE
6415 || CC0_P (dest))
6416 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6417 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6418 && COMPARISON_P (*cc_use)
6419 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6421 enum rtx_code old_code = GET_CODE (*cc_use);
6422 enum rtx_code new_code;
6423 rtx op0, op1, tmp;
6424 int other_changed = 0;
6425 rtx inner_compare = NULL_RTX;
6426 machine_mode compare_mode = GET_MODE (dest);
6428 if (GET_CODE (src) == COMPARE)
6430 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6431 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6433 inner_compare = op0;
6434 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6437 else
6438 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6440 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6441 op0, op1);
6442 if (!tmp)
6443 new_code = old_code;
6444 else if (!CONSTANT_P (tmp))
6446 new_code = GET_CODE (tmp);
6447 op0 = XEXP (tmp, 0);
6448 op1 = XEXP (tmp, 1);
6450 else
6452 rtx pat = PATTERN (other_insn);
6453 undobuf.other_insn = other_insn;
6454 SUBST (*cc_use, tmp);
6456 /* Attempt to simplify CC user. */
6457 if (GET_CODE (pat) == SET)
6459 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6460 if (new_rtx != NULL_RTX)
6461 SUBST (SET_SRC (pat), new_rtx);
6464 /* Convert X into a no-op move. */
6465 SUBST (SET_DEST (x), pc_rtx);
6466 SUBST (SET_SRC (x), pc_rtx);
6467 return x;
6470 /* Simplify our comparison, if possible. */
6471 new_code = simplify_comparison (new_code, &op0, &op1);
6473 #ifdef SELECT_CC_MODE
6474 /* If this machine has CC modes other than CCmode, check to see if we
6475 need to use a different CC mode here. */
6476 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6477 compare_mode = GET_MODE (op0);
6478 else if (inner_compare
6479 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6480 && new_code == old_code
6481 && op0 == XEXP (inner_compare, 0)
6482 && op1 == XEXP (inner_compare, 1))
6483 compare_mode = GET_MODE (inner_compare);
6484 else
6485 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6487 #ifndef HAVE_cc0
6488 /* If the mode changed, we have to change SET_DEST, the mode in the
6489 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6490 a hard register, just build new versions with the proper mode. If it
6491 is a pseudo, we lose unless it is the only time we set the pseudo, in
6492 which case we can safely change its mode. */
6493 if (compare_mode != GET_MODE (dest))
6495 if (can_change_dest_mode (dest, 0, compare_mode))
6497 unsigned int regno = REGNO (dest);
6498 rtx new_dest;
6500 if (regno < FIRST_PSEUDO_REGISTER)
6501 new_dest = gen_rtx_REG (compare_mode, regno);
6502 else
6504 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6505 new_dest = regno_reg_rtx[regno];
6508 SUBST (SET_DEST (x), new_dest);
6509 SUBST (XEXP (*cc_use, 0), new_dest);
6510 other_changed = 1;
6512 dest = new_dest;
6515 #endif /* cc0 */
6516 #endif /* SELECT_CC_MODE */
6518 /* If the code changed, we have to build a new comparison in
6519 undobuf.other_insn. */
6520 if (new_code != old_code)
6522 int other_changed_previously = other_changed;
6523 unsigned HOST_WIDE_INT mask;
6524 rtx old_cc_use = *cc_use;
6526 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6527 dest, const0_rtx));
6528 other_changed = 1;
6530 /* If the only change we made was to change an EQ into an NE or
6531 vice versa, OP0 has only one bit that might be nonzero, and OP1
6532 is zero, check if changing the user of the condition code will
6533 produce a valid insn. If it won't, we can keep the original code
6534 in that insn by surrounding our operation with an XOR. */
6536 if (((old_code == NE && new_code == EQ)
6537 || (old_code == EQ && new_code == NE))
6538 && ! other_changed_previously && op1 == const0_rtx
6539 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6540 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6542 rtx pat = PATTERN (other_insn), note = 0;
6544 if ((recog_for_combine (&pat, other_insn, &note) < 0
6545 && ! check_asm_operands (pat)))
6547 *cc_use = old_cc_use;
6548 other_changed = 0;
6550 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6551 gen_int_mode (mask,
6552 GET_MODE (op0)));
6557 if (other_changed)
6558 undobuf.other_insn = other_insn;
6560 /* Otherwise, if we didn't previously have a COMPARE in the
6561 correct mode, we need one. */
6562 if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6564 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6565 src = SET_SRC (x);
6567 else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6569 SUBST (SET_SRC (x), op0);
6570 src = SET_SRC (x);
6572 /* Otherwise, update the COMPARE if needed. */
6573 else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6575 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6576 src = SET_SRC (x);
6579 else
6581 /* Get SET_SRC in a form where we have placed back any
6582 compound expressions. Then do the checks below. */
6583 src = make_compound_operation (src, SET);
6584 SUBST (SET_SRC (x), src);
6587 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6588 and X being a REG or (subreg (reg)), we may be able to convert this to
6589 (set (subreg:m2 x) (op)).
6591 We can always do this if M1 is narrower than M2 because that means that
6592 we only care about the low bits of the result.
6594 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6595 perform a narrower operation than requested since the high-order bits will
6596 be undefined. On machines where it is defined, this transformation is safe
6597 as long as M1 and M2 have the same number of words. */
6599 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6600 && !OBJECT_P (SUBREG_REG (src))
6601 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6602 / UNITS_PER_WORD)
6603 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6604 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6605 #ifndef WORD_REGISTER_OPERATIONS
6606 && (GET_MODE_SIZE (GET_MODE (src))
6607 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6608 #endif
6609 #ifdef CANNOT_CHANGE_MODE_CLASS
6610 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6611 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6612 GET_MODE (SUBREG_REG (src)),
6613 GET_MODE (src)))
6614 #endif
6615 && (REG_P (dest)
6616 || (GET_CODE (dest) == SUBREG
6617 && REG_P (SUBREG_REG (dest)))))
6619 SUBST (SET_DEST (x),
6620 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6621 dest));
6622 SUBST (SET_SRC (x), SUBREG_REG (src));
6624 src = SET_SRC (x), dest = SET_DEST (x);
6627 #ifdef HAVE_cc0
6628 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6629 in SRC. */
6630 if (dest == cc0_rtx
6631 && GET_CODE (src) == SUBREG
6632 && subreg_lowpart_p (src)
6633 && (GET_MODE_PRECISION (GET_MODE (src))
6634 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6636 rtx inner = SUBREG_REG (src);
6637 machine_mode inner_mode = GET_MODE (inner);
6639 /* Here we make sure that we don't have a sign bit on. */
6640 if (val_signbit_known_clear_p (GET_MODE (src),
6641 nonzero_bits (inner, inner_mode)))
6643 SUBST (SET_SRC (x), inner);
6644 src = SET_SRC (x);
6647 #endif
6649 #ifdef LOAD_EXTEND_OP
6650 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6651 would require a paradoxical subreg. Replace the subreg with a
6652 zero_extend to avoid the reload that would otherwise be required. */
6654 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6655 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6656 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6657 && SUBREG_BYTE (src) == 0
6658 && paradoxical_subreg_p (src)
6659 && MEM_P (SUBREG_REG (src)))
6661 SUBST (SET_SRC (x),
6662 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6663 GET_MODE (src), SUBREG_REG (src)));
6665 src = SET_SRC (x);
6667 #endif
6669 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6670 are comparing an item known to be 0 or -1 against 0, use a logical
6671 operation instead. Check for one of the arms being an IOR of the other
6672 arm with some value. We compute three terms to be IOR'ed together. In
6673 practice, at most two will be nonzero. Then we do the IOR's. */
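/* For instance, if M is known to be 0 or -1,
   (if_then_else (ne M 0) A B) can be computed without a branch as
   (ior (and M A) (and (not M) B)).  */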
6675 if (GET_CODE (dest) != PC
6676 && GET_CODE (src) == IF_THEN_ELSE
6677 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6678 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6679 && XEXP (XEXP (src, 0), 1) == const0_rtx
6680 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6681 #ifdef HAVE_conditional_move
6682 && ! can_conditionally_move_p (GET_MODE (src))
6683 #endif
6684 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6685 GET_MODE (XEXP (XEXP (src, 0), 0)))
6686 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6687 && ! side_effects_p (src))
6689 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6690 ? XEXP (src, 1) : XEXP (src, 2));
6691 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6692 ? XEXP (src, 2) : XEXP (src, 1));
6693 rtx term1 = const0_rtx, term2, term3;
6695 if (GET_CODE (true_rtx) == IOR
6696 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6697 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6698 else if (GET_CODE (true_rtx) == IOR
6699 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6700 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6701 else if (GET_CODE (false_rtx) == IOR
6702 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6703 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6704 else if (GET_CODE (false_rtx) == IOR
6705 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6706 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6708 term2 = simplify_gen_binary (AND, GET_MODE (src),
6709 XEXP (XEXP (src, 0), 0), true_rtx);
6710 term3 = simplify_gen_binary (AND, GET_MODE (src),
6711 simplify_gen_unary (NOT, GET_MODE (src),
6712 XEXP (XEXP (src, 0), 0),
6713 GET_MODE (src)),
6714 false_rtx);
6716 SUBST (SET_SRC (x),
6717 simplify_gen_binary (IOR, GET_MODE (src),
6718 simplify_gen_binary (IOR, GET_MODE (src),
6719 term1, term2),
6720 term3));
6722 src = SET_SRC (x);
6725 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6726 whole thing fail. */
6727 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6728 return src;
6729 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6730 return dest;
6731 else
6732 /* Convert this into a field assignment operation, if possible. */
6733 return make_field_assignment (x);
6736 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6737 result. */
6739 static rtx
6740 simplify_logical (rtx x)
6742 machine_mode mode = GET_MODE (x);
6743 rtx op0 = XEXP (x, 0);
6744 rtx op1 = XEXP (x, 1);
6746 switch (GET_CODE (x))
6748 case AND:
6749 /* We can call simplify_and_const_int only if we don't lose
6750 any (sign) bits when converting INTVAL (op1) to
6751 "unsigned HOST_WIDE_INT". */
6752 if (CONST_INT_P (op1)
6753 && (HWI_COMPUTABLE_MODE_P (mode)
6754 || INTVAL (op1) > 0))
6756 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6757 if (GET_CODE (x) != AND)
6758 return x;
6760 op0 = XEXP (x, 0);
6761 op1 = XEXP (x, 1);
6764 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6765 apply the distributive law and then the inverse distributive
6766 law to see if things simplify. */
6767 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6769 rtx result = distribute_and_simplify_rtx (x, 0);
6770 if (result)
6771 return result;
6773 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6775 rtx result = distribute_and_simplify_rtx (x, 1);
6776 if (result)
6777 return result;
6779 break;
6781 case IOR:
6782 /* If we have (ior (and A B) C), apply the distributive law and then
6783 the inverse distributive law to see if things simplify. */
6785 if (GET_CODE (op0) == AND)
6787 rtx result = distribute_and_simplify_rtx (x, 0);
6788 if (result)
6789 return result;
6792 if (GET_CODE (op1) == AND)
6794 rtx result = distribute_and_simplify_rtx (x, 1);
6795 if (result)
6796 return result;
6798 break;
6800 default:
6801 gcc_unreachable ();
6804 return x;
6807 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6808 operations" because they can be replaced with two more basic operations.
6809 ZERO_EXTEND is also considered "compound" because it can be replaced with
6810 an AND operation, which is simpler, though only one operation.
6812 The function expand_compound_operation is called with an rtx expression
6813 and will convert it to the appropriate shifts and AND operations,
6814 simplifying at each stage.
6816 The function make_compound_operation is called to convert an expression
6817 consisting of shifts and ANDs into the equivalent compound expression.
6818 It is the inverse of this function, loosely speaking. */
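/* For instance, with 32-bit SImode and 8-bit QImode,
   (sign_extend:SI (reg:QI R)) expands, roughly, into a left shift of the
   low byte by 24 bits followed by an arithmetic right shift by 24 bits,
   while the zero_extend form uses the logical-shift pair, which in turn
   folds into an AND with 255.  */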
6820 static rtx
6821 expand_compound_operation (rtx x)
6823 unsigned HOST_WIDE_INT pos = 0, len;
6824 int unsignedp = 0;
6825 unsigned int modewidth;
6826 rtx tem;
6828 switch (GET_CODE (x))
6830 case ZERO_EXTEND:
6831 unsignedp = 1;
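/* ... fall through ... */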
6832 case SIGN_EXTEND:
6833 /* We can't necessarily use a const_int for a multiword mode;
6834 it depends on implicitly extending the value.
6835 Since we don't know the right way to extend it,
6836 we can't tell whether the implicit way is right.
6838 Even for a mode that is no wider than a const_int,
6839 we can't win, because we need to sign extend one of its bits through
6840 the rest of it, and we don't know which bit. */
6841 if (CONST_INT_P (XEXP (x, 0)))
6842 return x;
6844 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6845 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6846 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6847 reloaded. If not for that, MEM's would very rarely be safe.
6849 Reject MODEs bigger than a word, because we might not be able
6850 to reference a two-register group starting with an arbitrary register
6851 (and currently gen_lowpart might crash for a SUBREG). */
6853 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6854 return x;
6856 /* Reject MODEs that aren't scalar integers because turning vector
6857 or complex modes into shifts causes problems. */
6859 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6860 return x;
6862 len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
6863 /* If the inner object has VOIDmode (the only way this can happen
6864 is if it is an ASM_OPERANDS), we can't do anything since we don't
6865 know how much masking to do. */
6866 if (len == 0)
6867 return x;
6869 break;
6871 case ZERO_EXTRACT:
6872 unsignedp = 1;
6874 /* ... fall through ... */
6876 case SIGN_EXTRACT:
6877 /* If the operand is a CLOBBER, just return it. */
6878 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6879 return XEXP (x, 0);
6881 if (!CONST_INT_P (XEXP (x, 1))
6882 || !CONST_INT_P (XEXP (x, 2))
6883 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6884 return x;
6886 /* Reject MODEs that aren't scalar integers because turning vector
6887 or complex modes into shifts causes problems. */
6889 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6890 return x;
6892 len = INTVAL (XEXP (x, 1));
6893 pos = INTVAL (XEXP (x, 2));
6895 /* This should stay within the object being extracted, fail otherwise. */
6896 if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
6897 return x;
6899 if (BITS_BIG_ENDIAN)
6900 pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
6902 break;
6904 default:
6905 return x;
6907 /* Convert sign extension to zero extension, if we know that the high
6908 bit is not set, as this is easier to optimize. It will be converted
6909 back to a cheaper alternative in make_extraction. */
6910 if (GET_CODE (x) == SIGN_EXTEND
6911 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6912 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6913 & ~(((unsigned HOST_WIDE_INT)
6914 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6915 >> 1))
6916 == 0)))
6918 rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6919 rtx temp2 = expand_compound_operation (temp);
6921 /* Make sure this is a profitable operation. */
6922 if (set_src_cost (x, optimize_this_for_speed_p)
6923 > set_src_cost (temp2, optimize_this_for_speed_p))
6924 return temp2;
6925 else if (set_src_cost (x, optimize_this_for_speed_p)
6926 > set_src_cost (temp, optimize_this_for_speed_p))
6927 return temp;
6928 else
6929 return x;
6932 /* We can optimize some special cases of ZERO_EXTEND. */
6933 if (GET_CODE (x) == ZERO_EXTEND)
6935 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6936 know that the last value didn't have any inappropriate bits
6937 set. */
6938 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6939 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6940 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6941 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6942 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6943 return XEXP (XEXP (x, 0), 0);
6945 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6946 if (GET_CODE (XEXP (x, 0)) == SUBREG
6947 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6948 && subreg_lowpart_p (XEXP (x, 0))
6949 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6950 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6951 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6952 return SUBREG_REG (XEXP (x, 0));
6954 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6955 is a comparison and STORE_FLAG_VALUE permits. This is like
6956 the first case, but it works even when GET_MODE (x) is larger
6957 than HOST_WIDE_INT. */
6958 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6959 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6960 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6961 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6962 <= HOST_BITS_PER_WIDE_INT)
6963 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6964 return XEXP (XEXP (x, 0), 0);
6966 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
6967 if (GET_CODE (XEXP (x, 0)) == SUBREG
6968 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6969 && subreg_lowpart_p (XEXP (x, 0))
6970 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6971 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6972 <= HOST_BITS_PER_WIDE_INT)
6973 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6974 return SUBREG_REG (XEXP (x, 0));
6978 /* If we reach here, we want to return a pair of shifts. The inner
6979 shift is a left shift of BITSIZE - POS - LEN bits. The outer
6980 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
6981 logical depending on the value of UNSIGNEDP.
6983 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6984 converted into an AND of a shift.
6986 We must check for the case where the left shift would have a negative
6987 count. This can happen in a case like (x >> 31) & 255 on machines
6988 that can't shift by a constant. On those machines, we would first
6989 combine the shift with the AND to produce a variable-position
6990 extraction. Then the constant of 31 would be substituted in
6991 to produce such a position. */
6993 modewidth = GET_MODE_PRECISION (GET_MODE (x));
6994 if (modewidth >= pos + len)
6996 machine_mode mode = GET_MODE (x);
6997 tem = gen_lowpart (mode, XEXP (x, 0));
6998 if (!tem || GET_CODE (tem) == CLOBBER)
6999 return x;
7000 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7001 tem, modewidth - pos - len);
7002 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7003 mode, tem, modewidth - len);
7005 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7006 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
7007 simplify_shift_const (NULL_RTX, LSHIFTRT,
7008 GET_MODE (x),
7009 XEXP (x, 0), pos),
7010 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
7011 else
7012 /* Any other cases we can't handle. */
7013 return x;
7015 /* If we couldn't do this for some reason, return the original
7016 expression. */
7017 if (GET_CODE (tem) == CLOBBER)
7018 return x;
7020 return tem;
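/* Illustrative sketch only: the pair of shifts built above, replayed on a
   32-bit value in plain C.  The helper names are invented for this sketch;
   it assumes a host where int is 32 bits and where >> on a negative int is
   an arithmetic shift, much as combine already assumes for HOST_WIDE_INT.  */
#if 0
/* Signed extraction of LEN bits at bit POS (POS + LEN <= 32): shift the
   field up against the sign bit, then arithmetic-shift it back down.  */
static int
sketch_sign_extract (int x, int pos, int len)
{
  return ((int) ((unsigned int) x << (32 - pos - len))) >> (32 - len);
}

/* Unsigned extraction of the same field (LEN < 32): a logical right shift
   followed by an AND with a LEN-bit mask, as in the ZERO_EXTRACT branch.  */
static unsigned int
sketch_zero_extract (unsigned int x, int pos, int len)
{
  return (x >> pos) & (((unsigned int) 1 << len) - 1);
}
#endif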
7023 /* X is a SET which contains an assignment of one object into
7024 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7025 or certain SUBREGS). If possible, convert it into a series of
7026 logical operations.
7028 We half-heartedly support variable positions, but do not at all
7029 support variable lengths. */
7031 static const_rtx
7032 expand_field_assignment (const_rtx x)
7034 rtx inner;
7035 rtx pos; /* Always counts from low bit. */
7036 int len;
7037 rtx mask, cleared, masked;
7038 machine_mode compute_mode;
7040 /* Loop until we find something we can't simplify. */
7041 while (1)
7043 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7044 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7046 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7047 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7048 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7050 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7051 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7053 inner = XEXP (SET_DEST (x), 0);
7054 len = INTVAL (XEXP (SET_DEST (x), 1));
7055 pos = XEXP (SET_DEST (x), 2);
7057 /* A constant position should stay within the width of INNER. */
7058 if (CONST_INT_P (pos)
7059 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7060 break;
7062 if (BITS_BIG_ENDIAN)
7064 if (CONST_INT_P (pos))
7065 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7066 - INTVAL (pos));
7067 else if (GET_CODE (pos) == MINUS
7068 && CONST_INT_P (XEXP (pos, 1))
7069 && (INTVAL (XEXP (pos, 1))
7070 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7071 /* If position is ADJUST - X, new position is X. */
7072 pos = XEXP (pos, 0);
7073 else
7075 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7076 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7077 gen_int_mode (prec - len,
7078 GET_MODE (pos)),
7079 pos);
7084 /* A SUBREG between two modes that occupy the same numbers of words
7085 can be done by moving the SUBREG to the source. */
7086 else if (GET_CODE (SET_DEST (x)) == SUBREG
7087 /* We need SUBREGs to compute nonzero_bits properly. */
7088 && nonzero_sign_valid
7089 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7090 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7091 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7092 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7094 x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
7095 gen_lowpart
7096 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7097 SET_SRC (x)));
7098 continue;
7100 else
7101 break;
7103 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7104 inner = SUBREG_REG (inner);
7106 compute_mode = GET_MODE (inner);
7108 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7109 if (! SCALAR_INT_MODE_P (compute_mode))
7111 machine_mode imode;
7113 /* Don't do anything for vector or complex integral types. */
7114 if (! FLOAT_MODE_P (compute_mode))
7115 break;
7117 /* Try to find an integral mode to pun with. */
7118 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7119 if (imode == BLKmode)
7120 break;
7122 compute_mode = imode;
7123 inner = gen_lowpart (imode, inner);
7126 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7127 if (len >= HOST_BITS_PER_WIDE_INT)
7128 break;
7130 /* Now compute the equivalent expression. Make a copy of INNER
7131 for the SET_DEST in case it is a MEM into which we will substitute;
7132 we don't want shared RTL in that case. */
7133 mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << len) - 1,
7134 compute_mode);
7135 cleared = simplify_gen_binary (AND, compute_mode,
7136 simplify_gen_unary (NOT, compute_mode,
7137 simplify_gen_binary (ASHIFT,
7138 compute_mode,
7139 mask, pos),
7140 compute_mode),
7141 inner);
7142 masked = simplify_gen_binary (ASHIFT, compute_mode,
7143 simplify_gen_binary (
7144 AND, compute_mode,
7145 gen_lowpart (compute_mode, SET_SRC (x)),
7146 mask),
7147 pos);
7149 x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
7150 simplify_gen_binary (IOR, compute_mode,
7151 cleared, masked));
7154 return x;
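/* Illustrative sketch only: the clear-then-insert sequence built above, on a
   32-bit value in plain C.  The helper name is invented for this sketch and
   LEN is assumed to be smaller than 32.  */
#if 0
static unsigned int
sketch_field_assign (unsigned int inner, unsigned int src, int pos, int len)
{
  unsigned int mask = ((unsigned int) 1 << len) - 1;
  unsigned int cleared = inner & ~(mask << pos);  /* zero the field */
  unsigned int masked = (src & mask) << pos;      /* source, masked and shifted */
  return cleared | masked;                        /* recombine with IOR */
}
#endif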
7157 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7158 it is an RTX that represents the (variable) starting position; otherwise,
7159 POS is the (constant) starting bit position. Both are counted from the LSB.
7161 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7163 IN_DEST is nonzero if this is a reference in the destination of a SET.
7164 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7165 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7166 be used.
7168 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7169 ZERO_EXTRACT should be built even for bits starting at bit 0.
7171 MODE is the desired mode of the result (if IN_DEST == 0).
7173 The result is an RTX for the extraction or NULL_RTX if the target
7174 can't handle it. */
7176 static rtx
7177 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7178 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7179 int in_dest, int in_compare)
7181 /* This mode describes the size of the storage area
7182 to fetch the overall value from. Within that, we
7183 ignore the POS lowest bits, etc. */
7184 machine_mode is_mode = GET_MODE (inner);
7185 machine_mode inner_mode;
7186 machine_mode wanted_inner_mode;
7187 machine_mode wanted_inner_reg_mode = word_mode;
7188 machine_mode pos_mode = word_mode;
7189 machine_mode extraction_mode = word_mode;
7190 machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7191 rtx new_rtx = 0;
7192 rtx orig_pos_rtx = pos_rtx;
7193 HOST_WIDE_INT orig_pos;
7195 if (pos_rtx && CONST_INT_P (pos_rtx))
7196 pos = INTVAL (pos_rtx), pos_rtx = 0;
7198 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7200 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7201 consider just the QI as the memory to extract from.
7202 The subreg adds or removes high bits; its mode is
7203 irrelevant to the meaning of this extraction,
7204 since POS and LEN count from the lsb. */
7205 if (MEM_P (SUBREG_REG (inner)))
7206 is_mode = GET_MODE (SUBREG_REG (inner));
7207 inner = SUBREG_REG (inner);
7209 else if (GET_CODE (inner) == ASHIFT
7210 && CONST_INT_P (XEXP (inner, 1))
7211 && pos_rtx == 0 && pos == 0
7212 && len > UINTVAL (XEXP (inner, 1)))
7214 /* We're extracting the least significant bits of an rtx
7215 (ashift X (const_int C)), where LEN > C. Extract the
7216 least significant (LEN - C) bits of X, giving an rtx
7217 whose mode is MODE, then shift it left C times. */
7218 new_rtx = make_extraction (mode, XEXP (inner, 0),
7219 0, 0, len - INTVAL (XEXP (inner, 1)),
7220 unsignedp, in_dest, in_compare);
7221 if (new_rtx != 0)
7222 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7224 else if (GET_CODE (inner) == TRUNCATE)
7225 inner = XEXP (inner, 0);
7227 inner_mode = GET_MODE (inner);
7229 /* See if this can be done without an extraction. We never can if the
7230 width of the field is not the same as that of some integer mode. For
7231 registers, we can only avoid the extraction if the position is at the
7232 low-order bit and this is either not in the destination or we have the
7233 appropriate STRICT_LOW_PART operation available.
7235 For MEM, we can avoid an extract if the field starts on an appropriate
7236 boundary and we can change the mode of the memory reference. */
7238 if (tmode != BLKmode
7239 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7240 && !MEM_P (inner)
7241 && (inner_mode == tmode
7242 || !REG_P (inner)
7243 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7244 || reg_truncated_to_mode (tmode, inner))
7245 && (! in_dest
7246 || (REG_P (inner)
7247 && have_insn_for (STRICT_LOW_PART, tmode))))
7248 || (MEM_P (inner) && pos_rtx == 0
7249 && (pos
7250 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7251 : BITS_PER_UNIT)) == 0
7252 /* We can't do this if we are widening INNER_MODE (it
7253 may not be aligned, for one thing). */
7254 && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7255 && (inner_mode == tmode
7256 || (! mode_dependent_address_p (XEXP (inner, 0),
7257 MEM_ADDR_SPACE (inner))
7258 && ! MEM_VOLATILE_P (inner))))))
7260 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7261 field. If the original and current mode are the same, we need not
7262 adjust the offset. Otherwise, we do if bytes big endian.
7264 If INNER is not a MEM, get a piece consisting of just the field
7265 of interest (in this case POS % BITS_PER_WORD must be 0). */
7267 if (MEM_P (inner))
7269 HOST_WIDE_INT offset;
7271 /* POS counts from lsb, but make OFFSET count in memory order. */
7272 if (BYTES_BIG_ENDIAN)
7273 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7274 else
7275 offset = pos / BITS_PER_UNIT;
7277 new_rtx = adjust_address_nv (inner, tmode, offset);
7279 else if (REG_P (inner))
7281 if (tmode != inner_mode)
7283 /* We can't call gen_lowpart in a DEST since we
7284 always want a SUBREG (see below) and it would sometimes
7285 return a new hard register. */
7286 if (pos || in_dest)
7288 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7290 if (WORDS_BIG_ENDIAN
7291 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7292 final_word = ((GET_MODE_SIZE (inner_mode)
7293 - GET_MODE_SIZE (tmode))
7294 / UNITS_PER_WORD) - final_word;
7296 final_word *= UNITS_PER_WORD;
7297 if (BYTES_BIG_ENDIAN
7298 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7299 final_word += (GET_MODE_SIZE (inner_mode)
7300 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7302 /* Avoid creating invalid subregs, for example when
7303 simplifying (x>>32)&255. */
7304 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7305 return NULL_RTX;
7307 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7309 else
7310 new_rtx = gen_lowpart (tmode, inner);
7312 else
7313 new_rtx = inner;
7315 else
7316 new_rtx = force_to_mode (inner, tmode,
7317 len >= HOST_BITS_PER_WIDE_INT
7318 ? ~(unsigned HOST_WIDE_INT) 0
7319 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7320 0);
7322 /* If this extraction is going into the destination of a SET,
7323 make a STRICT_LOW_PART unless we made a MEM. */
7325 if (in_dest)
7326 return (MEM_P (new_rtx) ? new_rtx
7327 : (GET_CODE (new_rtx) != SUBREG
7328 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7329 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7331 if (mode == tmode)
7332 return new_rtx;
7334 if (CONST_SCALAR_INT_P (new_rtx))
7335 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7336 mode, new_rtx, tmode);
7338 /* If we know that no extraneous bits are set, and that the high
7339 bit is not set, convert the extraction to the cheaper of
7340 sign and zero extension, that are equivalent in these cases. */
7341 if (flag_expensive_optimizations
7342 && (HWI_COMPUTABLE_MODE_P (tmode)
7343 && ((nonzero_bits (new_rtx, tmode)
7344 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7345 == 0)))
7347 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7348 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7350 /* Prefer ZERO_EXTENSION, since it gives more information to
7351 backends. */
7352 if (set_src_cost (temp, optimize_this_for_speed_p)
7353 <= set_src_cost (temp1, optimize_this_for_speed_p))
7354 return temp;
7355 return temp1;
7358 /* Otherwise, sign- or zero-extend unless we already are in the
7359 proper mode. */
7361 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7362 mode, new_rtx));
7365 /* Unless this is a COMPARE or we have a funny memory reference,
7366 don't do anything with zero-extending field extracts starting at
7367 the low-order bit since they are simple AND operations. */
7368 if (pos_rtx == 0 && pos == 0 && ! in_dest
7369 && ! in_compare && unsignedp)
7370 return 0;
7372 /* If INNER is a MEM, reject this if we would be spanning bytes or
7373 if the position is not a constant and the length is not 1. In all
7374 other cases, we would only be going outside our object in cases when
7375 an original shift would have been undefined. */
7376 if (MEM_P (inner)
7377 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7378 || (pos_rtx != 0 && len != 1)))
7379 return 0;
7381 enum extraction_pattern pattern = (in_dest ? EP_insv
7382 : unsignedp ? EP_extzv : EP_extv);
7384 /* If INNER is not from memory, we want it to have the mode of a register
7385 extraction pattern's structure operand, or word_mode if there is no
7386 such pattern. The same applies to extraction_mode and pos_mode
7387 and their respective operands.
7389 For memory, assume that the desired extraction_mode and pos_mode
7390 are the same as for a register operation, since at present we don't
7391 have named patterns for aligned memory structures. */
7392 struct extraction_insn insn;
7393 if (get_best_reg_extraction_insn (&insn, pattern,
7394 GET_MODE_BITSIZE (inner_mode), mode))
7396 wanted_inner_reg_mode = insn.struct_mode;
7397 pos_mode = insn.pos_mode;
7398 extraction_mode = insn.field_mode;
7401 /* Never narrow an object, since that might not be safe. */
7403 if (mode != VOIDmode
7404 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7405 extraction_mode = mode;
7407 if (!MEM_P (inner))
7408 wanted_inner_mode = wanted_inner_reg_mode;
7409 else
7411 /* Be careful not to go beyond the extracted object and maintain the
7412 natural alignment of the memory. */
7413 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7414 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7415 > GET_MODE_BITSIZE (wanted_inner_mode))
7417 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7418 gcc_assert (wanted_inner_mode != VOIDmode);
7422 orig_pos = pos;
7424 if (BITS_BIG_ENDIAN)
7426 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7427 BITS_BIG_ENDIAN style. If position is constant, compute new
7428 position. Otherwise, build subtraction.
7429 Note that POS is relative to the mode of the original argument.
7430 If it's a MEM we need to recompute POS relative to that.
7431 However, if we're extracting from (or inserting into) a register,
7432 we want to recompute POS relative to wanted_inner_mode. */
7433 int width = (MEM_P (inner)
7434 ? GET_MODE_BITSIZE (is_mode)
7435 : GET_MODE_BITSIZE (wanted_inner_mode));
7437 if (pos_rtx == 0)
7438 pos = width - len - pos;
7439 else
7440 pos_rtx
7441 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7442 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7443 pos_rtx);
7444 /* POS may be less than 0 now, but we check for that below.
7445 Note that it can only be less than 0 if !MEM_P (inner). */
7448 /* If INNER has a wider mode, and this is a constant extraction, try to
7449 make it smaller and adjust the byte to point to the byte containing
7450 the value. */
7451 if (wanted_inner_mode != VOIDmode
7452 && inner_mode != wanted_inner_mode
7453 && ! pos_rtx
7454 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7455 && MEM_P (inner)
7456 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7457 && ! MEM_VOLATILE_P (inner))
7459 int offset = 0;
7461 /* The computations below will be correct if the machine is big
7462 endian in both bits and bytes or little endian in bits and bytes.
7463 If it is mixed, we must adjust. */
7465 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7466 adjust OFFSET to compensate. */
7467 if (BYTES_BIG_ENDIAN
7468 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7469 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7471 /* We can now move to the desired byte. */
7472 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7473 * GET_MODE_SIZE (wanted_inner_mode);
7474 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7476 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7477 && is_mode != wanted_inner_mode)
7478 offset = (GET_MODE_SIZE (is_mode)
7479 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7481 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7484 /* If INNER is not memory, get it into the proper mode. If we are changing
7485 its mode, POS must be a constant and smaller than the size of the new
7486 mode. */
7487 else if (!MEM_P (inner))
7489 /* On the LHS, don't create paradoxical subregs implicitly truncating
7490 the register unless TRULY_NOOP_TRUNCATION. */
7491 if (in_dest
7492 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7493 wanted_inner_mode))
7494 return NULL_RTX;
7496 if (GET_MODE (inner) != wanted_inner_mode
7497 && (pos_rtx != 0
7498 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7499 return NULL_RTX;
7501 if (orig_pos < 0)
7502 return NULL_RTX;
7504 inner = force_to_mode (inner, wanted_inner_mode,
7505 pos_rtx
7506 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7507 ? ~(unsigned HOST_WIDE_INT) 0
7508 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7509 << orig_pos),
7510 0);
7513 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7514 have to zero extend. Otherwise, we can just use a SUBREG. */
7515 if (pos_rtx != 0
7516 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7518 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7519 GET_MODE (pos_rtx));
7521 /* If we know that no extraneous bits are set, and that the high
7522 bit is not set, convert extraction to cheaper one - either
7523 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7524 cases. */
7525 if (flag_expensive_optimizations
7526 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7527 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7528 & ~(((unsigned HOST_WIDE_INT)
7529 GET_MODE_MASK (GET_MODE (pos_rtx)))
7530 >> 1))
7531 == 0)))
7533 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7534 GET_MODE (pos_rtx));
7536 /* Prefer ZERO_EXTENSION, since it gives more information to
7537 backends. */
7538 if (set_src_cost (temp1, optimize_this_for_speed_p)
7539 < set_src_cost (temp, optimize_this_for_speed_p))
7540 temp = temp1;
7542 pos_rtx = temp;
7545 /* Make POS_RTX unless we already have it and it is correct. If we don't
7546 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7547 be a CONST_INT. */
7548 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7549 pos_rtx = orig_pos_rtx;
7551 else if (pos_rtx == 0)
7552 pos_rtx = GEN_INT (pos);
7554 /* Make the required operation. See if we can use existing rtx. */
7555 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7556 extraction_mode, inner, GEN_INT (len), pos_rtx);
7557 if (! in_dest)
7558 new_rtx = gen_lowpart (mode, new_rtx);
7560 return new_rtx;
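/* Illustrative sketch only.  As an assumed example (not tied to any
   particular target), asking for 8 unsigned bits at bit 4 of a 32-bit
   register typically comes back as
   (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4)).  The check
   below is the identity behind the ASHIFT shortcut near the top of
   make_extraction: the low LEN bits of (X << C) are the low LEN - C bits
   of X shifted left by C.  The helper name is invented for this sketch.  */
#if 0
static int
sketch_low_bits_of_ashift (unsigned int x, int c, int len)  /* c < len < 32 */
{
  unsigned int direct = (x << c) & (((unsigned int) 1 << len) - 1);
  unsigned int shortcut = (x & (((unsigned int) 1 << (len - c)) - 1)) << c;
  return direct == shortcut;  /* always 1 */
}
#endif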
7563 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7564 with any other operations in X. Return X without that shift if so. */
7566 static rtx
7567 extract_left_shift (rtx x, int count)
7569 enum rtx_code code = GET_CODE (x);
7570 machine_mode mode = GET_MODE (x);
7571 rtx tem;
7573 switch (code)
7575 case ASHIFT:
7576 /* This is the shift itself. If it is wide enough, we will return
7577 either the value being shifted if the shift count is equal to
7578 COUNT or a shift for the difference. */
7579 if (CONST_INT_P (XEXP (x, 1))
7580 && INTVAL (XEXP (x, 1)) >= count)
7581 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7582 INTVAL (XEXP (x, 1)) - count);
7583 break;
7585 case NEG: case NOT:
7586 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7587 return simplify_gen_unary (code, mode, tem, mode);
7589 break;
7591 case PLUS: case IOR: case XOR: case AND:
7592 /* If we can safely shift this constant and we find the inner shift,
7593 make a new operation. */
7594 if (CONST_INT_P (XEXP (x, 1))
7595 && (UINTVAL (XEXP (x, 1))
7596 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7597 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7599 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7600 return simplify_gen_binary (code, mode, tem,
7601 gen_int_mode (val, mode));
7603 break;
7605 default:
7606 break;
7609 return 0;
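/* Illustrative sketch only: the identity extract_left_shift relies on,
   checked on host integers.  With COUNT == 3,
   (plus (ashift X (const_int 3)) (const_int 40)) can lose the shift and
   become (plus X (const_int 5)), because 40 has its low three bits clear.
   The helper name is invented for this sketch.  */
#if 0
static int
sketch_commute_left_shift (unsigned int x)
{
  return ((x << 3) + 40) == ((x + 5) << 3);  /* always 1 */
}
#endif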
7612 /* Look at the expression rooted at X. Look for expressions
7613 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7614 Form these expressions.
7616 Return the new rtx, usually just X.
7618 Also, for machines like the VAX that don't have logical shift insns,
7619 try to convert logical to arithmetic shift operations in cases where
7620 they are equivalent. This undoes the canonicalizations to logical
7621 shifts done elsewhere.
7623 We try, as much as possible, to re-use rtl expressions to save memory.
7625 IN_CODE says what kind of expression we are processing. Normally, it is
7626 SET. In a memory address (inside a MEM, PLUS or MINUS, the latter two
7627 being kludges), it is MEM. When processing the arguments of a comparison
7628 or a COMPARE against zero, it is COMPARE. */
7630 static rtx
7631 make_compound_operation (rtx x, enum rtx_code in_code)
7633 enum rtx_code code = GET_CODE (x);
7634 machine_mode mode = GET_MODE (x);
7635 int mode_width = GET_MODE_PRECISION (mode);
7636 rtx rhs, lhs;
7637 enum rtx_code next_code;
7638 int i, j;
7639 rtx new_rtx = 0;
7640 rtx tem;
7641 const char *fmt;
7643 /* Select the code to be used in recursive calls. Once we are inside an
7644 address, we stay there. If we have a comparison, set to COMPARE,
7645 but once inside, go back to our default of SET. */
7647 next_code = (code == MEM ? MEM
7648 : ((code == PLUS || code == MINUS)
7649 && SCALAR_INT_MODE_P (mode)) ? MEM
7650 : ((code == COMPARE || COMPARISON_P (x))
7651 && XEXP (x, 1) == const0_rtx) ? COMPARE
7652 : in_code == COMPARE ? SET : in_code);
7654 /* Process depending on the code of this operation. If NEW is set
7655 nonzero, it will be returned. */
7657 switch (code)
7659 case ASHIFT:
7660 /* Convert shifts by constants into multiplications if inside
7661 an address. */
7662 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7663 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7664 && INTVAL (XEXP (x, 1)) >= 0
7665 && SCALAR_INT_MODE_P (mode))
7667 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7668 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7670 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7671 if (GET_CODE (new_rtx) == NEG)
7673 new_rtx = XEXP (new_rtx, 0);
7674 multval = -multval;
7676 multval = trunc_int_for_mode (multval, mode);
7677 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7679 break;
7681 case PLUS:
7682 lhs = XEXP (x, 0);
7683 rhs = XEXP (x, 1);
7684 lhs = make_compound_operation (lhs, next_code);
7685 rhs = make_compound_operation (rhs, next_code);
7686 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7687 && SCALAR_INT_MODE_P (mode))
7689 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7690 XEXP (lhs, 1));
7691 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7693 else if (GET_CODE (lhs) == MULT
7694 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7696 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7697 simplify_gen_unary (NEG, mode,
7698 XEXP (lhs, 1),
7699 mode));
7700 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7702 else
7704 SUBST (XEXP (x, 0), lhs);
7705 SUBST (XEXP (x, 1), rhs);
7706 goto maybe_swap;
7708 x = gen_lowpart (mode, new_rtx);
7709 goto maybe_swap;
7711 case MINUS:
7712 lhs = XEXP (x, 0);
7713 rhs = XEXP (x, 1);
7714 lhs = make_compound_operation (lhs, next_code);
7715 rhs = make_compound_operation (rhs, next_code);
7716 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7717 && SCALAR_INT_MODE_P (mode))
7719 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7720 XEXP (rhs, 1));
7721 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7723 else if (GET_CODE (rhs) == MULT
7724 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7726 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7727 simplify_gen_unary (NEG, mode,
7728 XEXP (rhs, 1),
7729 mode));
7730 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7732 else
7734 SUBST (XEXP (x, 0), lhs);
7735 SUBST (XEXP (x, 1), rhs);
7736 return x;
7738 return gen_lowpart (mode, new_rtx);
7740 case AND:
7741 /* If the second operand is not a constant, we can't do anything
7742 with it. */
7743 if (!CONST_INT_P (XEXP (x, 1)))
7744 break;
7746 /* If the constant is a power of two minus one and the first operand
7747 is a logical right shift, make an extraction. */
7748 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7749 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7751 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7752 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7753 0, in_code == COMPARE);
7756 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7757 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7758 && subreg_lowpart_p (XEXP (x, 0))
7759 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7760 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7762 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7763 next_code);
7764 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7765 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7766 0, in_code == COMPARE);
7768 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7769 else if ((GET_CODE (XEXP (x, 0)) == XOR
7770 || GET_CODE (XEXP (x, 0)) == IOR)
7771 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7772 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7773 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7775 /* Apply the distributive law, and then try to make extractions. */
7776 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7777 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7778 XEXP (x, 1)),
7779 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7780 XEXP (x, 1)));
7781 new_rtx = make_compound_operation (new_rtx, in_code);
7784 /* If we have (and (rotate X C) M) and C is larger than the number
7785 of bits in M, this is an extraction. */
7787 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7788 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7789 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7790 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7792 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7793 new_rtx = make_extraction (mode, new_rtx,
7794 (GET_MODE_PRECISION (mode)
7795 - INTVAL (XEXP (XEXP (x, 0), 1))),
7796 NULL_RTX, i, 1, 0, in_code == COMPARE);
7799 /* On machines without logical shifts, if the operand of the AND is
7800 a logical shift and our mask turns off all the propagated sign
7801 bits, we can replace the logical shift with an arithmetic shift. */
7802 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7803 && !have_insn_for (LSHIFTRT, mode)
7804 && have_insn_for (ASHIFTRT, mode)
7805 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7806 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7807 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7808 && mode_width <= HOST_BITS_PER_WIDE_INT)
7810 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7812 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7813 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7814 SUBST (XEXP (x, 0),
7815 gen_rtx_ASHIFTRT (mode,
7816 make_compound_operation
7817 (XEXP (XEXP (x, 0), 0), next_code),
7818 XEXP (XEXP (x, 0), 1)));
7821 /* If the constant is one less than a power of two, this might be
7822 representable by an extraction even if no shift is present.
7823 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7824 we are in a COMPARE. */
7825 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7826 new_rtx = make_extraction (mode,
7827 make_compound_operation (XEXP (x, 0),
7828 next_code),
7829 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7831 /* If we are in a comparison and this is an AND with a power of two,
7832 convert this into the appropriate bit extract. */
7833 else if (in_code == COMPARE
7834 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7835 new_rtx = make_extraction (mode,
7836 make_compound_operation (XEXP (x, 0),
7837 next_code),
7838 i, NULL_RTX, 1, 1, 0, 1);
7840 break;
7842 case LSHIFTRT:
7843 /* If the sign bit is known to be zero, replace this with an
7844 arithmetic shift. */
7845 if (have_insn_for (ASHIFTRT, mode)
7846 && ! have_insn_for (LSHIFTRT, mode)
7847 && mode_width <= HOST_BITS_PER_WIDE_INT
7848 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
7850 new_rtx = gen_rtx_ASHIFTRT (mode,
7851 make_compound_operation (XEXP (x, 0),
7852 next_code),
7853 XEXP (x, 1));
7854 break;
7857 /* ... fall through ... */
7859 case ASHIFTRT:
7860 lhs = XEXP (x, 0);
7861 rhs = XEXP (x, 1);
7863 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7864 this is a SIGN_EXTRACT. */
7865 if (CONST_INT_P (rhs)
7866 && GET_CODE (lhs) == ASHIFT
7867 && CONST_INT_P (XEXP (lhs, 1))
7868 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7869 && INTVAL (XEXP (lhs, 1)) >= 0
7870 && INTVAL (rhs) < mode_width)
7872 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7873 new_rtx = make_extraction (mode, new_rtx,
7874 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7875 NULL_RTX, mode_width - INTVAL (rhs),
7876 code == LSHIFTRT, 0, in_code == COMPARE);
7877 break;
7880 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7881 If so, try to merge the shifts into a SIGN_EXTEND. We could
7882 also do this for some cases of SIGN_EXTRACT, but it doesn't
7883 seem worth the effort; the case checked for occurs on Alpha. */
7885 if (!OBJECT_P (lhs)
7886 && ! (GET_CODE (lhs) == SUBREG
7887 && (OBJECT_P (SUBREG_REG (lhs))))
7888 && CONST_INT_P (rhs)
7889 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7890 && INTVAL (rhs) < mode_width
7891 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7892 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7893 0, NULL_RTX, mode_width - INTVAL (rhs),
7894 code == LSHIFTRT, 0, in_code == COMPARE);
7896 break;
7898 case SUBREG:
7899 /* Call ourselves recursively on the inner expression. If we are
7900 narrowing the object and it has a different RTL code from
7901 the one it originally had, do this SUBREG as a force_to_mode. */
7903 rtx inner = SUBREG_REG (x), simplified;
7904 enum rtx_code subreg_code = in_code;
7906 /* If in_code is COMPARE, it isn't always safe to pass it through
7907 to the recursive make_compound_operation call. */
7908 if (subreg_code == COMPARE
7909 && (!subreg_lowpart_p (x)
7910 || GET_CODE (inner) == SUBREG
7911 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
7912 is (const_int 0), rather than
7913 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0). */
7914 || (GET_CODE (inner) == AND
7915 && CONST_INT_P (XEXP (inner, 1))
7916 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7917 && exact_log2 (UINTVAL (XEXP (inner, 1)))
7918 >= GET_MODE_BITSIZE (mode))))
7919 subreg_code = SET;
7921 tem = make_compound_operation (inner, subreg_code);
7923 simplified
7924 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7925 if (simplified)
7926 tem = simplified;
7928 if (GET_CODE (tem) != GET_CODE (inner)
7929 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7930 && subreg_lowpart_p (x))
7932 rtx newer
7933 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7935 /* If we have something other than a SUBREG, we might have
7936 done an expansion, so rerun ourselves. */
7937 if (GET_CODE (newer) != SUBREG)
7938 newer = make_compound_operation (newer, in_code);
7940 /* force_to_mode can expand compounds. If it just re-expanded the
7941 compound, use gen_lowpart to convert to the desired mode. */
7942 if (rtx_equal_p (newer, x)
7943 /* Likewise if it re-expanded the compound only partially.
7944 This happens for SUBREG of ZERO_EXTRACT if they extract
7945 the same number of bits. */
7946 || (GET_CODE (newer) == SUBREG
7947 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7948 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7949 && GET_CODE (inner) == AND
7950 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7951 return gen_lowpart (GET_MODE (x), tem);
7953 return newer;
7956 if (simplified)
7957 return tem;
7959 break;
7961 default:
7962 break;
7965 if (new_rtx)
7967 x = gen_lowpart (mode, new_rtx);
7968 code = GET_CODE (x);
7971 /* Now recursively process each operand of this operation. We need to
7972 handle ZERO_EXTEND specially so that we don't lose track of the
7973 inner mode. */
7974 if (GET_CODE (x) == ZERO_EXTEND)
7976 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7977 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
7978 new_rtx, GET_MODE (XEXP (x, 0)));
7979 if (tem)
7980 return tem;
7981 SUBST (XEXP (x, 0), new_rtx);
7982 return x;
7985 fmt = GET_RTX_FORMAT (code);
7986 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7987 if (fmt[i] == 'e')
7989 new_rtx = make_compound_operation (XEXP (x, i), next_code);
7990 SUBST (XEXP (x, i), new_rtx);
7992 else if (fmt[i] == 'E')
7993 for (j = 0; j < XVECLEN (x, i); j++)
7995 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7996 SUBST (XVECEXP (x, i, j), new_rtx);
7999 maybe_swap:
8000 /* If this is a commutative operation, the changes to the operands
8001 may have made it noncanonical. */
8002 if (COMMUTATIVE_ARITH_P (x)
8003 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
8005 tem = XEXP (x, 0);
8006 SUBST (XEXP (x, 0), XEXP (x, 1));
8007 SUBST (XEXP (x, 1), tem);
8010 return x;
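/* Illustrative sketch only.  Two assumed examples of the rewrites above,
   spelled by hand rather than taken from a real dump:
   (and (lshiftrt (reg:SI R) (const_int 4)) (const_int 255)) generally
   becomes (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4)), and
   inside an address (ashift (reg:SI R) (const_int 2)) becomes
   (mult (reg:SI R) (const_int 4)).  The check below is the arithmetic
   identity behind the second rewrite; the helper name is invented for
   this sketch.  */
#if 0
static int
sketch_ashift_as_mult (unsigned int x)
{
  return (x << 2) == x * 4;  /* always 1 */
}
#endif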
8013 /* Given M see if it is a value that would select a field of bits
8014 within an item, but not the entire word. Return -1 if not.
8015 Otherwise, return the starting position of the field, where 0 is the
8016 low-order bit.
8018 *PLEN is set to the length of the field. */
8020 static int
8021 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8023 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8024 int pos = m ? ctz_hwi (m) : -1;
8025 int len = 0;
8027 if (pos >= 0)
8028 /* Now shift off the low-order zero bits and see if we have a
8029 power of two minus 1. */
8030 len = exact_log2 ((m >> pos) + 1);
8032 if (len <= 0)
8033 pos = -1;
8035 *plen = len;
8036 return pos;
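/* Illustrative sketch only: a host-integer rendition of get_pos_from_mask.
   A contiguous mask such as 0xf0 gives POS 4 and LEN 4, while a mask with
   a hole, such as 0x90, is rejected with -1.  The helper name is invented
   for this sketch.  */
#if 0
static int
sketch_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned int *plen)
{
  int pos = 0, len = 0;

  if (m == 0)
    return -1;
  while ((m & 1) == 0)   /* what ctz_hwi computes */
    m >>= 1, pos++;
  while (m & 1)          /* length of the run of ones */
    m >>= 1, len++;
  if (m != 0)            /* stray bits above the run: not a single field */
    return -1;

  *plen = len;
  return pos;
}
#endif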
8039 /* If X refers to a register that equals REG in value, replace these
8040 references with REG. */
8041 static rtx
8042 canon_reg_for_combine (rtx x, rtx reg)
8044 rtx op0, op1, op2;
8045 const char *fmt;
8046 int i;
8047 bool copied;
8049 enum rtx_code code = GET_CODE (x);
8050 switch (GET_RTX_CLASS (code))
8052 case RTX_UNARY:
8053 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8054 if (op0 != XEXP (x, 0))
8055 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8056 GET_MODE (reg));
8057 break;
8059 case RTX_BIN_ARITH:
8060 case RTX_COMM_ARITH:
8061 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8062 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8063 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8064 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8065 break;
8067 case RTX_COMPARE:
8068 case RTX_COMM_COMPARE:
8069 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8070 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8071 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8072 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8073 GET_MODE (op0), op0, op1);
8074 break;
8076 case RTX_TERNARY:
8077 case RTX_BITFIELD_OPS:
8078 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8079 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8080 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8081 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8082 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8083 GET_MODE (op0), op0, op1, op2);
8085 case RTX_OBJ:
8086 if (REG_P (x))
8088 if (rtx_equal_p (get_last_value (reg), x)
8089 || rtx_equal_p (reg, get_last_value (x)))
8090 return reg;
8091 else
8092 break;
8095 /* fall through */
8097 default:
8098 fmt = GET_RTX_FORMAT (code);
8099 copied = false;
8100 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8101 if (fmt[i] == 'e')
8103 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8104 if (op != XEXP (x, i))
8106 if (!copied)
8108 copied = true;
8109 x = copy_rtx (x);
8111 XEXP (x, i) = op;
8114 else if (fmt[i] == 'E')
8116 int j;
8117 for (j = 0; j < XVECLEN (x, i); j++)
8119 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8120 if (op != XVECEXP (x, i, j))
8122 if (!copied)
8124 copied = true;
8125 x = copy_rtx (x);
8127 XVECEXP (x, i, j) = op;
8132 break;
8135 return x;
8138 /* Return X converted to MODE. If the value is already truncated to
8139 MODE we can just return a subreg even though in the general case we
8140 would need an explicit truncation. */
8142 static rtx
8143 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8145 if (!CONST_INT_P (x)
8146 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8147 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8148 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8150 /* Bit-cast X into an integer mode. */
8151 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8152 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8153 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8154 x, GET_MODE (x));
8157 return gen_lowpart (mode, x);
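/* Illustrative sketch only, for force_to_mode defined just below.  As an
   assumed example, asking for (and (reg:SI R) (const_int 255)) under MASK
   255 hands back just (reg:SI R), since the AND is redundant; and in the
   PLUS case an added constant may be masked down to an alignment boundary
   when the other operand is already aligned.  The check below is the
   identity behind that PLUS case; the helper name is invented for this
   sketch.  */
#if 0
static int
sketch_aligned_plus (unsigned int p)
{
  if ((p & 15) != 0)  /* only claimed when P is 16-byte aligned */
    return 1;
  /* Under mask ~15, adding 19 is the same as adding 19 & ~15 == 16.  */
  return ((p + 19) & ~15U) == ((p + 16) & ~15U);  /* always 1 here */
}
#endif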
8160 /* See if X can be simplified knowing that we will only refer to it in
8161 MODE and will only refer to those bits that are nonzero in MASK.
8162 If other bits are being computed or if masking operations are done
8163 that select a superset of the bits in MASK, they can sometimes be
8164 ignored.
8166 Return a possibly simplified expression, but always convert X to
8167 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8169 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8170 are all off in X. This is used when X will be complemented, by either
8171 NOT, NEG, or XOR. */
8173 static rtx
8174 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8175 int just_select)
8177 enum rtx_code code = GET_CODE (x);
8178 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8179 machine_mode op_mode;
8180 unsigned HOST_WIDE_INT fuller_mask, nonzero;
8181 rtx op0, op1, temp;
8183 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8184 code below will do the wrong thing since the mode of such an
8185 expression is VOIDmode.
8187 Also do nothing if X is a CLOBBER; this can happen if X was
8188 the return value from a call to gen_lowpart. */
8189 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8190 return x;
8192 /* We want to perform the operation in its present mode unless we know
8193 that the operation is valid in MODE, in which case we do the operation
8194 in MODE. */
8195 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8196 && have_insn_for (code, mode))
8197 ? mode : GET_MODE (x));
8199 /* It is not valid to do a right-shift in a narrower mode
8200 than the one it came in with. */
8201 if ((code == LSHIFTRT || code == ASHIFTRT)
8202 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8203 op_mode = GET_MODE (x);
8205 /* Truncate MASK to fit OP_MODE. */
8206 if (op_mode)
8207 mask &= GET_MODE_MASK (op_mode);
8209 /* When we have an arithmetic operation, or a shift whose count we
8210 do not know, we need to assume that all bits up to the highest-order
8211 bit in MASK will be needed. This is how we form such a mask. */
8212 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8213 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8214 else
8215 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8216 - 1);
8218 /* Determine what bits of X are guaranteed to be (non)zero. */
8219 nonzero = nonzero_bits (x, mode);
8221 /* If none of the bits in X are needed, return a zero. */
8222 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8223 x = const0_rtx;
8225 /* If X is a CONST_INT, return a new one. Do this here since the
8226 test below will fail. */
8227 if (CONST_INT_P (x))
8229 if (SCALAR_INT_MODE_P (mode))
8230 return gen_int_mode (INTVAL (x) & mask, mode);
8231 else
8233 x = GEN_INT (INTVAL (x) & mask);
8234 return gen_lowpart_common (mode, x);
8238 /* If X is narrower than MODE and we want all the bits in X's mode, just
8239 get X in the proper mode. */
8240 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8241 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8242 return gen_lowpart (mode, x);
8244 /* We can ignore the effect of a SUBREG if it narrows the mode or
8245 if the constant masks to zero all the bits the mode doesn't have. */
8246 if (GET_CODE (x) == SUBREG
8247 && subreg_lowpart_p (x)
8248 && ((GET_MODE_SIZE (GET_MODE (x))
8249 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8250 || (0 == (mask
8251 & GET_MODE_MASK (GET_MODE (x))
8252 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8253 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8255 /* The arithmetic simplifications here only work for scalar integer modes. */
8256 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8257 return gen_lowpart_or_truncate (mode, x);
8259 switch (code)
8261 case CLOBBER:
8262 /* If X is a (clobber (const_int)), return it since we know we are
8263 generating something that won't match. */
8264 return x;
8266 case SIGN_EXTEND:
8267 case ZERO_EXTEND:
8268 case ZERO_EXTRACT:
8269 case SIGN_EXTRACT:
8270 x = expand_compound_operation (x);
8271 if (GET_CODE (x) != code)
8272 return force_to_mode (x, mode, mask, next_select);
8273 break;
8275 case TRUNCATE:
8276 /* Similarly for a truncate. */
8277 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8279 case AND:
8280 /* If this is an AND with a constant, convert it into an AND
8281 whose constant is the AND of that constant with MASK. If it
8282 remains an AND of MASK, delete it since it is redundant. */
8284 if (CONST_INT_P (XEXP (x, 1)))
8286 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8287 mask & INTVAL (XEXP (x, 1)));
8289 /* If X is still an AND, see if it is an AND with a mask that
8290 is just some low-order bits. If so, and it is MASK, we don't
8291 need it. */
8293 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8294 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8295 == mask))
8296 x = XEXP (x, 0);
8298 /* If it remains an AND, try making another AND with the bits
8299 in the mode mask that aren't in MASK turned on. If the
8300 constant in the AND is wide enough, this might make a
8301 cheaper constant. */
8303 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8304 && GET_MODE_MASK (GET_MODE (x)) != mask
8305 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8307 unsigned HOST_WIDE_INT cval
8308 = UINTVAL (XEXP (x, 1))
8309 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8310 rtx y;
8312 y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
8313 gen_int_mode (cval, GET_MODE (x)));
8314 if (set_src_cost (y, optimize_this_for_speed_p)
8315 < set_src_cost (x, optimize_this_for_speed_p))
8316 x = y;
8319 break;
8322 goto binop;
8324 case PLUS:
8325 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8326 low-order bits (as in an alignment operation) and FOO is already
8327 aligned to that boundary, mask C1 to that boundary as well.
8328 This may eliminate that PLUS and, later, the AND. */
8331 unsigned int width = GET_MODE_PRECISION (mode);
8332 unsigned HOST_WIDE_INT smask = mask;
8334 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8335 number, sign extend it. */
8337 if (width < HOST_BITS_PER_WIDE_INT
8338 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8339 smask |= HOST_WIDE_INT_M1U << width;
8341 if (CONST_INT_P (XEXP (x, 1))
8342 && exact_log2 (- smask) >= 0
8343 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8344 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8345 return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8346 (INTVAL (XEXP (x, 1)) & smask)),
8347 mode, smask, next_select);
8350 /* ... fall through ... */
8352 case MULT:
8353 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8354 most significant bit in MASK since carries from those bits will
8355 affect the bits we are interested in. */
8356 mask = fuller_mask;
8357 goto binop;
8359 case MINUS:
8360 /* If X is (minus C Y) where C's least set bit is larger than any bit
8361 in the mask, then we may replace with (neg Y). */
8362 if (CONST_INT_P (XEXP (x, 0))
8363 && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
8365 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8366 GET_MODE (x));
8367 return force_to_mode (x, mode, mask, next_select);
8370 /* Similarly, if C contains every bit in the fuller_mask, then we may
8371 replace with (not Y). */
8372 if (CONST_INT_P (XEXP (x, 0))
8373 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8375 x = simplify_gen_unary (NOT, GET_MODE (x),
8376 XEXP (x, 1), GET_MODE (x));
8377 return force_to_mode (x, mode, mask, next_select);
8380 mask = fuller_mask;
8381 goto binop;
8383 case IOR:
8384 case XOR:
8385 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8386 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8387 operation which may be a bitfield extraction. Ensure that the
8388 constant we form is not wider than the mode of X. */
8390 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8391 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8392 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8393 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8394 && CONST_INT_P (XEXP (x, 1))
8395 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8396 + floor_log2 (INTVAL (XEXP (x, 1))))
8397 < GET_MODE_PRECISION (GET_MODE (x)))
8398 && (UINTVAL (XEXP (x, 1))
8399 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8401 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8402 << INTVAL (XEXP (XEXP (x, 0), 1)),
8403 GET_MODE (x));
8404 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8405 XEXP (XEXP (x, 0), 0), temp);
8406 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8407 XEXP (XEXP (x, 0), 1));
8408 return force_to_mode (x, mode, mask, next_select);
8411 binop:
8412 /* For most binary operations, just propagate into the operation and
8413 change the mode if we have an operation of that mode. */
8415 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8416 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8418 /* If we ended up truncating both operands, truncate the result of the
8419 operation instead. */
8420 if (GET_CODE (op0) == TRUNCATE
8421 && GET_CODE (op1) == TRUNCATE)
8423 op0 = XEXP (op0, 0);
8424 op1 = XEXP (op1, 0);
8427 op0 = gen_lowpart_or_truncate (op_mode, op0);
8428 op1 = gen_lowpart_or_truncate (op_mode, op1);
8430 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8431 x = simplify_gen_binary (code, op_mode, op0, op1);
8432 break;
8434 case ASHIFT:
8435 /* For left shifts, do the same, but just for the first operand.
8436 However, we cannot do anything with shifts where we cannot
8437 guarantee that the counts are smaller than the size of the mode
8438 because such a count will have a different meaning in a
8439 wider mode. */
8441 if (! (CONST_INT_P (XEXP (x, 1))
8442 && INTVAL (XEXP (x, 1)) >= 0
8443 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8444 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8445 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8446 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8447 break;
8449 /* If the shift count is a constant and we can do arithmetic in
8450 the mode of the shift, refine which bits we need. Otherwise, use the
8451 conservative form of the mask. */
8452 if (CONST_INT_P (XEXP (x, 1))
8453 && INTVAL (XEXP (x, 1)) >= 0
8454 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8455 && HWI_COMPUTABLE_MODE_P (op_mode))
8456 mask >>= INTVAL (XEXP (x, 1));
8457 else
8458 mask = fuller_mask;
8460 op0 = gen_lowpart_or_truncate (op_mode,
8461 force_to_mode (XEXP (x, 0), op_mode,
8462 mask, next_select));
8464 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8465 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8466 break;
8468 case LSHIFTRT:
8469 /* Here we can only do something if the shift count is a constant,
8470 this shift constant is valid for the host, and we can do arithmetic
8471 in OP_MODE. */
8473 if (CONST_INT_P (XEXP (x, 1))
8474 && INTVAL (XEXP (x, 1)) >= 0
8475 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8476 && HWI_COMPUTABLE_MODE_P (op_mode))
8478 rtx inner = XEXP (x, 0);
8479 unsigned HOST_WIDE_INT inner_mask;
8481 /* Select the mask of the bits we need for the shift operand. */
8482 inner_mask = mask << INTVAL (XEXP (x, 1));
8484 /* We can only change the mode of the shift if we can do arithmetic
8485 in the mode of the shift and INNER_MASK is no wider than the
8486 width of X's mode. */
8487 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8488 op_mode = GET_MODE (x);
8490 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8492 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8493 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8496 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8497 shift and AND produces only copies of the sign bit (C2 is one less
8498 than a power of two), we can do this with just a shift. */
8500 if (GET_CODE (x) == LSHIFTRT
8501 && CONST_INT_P (XEXP (x, 1))
8502 /* The shift puts one of the sign bit copies in the least significant
8503 bit. */
8504 && ((INTVAL (XEXP (x, 1))
8505 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8506 >= GET_MODE_PRECISION (GET_MODE (x)))
8507 && exact_log2 (mask + 1) >= 0
8508 /* Number of bits left after the shift must be more than the mask
8509 needs. */
8510 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8511 <= GET_MODE_PRECISION (GET_MODE (x)))
8512 /* Must be more sign bit copies than the mask needs. */
8513 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8514 >= exact_log2 (mask + 1)))
8515 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8516 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8517 - exact_log2 (mask + 1)));
8519 goto shiftrt;
8521 case ASHIFTRT:
8522 /* If we are just looking for the sign bit, we don't need this shift at
8523 all, even if it has a variable count. */
8524 if (val_signbit_p (GET_MODE (x), mask))
8525 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8527 /* If this is a shift by a constant, get a mask that contains those bits
8528 that are not copies of the sign bit. We then have two cases: If
8529 MASK only includes those bits, this can be a logical shift, which may
8530 allow simplifications. If MASK is a single-bit field not within
8531 those bits, we are requesting a copy of the sign bit and hence can
8532 shift the sign bit to the appropriate location. */
8534 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8535 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8537 int i;
8539 /* If the considered data is wider than HOST_WIDE_INT, we can't
8540 represent a mask for all its bits in a single scalar.
8541 But we only care about the lower bits, so calculate these. */
8543 if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8545 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8547 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8548 is the number of bits a full-width mask would have set.
8549 We need only shift if these are fewer than nonzero can
8550 hold. If not, we must keep all bits set in nonzero. */
8552 if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8553 < HOST_BITS_PER_WIDE_INT)
8554 nonzero >>= INTVAL (XEXP (x, 1))
8555 + HOST_BITS_PER_WIDE_INT
8556 - GET_MODE_PRECISION (GET_MODE (x));
8558 else
8560 nonzero = GET_MODE_MASK (GET_MODE (x));
8561 nonzero >>= INTVAL (XEXP (x, 1));
8564 if ((mask & ~nonzero) == 0)
8566 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8567 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8568 if (GET_CODE (x) != ASHIFTRT)
8569 return force_to_mode (x, mode, mask, next_select);
8572 else if ((i = exact_log2 (mask)) >= 0)
8574 x = simplify_shift_const
8575 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8576 GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8578 if (GET_CODE (x) != ASHIFTRT)
8579 return force_to_mode (x, mode, mask, next_select);
8583 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8584 even if the shift count isn't a constant. */
8585 if (mask == 1)
8586 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8587 XEXP (x, 0), XEXP (x, 1));
8589 shiftrt:
8591 /* If this is a zero- or sign-extension operation that just affects bits
8592 we don't care about, remove it. Be sure the call above returned
8593 something that is still a shift. */
8595 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8596 && CONST_INT_P (XEXP (x, 1))
8597 && INTVAL (XEXP (x, 1)) >= 0
8598 && (INTVAL (XEXP (x, 1))
8599 <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8600 && GET_CODE (XEXP (x, 0)) == ASHIFT
8601 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8602 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8603 next_select);
8605 break;
8607 case ROTATE:
8608 case ROTATERT:
8609 /* If the shift count is constant and we can do computations
8610 in the mode of X, compute where the bits we care about are.
8611 Otherwise, we can't do anything. Don't change the mode of
8612 the shift or propagate MODE into the shift, though. */
8613 if (CONST_INT_P (XEXP (x, 1))
8614 && INTVAL (XEXP (x, 1)) >= 0)
8616 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8617 GET_MODE (x),
8618 gen_int_mode (mask, GET_MODE (x)),
8619 XEXP (x, 1));
8620 if (temp && CONST_INT_P (temp))
8621 x = simplify_gen_binary (code, GET_MODE (x),
8622 force_to_mode (XEXP (x, 0), GET_MODE (x),
8623 INTVAL (temp), next_select),
8624 XEXP (x, 1));
8626 break;
8628 case NEG:
8629 /* If we just want the low-order bit, the NEG isn't needed since it
8630 won't change the low-order bit. */
8631 if (mask == 1)
8632 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8634 /* We need any bits less significant than the most significant bit in
8635 MASK since carries from those bits will affect the bits we are
8636 interested in. */
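      /* For instance, with MASK == 4 and X == 3, (neg X) has bit 2 set
	 (-3 is ...11101), but (neg (and X 4)) is (neg 0) == 0; so the
	 low-order bits of X must be preserved via FULLER_MASK.  */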
8637 mask = fuller_mask;
8638 goto unop;
8640 case NOT:
8641 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8642 same as the XOR case above. Ensure that the constant we form is not
8643 wider than the mode of X. */
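      /* Illustration: with MASK == 0xff, (not (lshiftrt FOO 8)) becomes
	 (lshiftrt (xor FOO 0xff00) 8); the low 8 bits of both are the
	 complement of bits 8..15 of FOO.  */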
8645 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8646 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8647 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8648 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8649 < GET_MODE_PRECISION (GET_MODE (x)))
8650 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8652 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8653 GET_MODE (x));
8654 temp = simplify_gen_binary (XOR, GET_MODE (x),
8655 XEXP (XEXP (x, 0), 0), temp);
8656 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8657 temp, XEXP (XEXP (x, 0), 1));
8659 return force_to_mode (x, mode, mask, next_select);
8662 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8663 use the full mask inside the NOT. */
8664 mask = fuller_mask;
8666 unop:
8667 op0 = gen_lowpart_or_truncate (op_mode,
8668 force_to_mode (XEXP (x, 0), mode, mask,
8669 next_select));
8670 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8671 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8672 break;
8674 case NE:
8675 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8676 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8677 which is equal to STORE_FLAG_VALUE. */
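      /* For example, with STORE_FLAG_VALUE == 1 and nonzero_bits (FOO) == 1,
	 (ne FOO 0) always has the same value as FOO itself, so
	 (and (ne FOO 0) 1) is just (and FOO 1).  */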
8678 if ((mask & ~STORE_FLAG_VALUE) == 0
8679 && XEXP (x, 1) == const0_rtx
8680 && GET_MODE (XEXP (x, 0)) == mode
8681 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8682 && (nonzero_bits (XEXP (x, 0), mode)
8683 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8684 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8686 break;
8688 case IF_THEN_ELSE:
8689 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8690 written in a narrower mode. We play it safe and do not do so. */
8692 op0 = gen_lowpart_or_truncate (GET_MODE (x),
8693 force_to_mode (XEXP (x, 1), mode,
8694 mask, next_select));
8695 op1 = gen_lowpart_or_truncate (GET_MODE (x),
8696 force_to_mode (XEXP (x, 2), mode,
8697 mask, next_select));
8698 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
8699 x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
8700 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
8701 op0, op1);
8702 break;
8704 default:
8705 break;
8708 /* Ensure we return a value of the proper mode. */
8709 return gen_lowpart_or_truncate (mode, x);
8712 /* Return nonzero if X is an expression that has one of two values depending on
8713 whether some other value is zero or nonzero. In that case, we return the
8714 value that is being tested, *PTRUE is set to the value if the rtx being
8715 returned has a nonzero value, and *PFALSE is set to the other alternative.
8717 If we return zero, we set *PTRUE and *PFALSE to X. */
8719 static rtx
8720 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8722 machine_mode mode = GET_MODE (x);
8723 enum rtx_code code = GET_CODE (x);
8724 rtx cond0, cond1, true0, true1, false0, false1;
8725 unsigned HOST_WIDE_INT nz;
8727 /* If we are comparing a value against zero, we are done. */
8728 if ((code == NE || code == EQ)
8729 && XEXP (x, 1) == const0_rtx)
8731 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8732 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8733 return XEXP (x, 0);
8736 /* If this is a unary operation whose operand has one of two values, apply
8737 our opcode to compute those values. */
8738 else if (UNARY_P (x)
8739 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8741 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8742 *pfalse = simplify_gen_unary (code, mode, false0,
8743 GET_MODE (XEXP (x, 0)));
8744 return cond0;
8747 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8748 make can't possibly match and would suppress other optimizations. */
8749 else if (code == COMPARE)
8752 /* If this is a binary operation, see if either side has only one of two
8753 values. If either one does or if both do and they are conditional on
8754 the same value, compute the new true and false values. */
8755 else if (BINARY_P (x))
8757 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8758 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8760 if ((cond0 != 0 || cond1 != 0)
8761 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8763 /* If if_then_else_cond returned zero, then true/false are the
8764 same rtl. We must copy one of them to prevent invalid rtl
8765 sharing. */
8766 if (cond0 == 0)
8767 true0 = copy_rtx (true0);
8768 else if (cond1 == 0)
8769 true1 = copy_rtx (true1);
8771 if (COMPARISON_P (x))
8773 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8774 true0, true1);
8775 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8776 false0, false1);
8778 else
8780 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8781 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8784 return cond0 ? cond0 : cond1;
8787 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8788 operands is zero when the other is nonzero, and vice-versa,
8789 and STORE_FLAG_VALUE is 1 or -1. */
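  /* For instance (with STORE_FLAG_VALUE == 1),
     (plus (mult (eq A B) 6) (mult (ne A B) 9)) is 6 when A == B and 9
     otherwise, so we return (eq A B) with *PTRUE simplifying to 6 and
     *PFALSE to 9.  */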
8791 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8792 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8793 || code == UMAX)
8794 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8796 rtx op0 = XEXP (XEXP (x, 0), 1);
8797 rtx op1 = XEXP (XEXP (x, 1), 1);
8799 cond0 = XEXP (XEXP (x, 0), 0);
8800 cond1 = XEXP (XEXP (x, 1), 0);
8802 if (COMPARISON_P (cond0)
8803 && COMPARISON_P (cond1)
8804 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8805 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8806 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8807 || ((swap_condition (GET_CODE (cond0))
8808 == reversed_comparison_code (cond1, NULL))
8809 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8810 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8811 && ! side_effects_p (x))
8813 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8814 *pfalse = simplify_gen_binary (MULT, mode,
8815 (code == MINUS
8816 ? simplify_gen_unary (NEG, mode,
8817 op1, mode)
8818 : op1),
8819 const_true_rtx);
8820 return cond0;
8824 /* Similarly for MULT, AND and UMIN, except that for these the result
8825 is always zero. */
8826 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8827 && (code == MULT || code == AND || code == UMIN)
8828 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8830 cond0 = XEXP (XEXP (x, 0), 0);
8831 cond1 = XEXP (XEXP (x, 1), 0);
8833 if (COMPARISON_P (cond0)
8834 && COMPARISON_P (cond1)
8835 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8836 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8837 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8838 || ((swap_condition (GET_CODE (cond0))
8839 == reversed_comparison_code (cond1, NULL))
8840 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8841 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8842 && ! side_effects_p (x))
8844 *ptrue = *pfalse = const0_rtx;
8845 return cond0;
8850 else if (code == IF_THEN_ELSE)
8852 /* If we have IF_THEN_ELSE already, extract the condition and
8853 canonicalize it if it is NE or EQ. */
8854 cond0 = XEXP (x, 0);
8855 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8856 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8857 return XEXP (cond0, 0);
8858 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8860 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8861 return XEXP (cond0, 0);
8863 else
8864 return cond0;
8867 /* If X is a SUBREG, we can narrow both the true and false values
8868 of the inner expression, if there is a condition. */
8869 else if (code == SUBREG
8870 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8871 &true0, &false0)))
8873 true0 = simplify_gen_subreg (mode, true0,
8874 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8875 false0 = simplify_gen_subreg (mode, false0,
8876 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8877 if (true0 && false0)
8879 *ptrue = true0;
8880 *pfalse = false0;
8881 return cond0;
8885 /* If X is a constant, this isn't special and will cause confusion
8886 if we treat it as such. Likewise if it is equivalent to a constant. */
8887 else if (CONSTANT_P (x)
8888 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8891 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8892 will be least confusing to the rest of the compiler. */
8893 else if (mode == BImode)
8895 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8896 return x;
8899 /* If X is known to be either 0 or -1, those are the true and
8900 false values when testing X. */
8901 else if (x == constm1_rtx || x == const0_rtx
8902 || (mode != VOIDmode
8903 && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
8905 *ptrue = constm1_rtx, *pfalse = const0_rtx;
8906 return x;
8909 /* Likewise for 0 or a single bit. */
8910 else if (HWI_COMPUTABLE_MODE_P (mode)
8911 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8913 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8914 return x;
8917 /* Otherwise fail; show no condition with true and false values the same. */
8918 *ptrue = *pfalse = x;
8919 return 0;
8922 /* Return the value of expression X given the fact that condition COND
8923 is known to be true when applied to REG as its first operand and VAL
8924 as its second. X is known to not be shared and so can be modified in
8925 place.
8927 We only handle the simplest cases, and specifically those cases that
8928 arise with IF_THEN_ELSE expressions. */
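/* For instance, if COND is GE and VAL is const0_rtx, (abs REG) reduces to
   REG below; and if X is itself the comparison (gt REG VAL) with COND == GT,
   it reduces to const_true_rtx because GT dominates GT.  */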
8930 static rtx
8931 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8933 enum rtx_code code = GET_CODE (x);
8934 rtx temp;
8935 const char *fmt;
8936 int i, j;
8938 if (side_effects_p (x))
8939 return x;
8941 /* If either operand of the condition is a floating point value,
8942 then we have to avoid collapsing an EQ comparison. */
8943 if (cond == EQ
8944 && rtx_equal_p (x, reg)
8945 && ! FLOAT_MODE_P (GET_MODE (x))
8946 && ! FLOAT_MODE_P (GET_MODE (val)))
8947 return val;
8949 if (cond == UNEQ && rtx_equal_p (x, reg))
8950 return val;
8952 /* If X is (abs REG) and we know something about REG's relationship
8953 with zero, we may be able to simplify this. */
8955 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8956 switch (cond)
8958 case GE: case GT: case EQ:
8959 return XEXP (x, 0);
8960 case LT: case LE:
8961 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8962 XEXP (x, 0),
8963 GET_MODE (XEXP (x, 0)));
8964 default:
8965 break;
8968 /* The only other cases we handle are MIN, MAX, and comparisons if the
8969 operands are the same as REG and VAL. */
8971 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8973 if (rtx_equal_p (XEXP (x, 0), val))
8974 cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8976 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8978 if (COMPARISON_P (x))
8980 if (comparison_dominates_p (cond, code))
8981 return const_true_rtx;
8983 code = reversed_comparison_code (x, NULL);
8984 if (code != UNKNOWN
8985 && comparison_dominates_p (cond, code))
8986 return const0_rtx;
8987 else
8988 return x;
8990 else if (code == SMAX || code == SMIN
8991 || code == UMIN || code == UMAX)
8993 int unsignedp = (code == UMIN || code == UMAX);
8995 /* Do not reverse the condition when it is NE or EQ.
8996 This is because we cannot conclude anything about
8997 the value of 'SMAX (x, y)' when x is not equal to y,
8998 but we can when x equals y. */
8999 if ((code == SMAX || code == UMAX)
9000 && ! (cond == EQ || cond == NE))
9001 cond = reverse_condition (cond);
9003 switch (cond)
9005 case GE: case GT:
9006 return unsignedp ? x : XEXP (x, 1);
9007 case LE: case LT:
9008 return unsignedp ? x : XEXP (x, 0);
9009 case GEU: case GTU:
9010 return unsignedp ? XEXP (x, 1) : x;
9011 case LEU: case LTU:
9012 return unsignedp ? XEXP (x, 0) : x;
9013 default:
9014 break;
9019 else if (code == SUBREG)
9021 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9022 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9024 if (SUBREG_REG (x) != r)
9026 /* We must simplify subreg here, before we lose track of the
9027 original inner_mode. */
9028 new_rtx = simplify_subreg (GET_MODE (x), r,
9029 inner_mode, SUBREG_BYTE (x));
9030 if (new_rtx)
9031 return new_rtx;
9032 else
9033 SUBST (SUBREG_REG (x), r);
9036 return x;
9038 /* We don't have to handle SIGN_EXTEND here, because even in the
9039 case of replacing something with a modeless CONST_INT, a
9040 CONST_INT is already (supposed to be) a valid sign extension for
9041 its narrower mode, which implies it's already properly
9042 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9043 story is different. */
9044 else if (code == ZERO_EXTEND)
9046 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9047 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9049 if (XEXP (x, 0) != r)
9051 /* We must simplify the zero_extend here, before we lose
9052 track of the original inner_mode. */
9053 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9054 r, inner_mode);
9055 if (new_rtx)
9056 return new_rtx;
9057 else
9058 SUBST (XEXP (x, 0), r);
9061 return x;
9064 fmt = GET_RTX_FORMAT (code);
9065 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9067 if (fmt[i] == 'e')
9068 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9069 else if (fmt[i] == 'E')
9070 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9071 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9072 cond, reg, val));
9075 return x;
9078 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9079 assignment as a field assignment. */
9081 static int
9082 rtx_equal_for_field_assignment_p (rtx x, rtx y)
9084 if (x == y || rtx_equal_p (x, y))
9085 return 1;
9087 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9088 return 0;
9090 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9091 Note that all SUBREGs of MEM are paradoxical; otherwise they
9092 would have been rewritten. */
9093 if (MEM_P (x) && GET_CODE (y) == SUBREG
9094 && MEM_P (SUBREG_REG (y))
9095 && rtx_equal_p (SUBREG_REG (y),
9096 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9097 return 1;
9099 if (MEM_P (y) && GET_CODE (x) == SUBREG
9100 && MEM_P (SUBREG_REG (x))
9101 && rtx_equal_p (SUBREG_REG (x),
9102 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9103 return 1;
9105 /* We used to see if get_last_value of X and Y were the same but that's
9106 not correct. In one direction, we'll cause the assignment to have
9107 the wrong destination and in the other case, we'll import a register into this
9108 insn that might already have been dead. So fail if none of the
9109 above cases are true. */
9110 return 0;
9113 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9114 Return that assignment if so.
9116 We only handle the most common cases. */
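/* For example, (set D (ior (ashift (const_int 1) POS) D)) sets a single bit
   and is rewritten below as (set (zero_extract D 1 POS) (const_int 1));
   the (and (rotate (const_int -2) POS) D) form likewise clears that bit.  */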
9118 static rtx
9119 make_field_assignment (rtx x)
9121 rtx dest = SET_DEST (x);
9122 rtx src = SET_SRC (x);
9123 rtx assign;
9124 rtx rhs, lhs;
9125 HOST_WIDE_INT c1;
9126 HOST_WIDE_INT pos;
9127 unsigned HOST_WIDE_INT len;
9128 rtx other;
9129 machine_mode mode;
9131 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9132 a clear of a one-bit field. We will have changed it to
9133 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9134 for a SUBREG. */
9136 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9137 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9138 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9139 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9141 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9142 1, 1, 1, 0);
9143 if (assign != 0)
9144 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
9145 return x;
9148 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9149 && subreg_lowpart_p (XEXP (src, 0))
9150 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9151 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9152 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9153 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9154 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9155 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9157 assign = make_extraction (VOIDmode, dest, 0,
9158 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9159 1, 1, 1, 0);
9160 if (assign != 0)
9161 return gen_rtx_SET (VOIDmode, assign, const0_rtx);
9162 return x;
9165 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9166 one-bit field. */
9167 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9168 && XEXP (XEXP (src, 0), 0) == const1_rtx
9169 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9171 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9172 1, 1, 1, 0);
9173 if (assign != 0)
9174 return gen_rtx_SET (VOIDmode, assign, const1_rtx);
9175 return x;
9178 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9179 SRC is an AND with all bits of that field set, then we can discard
9180 the AND. */
9181 if (GET_CODE (dest) == ZERO_EXTRACT
9182 && CONST_INT_P (XEXP (dest, 1))
9183 && GET_CODE (src) == AND
9184 && CONST_INT_P (XEXP (src, 1)))
9186 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9187 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9188 unsigned HOST_WIDE_INT ze_mask;
9190 if (width >= HOST_BITS_PER_WIDE_INT)
9191 ze_mask = -1;
9192 else
9193 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9195 /* Complete overlap. We can remove the source AND. */
9196 if ((and_mask & ze_mask) == ze_mask)
9197 return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
9199 /* Partial overlap. We can reduce the source AND. */
9200 if ((and_mask & ze_mask) != and_mask)
9202 mode = GET_MODE (src);
9203 src = gen_rtx_AND (mode, XEXP (src, 0),
9204 gen_int_mode (and_mask & ze_mask, mode));
9205 return gen_rtx_SET (VOIDmode, dest, src);
9209 /* The other case we handle is assignments into a constant-position
9210 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9211 a mask that has all one bits except for a group of zero bits and
9212 OTHER is known to have zeros where C1 has ones, this is such an
9213 assignment. Compute the position and length from C1. Shift OTHER
9214 to the appropriate position, force it to the required mode, and
9215 make the extraction. Check for the AND in both operands. */
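  /* A concrete SImode instance:
     (set D (ior (and D (const_int 0xfffff00f)) OTHER))
     where OTHER is known to be zero outside bits 4..11.  The zero bits of C1
     give POS = 4 and LEN = 8, and the source of the new assignment becomes
     OTHER shifted right by 4 and restricted to 8 bits.  */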
9217 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9218 return x;
9220 rhs = expand_compound_operation (XEXP (src, 0));
9221 lhs = expand_compound_operation (XEXP (src, 1));
9223 if (GET_CODE (rhs) == AND
9224 && CONST_INT_P (XEXP (rhs, 1))
9225 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9226 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9227 else if (GET_CODE (lhs) == AND
9228 && CONST_INT_P (XEXP (lhs, 1))
9229 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9230 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9231 else
9232 return x;
9234 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9235 if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9236 || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9237 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9238 return x;
9240 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9241 if (assign == 0)
9242 return x;
9244 /* The mode to use for the source is the mode of the assignment, or of
9245 what is inside a possible STRICT_LOW_PART. */
9246 mode = (GET_CODE (assign) == STRICT_LOW_PART
9247 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9249 /* Shift OTHER right POS places and make it the source, restricting it
9250 to the proper length and mode. */
9252 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9253 GET_MODE (src),
9254 other, pos),
9255 dest);
9256 src = force_to_mode (src, mode,
9257 GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9258 ? ~(unsigned HOST_WIDE_INT) 0
9259 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9262 /* If SRC is masked by an AND that does not make a difference in
9263 the value being stored, strip it. */
9264 if (GET_CODE (assign) == ZERO_EXTRACT
9265 && CONST_INT_P (XEXP (assign, 1))
9266 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9267 && GET_CODE (src) == AND
9268 && CONST_INT_P (XEXP (src, 1))
9269 && UINTVAL (XEXP (src, 1))
9270 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9271 src = XEXP (src, 0);
9273 return gen_rtx_SET (VOIDmode, assign, src);
9276 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9277 if so. */
9279 static rtx
9280 apply_distributive_law (rtx x)
9282 enum rtx_code code = GET_CODE (x);
9283 enum rtx_code inner_code;
9284 rtx lhs, rhs, other;
9285 rtx tem;
9287 /* Distributivity is not true for floating point as it can change the
9288 value. So we don't do it unless -funsafe-math-optimizations. */
9289 if (FLOAT_MODE_P (GET_MODE (x))
9290 && ! flag_unsafe_math_optimizations)
9291 return x;
9293 /* The outer operation can only be one of the following: */
9294 if (code != IOR && code != AND && code != XOR
9295 && code != PLUS && code != MINUS)
9296 return x;
9298 lhs = XEXP (x, 0);
9299 rhs = XEXP (x, 1);
9301 /* If either operand is a primitive we can't do anything, so get out
9302 fast. */
9303 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9304 return x;
9306 lhs = expand_compound_operation (lhs);
9307 rhs = expand_compound_operation (rhs);
9308 inner_code = GET_CODE (lhs);
9309 if (inner_code != GET_CODE (rhs))
9310 return x;
9312 /* See if the inner and outer operations distribute. */
9313 switch (inner_code)
9315 case LSHIFTRT:
9316 case ASHIFTRT:
9317 case AND:
9318 case IOR:
9319 /* These all distribute except over PLUS. */
9320 if (code == PLUS || code == MINUS)
9321 return x;
9322 break;
9324 case MULT:
9325 if (code != PLUS && code != MINUS)
9326 return x;
9327 break;
9329 case ASHIFT:
9330 /* This is also a multiply, so it distributes over everything. */
9331 break;
9333 /* This used to handle SUBREG, but this turned out to be counter-
9334 productive, since (subreg (op ...)) usually is not handled by
9335 insn patterns, and this "optimization" therefore transformed
9336 recognizable patterns into unrecognizable ones. Therefore the
9337 SUBREG case was removed from here.
9339 It is possible that distributing SUBREG over arithmetic operations
9340 leads to an intermediate result that can then be optimized further,
9341 e.g. by moving the outer SUBREG to the other side of a SET as done
9342 in simplify_set. This seems to have been the original intent of
9343 handling SUBREGs here.
9345 However, with current GCC this does not appear to actually happen,
9346 at least on major platforms. If some case is found where removing
9347 the SUBREG case here prevents follow-on optimizations, distributing
9348 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9350 default:
9351 return x;
9354 /* Set LHS and RHS to the inner operands (A and B in the example
9355 above) and set OTHER to the common operand (C in the example).
9356 There is only one way to do this unless the inner operation is
9357 commutative. */
9358 if (COMMUTATIVE_ARITH_P (lhs)
9359 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9360 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9361 else if (COMMUTATIVE_ARITH_P (lhs)
9362 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9363 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9364 else if (COMMUTATIVE_ARITH_P (lhs)
9365 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9366 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9367 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9368 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9369 else
9370 return x;
9372 /* Form the new inner operation, seeing if it simplifies first. */
9373 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9375 /* There is one exception to the general way of distributing:
9376 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9377 if (code == XOR && inner_code == IOR)
9379 inner_code = AND;
9380 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9383 /* We may be able to continue distributing the result, so call
9384 ourselves recursively on the inner operation before forming the
9385 outer operation, which we return. */
9386 return simplify_gen_binary (inner_code, GET_MODE (x),
9387 apply_distributive_law (tem), other);
9390 /* See if X is of the form (* (+ A B) C), and if so convert to
9391 (+ (* A C) (* B C)) and try to simplify.
9393 Most of the time, this results in no change. However, if some of
9394 the operands are the same or inverses of each other, simplifications
9395 will result.
9397 For example, (and (ior A B) (not B)) can occur as the result of
9398 expanding a bit field assignment. When we apply the distributive
9399 law to this, we get (ior (and A (not B)) (and B (not B))),
9400 which then simplifies to (and A (not B)).
9402 Note that no checks happen on the validity of applying the inverse
9403 distributive law. This is pointless since we can do it in the
9404 few places where this routine is called.
9406 N is the index of the term that is decomposed (the arithmetic operation,
9407 i.e. (+ A B) in the first example above). !N is the index of the term that
9408 is distributed, i.e. of C in the first example above. */
9409 static rtx
9410 distribute_and_simplify_rtx (rtx x, int n)
9412 machine_mode mode;
9413 enum rtx_code outer_code, inner_code;
9414 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9416 /* Distributivity is not true for floating point as it can change the
9417 value. So we don't do it unless -funsafe-math-optimizations. */
9418 if (FLOAT_MODE_P (GET_MODE (x))
9419 && ! flag_unsafe_math_optimizations)
9420 return NULL_RTX;
9422 decomposed = XEXP (x, n);
9423 if (!ARITHMETIC_P (decomposed))
9424 return NULL_RTX;
9426 mode = GET_MODE (x);
9427 outer_code = GET_CODE (x);
9428 distributed = XEXP (x, !n);
9430 inner_code = GET_CODE (decomposed);
9431 inner_op0 = XEXP (decomposed, 0);
9432 inner_op1 = XEXP (decomposed, 1);
9434 /* Special case (and (xor B C) (not A)), which is equivalent to
9435 (xor (ior A B) (ior A C)) */
9436 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9438 distributed = XEXP (distributed, 0);
9439 outer_code = IOR;
9442 if (n == 0)
9444 /* Distribute the second term. */
9445 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9446 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9448 else
9450 /* Distribute the first term. */
9451 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9452 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9455 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9456 new_op0, new_op1));
9457 if (GET_CODE (tmp) != outer_code
9458 && (set_src_cost (tmp, optimize_this_for_speed_p)
9459 < set_src_cost (x, optimize_this_for_speed_p)))
9460 return tmp;
9462 return NULL_RTX;
9465 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9466 in MODE. Return an equivalent form, if different from (and VAROP
9467 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9469 static rtx
9470 simplify_and_const_int_1 (machine_mode mode, rtx varop,
9471 unsigned HOST_WIDE_INT constop)
9473 unsigned HOST_WIDE_INT nonzero;
9474 unsigned HOST_WIDE_INT orig_constop;
9475 rtx orig_varop;
9476 int i;
9478 orig_varop = varop;
9479 orig_constop = constop;
9480 if (GET_CODE (varop) == CLOBBER)
9481 return NULL_RTX;
9483 /* Simplify VAROP knowing that we will be only looking at some of the
9484 bits in it.
9486 Note by passing in CONSTOP, we guarantee that the bits not set in
9487 CONSTOP are not significant and will never be examined. We must
9488 ensure that is the case by explicitly masking out those bits
9489 before returning. */
9490 varop = force_to_mode (varop, mode, constop, 0);
9492 /* If VAROP is a CLOBBER, we will fail so return it. */
9493 if (GET_CODE (varop) == CLOBBER)
9494 return varop;
9496 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9497 to VAROP and return the new constant. */
9498 if (CONST_INT_P (varop))
9499 return gen_int_mode (INTVAL (varop) & constop, mode);
9501 /* See what bits may be nonzero in VAROP. Unlike the general case of
9502 a call to nonzero_bits, here we don't care about bits outside
9503 MODE. */
9505 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9507 /* Turn off all bits in the constant that are known to already be zero.
9508 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9509 which is tested below. */
9511 constop &= nonzero;
9513 /* If we don't have any bits left, return zero. */
9514 if (constop == 0)
9515 return const0_rtx;
9517 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9518 a power of two, we can replace this with an ASHIFT. */
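  /* E.g. if X is known to be 0 or 1, (neg X) is 0 or all ones, so
     (and (neg X) 8) equals (ashift X 3).  */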
9519 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9520 && (i = exact_log2 (constop)) >= 0)
9521 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9523 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9524 or XOR, then try to apply the distributive law. This may eliminate
9525 operations if either branch can be simplified because of the AND.
9526 It may also make some cases more complex, but those cases probably
9527 won't match a pattern either with or without this. */
9529 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9530 return
9531 gen_lowpart
9532 (mode,
9533 apply_distributive_law
9534 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9535 simplify_and_const_int (NULL_RTX,
9536 GET_MODE (varop),
9537 XEXP (varop, 0),
9538 constop),
9539 simplify_and_const_int (NULL_RTX,
9540 GET_MODE (varop),
9541 XEXP (varop, 1),
9542 constop))));
9544 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9545 the AND and see if one of the operands simplifies to zero. If so, we
9546 may eliminate it. */
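  /* E.g. (and (plus X 32) 7) is just (and X 7), since adding a multiple
     of 8 cannot change the low three bits.  */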
9548 if (GET_CODE (varop) == PLUS
9549 && exact_log2 (constop + 1) >= 0)
9551 rtx o0, o1;
9553 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9554 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9555 if (o0 == const0_rtx)
9556 return o1;
9557 if (o1 == const0_rtx)
9558 return o0;
9561 /* Make a SUBREG if necessary. If we can't make it, fail. */
9562 varop = gen_lowpart (mode, varop);
9563 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9564 return NULL_RTX;
9566 /* If we are only masking insignificant bits, return VAROP. */
9567 if (constop == nonzero)
9568 return varop;
9570 if (varop == orig_varop && constop == orig_constop)
9571 return NULL_RTX;
9573 /* Otherwise, return an AND. */
9574 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9578 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9579 in MODE.
9581 Return an equivalent form, if different from X. Otherwise, return X. If
9582 X is zero, we are to always construct the equivalent form. */
9584 static rtx
9585 simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
9586 unsigned HOST_WIDE_INT constop)
9588 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9589 if (tem)
9590 return tem;
9592 if (!x)
9593 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9594 gen_int_mode (constop, mode));
9595 if (GET_MODE (x) != mode)
9596 x = gen_lowpart (mode, x);
9597 return x;
9600 /* Given a REG, X, compute which bits in X can be nonzero.
9601 We don't care about bits outside of those defined in MODE.
9603 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9604 a shift, AND, or zero_extract, we can do better. */
9606 static rtx
9607 reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
9608 const_rtx known_x ATTRIBUTE_UNUSED,
9609 machine_mode known_mode ATTRIBUTE_UNUSED,
9610 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9611 unsigned HOST_WIDE_INT *nonzero)
9613 rtx tem;
9614 reg_stat_type *rsp;
9616 /* If X is a register whose nonzero bits value is current, use it.
9617 Otherwise, if X is a register whose value we can find, use that
9618 value. Otherwise, use the previously-computed global nonzero bits
9619 for this register. */
9621 rsp = &reg_stat[REGNO (x)];
9622 if (rsp->last_set_value != 0
9623 && (rsp->last_set_mode == mode
9624 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9625 && GET_MODE_CLASS (mode) == MODE_INT))
9626 && ((rsp->last_set_label >= label_tick_ebb_start
9627 && rsp->last_set_label < label_tick)
9628 || (rsp->last_set_label == label_tick
9629 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9630 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9631 && REG_N_SETS (REGNO (x)) == 1
9632 && !REGNO_REG_SET_P
9633 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9634 REGNO (x)))))
9636 unsigned HOST_WIDE_INT mask = rsp->last_set_nonzero_bits;
9638 if (GET_MODE_PRECISION (rsp->last_set_mode) < GET_MODE_PRECISION (mode))
9639 /* We don't know anything about the upper bits. */
9640 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (rsp->last_set_mode);
9642 *nonzero &= mask;
9643 return NULL;
9646 tem = get_last_value (x);
9648 if (tem)
9650 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9651 /* If X is narrower than MODE and TEM is a non-negative
9652 constant that would appear negative in the mode of X,
9653 sign-extend it for use in reg_nonzero_bits because some
9654 machines (maybe most) will actually do the sign-extension
9655 and this is the conservative approach.
9657 ??? For 2.5, try to tighten up the MD files in this regard
9658 instead of this kludge. */
9660 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
9661 && CONST_INT_P (tem)
9662 && INTVAL (tem) > 0
9663 && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
9664 tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
9665 #endif
9666 return tem;
9668 else if (nonzero_sign_valid && rsp->nonzero_bits)
9670 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9672 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9673 /* We don't know anything about the upper bits. */
9674 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9676 *nonzero &= mask;
9679 return NULL;
9682 /* Return the number of bits at the high-order end of X that are known to
9683 be equal to the sign bit. X will be used in mode MODE; if MODE is
9684 VOIDmode, X will be used in its own mode. The returned value will always
9685 be between 1 and the number of bits in MODE. */
9687 static rtx
9688 reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
9689 const_rtx known_x ATTRIBUTE_UNUSED,
9690 machine_mode known_mode
9691 ATTRIBUTE_UNUSED,
9692 unsigned int known_ret ATTRIBUTE_UNUSED,
9693 unsigned int *result)
9695 rtx tem;
9696 reg_stat_type *rsp;
9698 rsp = &reg_stat[REGNO (x)];
9699 if (rsp->last_set_value != 0
9700 && rsp->last_set_mode == mode
9701 && ((rsp->last_set_label >= label_tick_ebb_start
9702 && rsp->last_set_label < label_tick)
9703 || (rsp->last_set_label == label_tick
9704 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9705 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9706 && REG_N_SETS (REGNO (x)) == 1
9707 && !REGNO_REG_SET_P
9708 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9709 REGNO (x)))))
9711 *result = rsp->last_set_sign_bit_copies;
9712 return NULL;
9715 tem = get_last_value (x);
9716 if (tem != 0)
9717 return tem;
9719 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9720 && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9721 *result = rsp->sign_bit_copies;
9723 return NULL;
9726 /* Return the number of "extended" bits there are in X, when interpreted
9727 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9728 unsigned quantities, this is the number of high-order zero bits.
9729 For signed quantities, this is the number of copies of the sign bit
9730 minus 1. In both cases, this function returns the number of "spare"
9731 bits. For example, if two quantities for which this function returns
9732 at least 1 are added, the addition is known not to overflow.
9734 This function will always return 0 unless called during combine, which
9735 implies that it must be called from a define_split. */
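/* For example, in QImode an unsigned value known to fit in 5 bits has 3
   "spare" bits; adding two such values gives at most 62, which still fits
   in the 8-bit mode, so the sum cannot wrap.  */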
9737 unsigned int
9738 extended_count (const_rtx x, machine_mode mode, int unsignedp)
9740 if (nonzero_sign_valid == 0)
9741 return 0;
9743 return (unsignedp
9744 ? (HWI_COMPUTABLE_MODE_P (mode)
9745 ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9746 - floor_log2 (nonzero_bits (x, mode)))
9747 : 0)
9748 : num_sign_bit_copies (x, mode) - 1);
9751 /* This function is called from `simplify_shift_const' to merge two
9752 outer operations. Specifically, we have already found that we need
9753 to perform operation *POP0 with constant *PCONST0 at the outermost
9754 position. We would now like to also perform OP1 with constant CONST1
9755 (with *POP0 being done last).
9757 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9758 the resulting operation. *PCOMP_P is set to 1 if we would need to
9759 complement the innermost operand, otherwise it is unchanged.
9761 MODE is the mode in which the operation will be done. No bits outside
9762 the width of this mode matter. It is assumed that the width of this mode
9763 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9765 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
9766 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9767 result is simply *PCONST0.
9769 If the resulting operation cannot be expressed as one operation, we
9770 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
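/* For example, if *POP0 is IOR with *PCONST0 == 0xf0 and OP1 is IOR with
   CONST1 == 0x0f, the two merge into a single IOR with constant 0xff.  */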
9772 static int
9773 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
		 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
9775 enum rtx_code op0 = *pop0;
9776 HOST_WIDE_INT const0 = *pconst0;
9778 const0 &= GET_MODE_MASK (mode);
9779 const1 &= GET_MODE_MASK (mode);
9781 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9782 if (op0 == AND)
9783 const1 &= const0;
9785 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9786 if OP0 is SET. */
9788 if (op1 == UNKNOWN || op0 == SET)
9789 return 1;
9791 else if (op0 == UNKNOWN)
9792 op0 = op1, const0 = const1;
9794 else if (op0 == op1)
9796 switch (op0)
9798 case AND:
9799 const0 &= const1;
9800 break;
9801 case IOR:
9802 const0 |= const1;
9803 break;
9804 case XOR:
9805 const0 ^= const1;
9806 break;
9807 case PLUS:
9808 const0 += const1;
9809 break;
9810 case NEG:
9811 op0 = UNKNOWN;
9812 break;
9813 default:
9814 break;
9818 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9819 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9820 return 0;
9822 /* If the two constants aren't the same, we can't do anything. The
9823 remaining six cases can all be done. */
9824 else if (const0 != const1)
9825 return 0;
9827 else
9828 switch (op0)
9830 case IOR:
9831 if (op1 == AND)
9832 /* (a & b) | b == b */
9833 op0 = SET;
9834 else /* op1 == XOR */
9835 /* (a ^ b) | b == a | b */
9837 break;
9839 case XOR:
9840 if (op1 == AND)
9841 /* (a & b) ^ b == (~a) & b */
9842 op0 = AND, *pcomp_p = 1;
9843 else /* op1 == IOR */
9844 /* (a | b) ^ b == a & ~b */
9845 op0 = AND, const0 = ~const0;
9846 break;
9848 case AND:
9849 if (op1 == IOR)
9850 /* (a | b) & b == b */
9851 op0 = SET;
9852 else /* op1 == XOR */
9853 /* (a ^ b) & b == (~a) & b */
9854 *pcomp_p = 1;
9855 break;
9856 default:
9857 break;
9860 /* Check for NO-OP cases. */
9861 const0 &= GET_MODE_MASK (mode);
9862 if (const0 == 0
9863 && (op0 == IOR || op0 == XOR || op0 == PLUS))
9864 op0 = UNKNOWN;
9865 else if (const0 == 0 && op0 == AND)
9866 op0 = SET;
9867 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9868 && op0 == AND)
9869 op0 = UNKNOWN;
9871 *pop0 = op0;
9873 /* ??? Slightly redundant with the above mask, but not entirely.
9874 Moving this above means we'd have to sign-extend the mode mask
9875 for the final test. */
9876 if (op0 != UNKNOWN && op0 != NEG)
9877 *pconst0 = trunc_int_for_mode (const0, mode);
9879 return 1;
9882 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9883 the shift in. The original shift operation CODE is performed on OP in
9884 ORIG_MODE. Return the wider mode MODE if we can perform the operation
9885 in that mode. Return ORIG_MODE otherwise. We can also assume that the
9886 result of the shift is subject to operation OUTER_CODE with operand
9887 OUTER_CONST. */
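/* For instance, a QImode LSHIFTRT of a value whose SImode nonzero bits all
   lie within the QImode mask can be performed in SImode instead, because the
   bits shifted in from above are already known to be zero.  */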
9889 static machine_mode
9890 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9891 machine_mode orig_mode, machine_mode mode,
9892 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9894 if (orig_mode == mode)
9895 return mode;
9896 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
9898 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
9899 switch (code)
9901 case ASHIFTRT:
9902 /* We can still widen if the bits brought in from the left are identical
9903 to the sign bit of ORIG_MODE. */
9904 if (num_sign_bit_copies (op, mode)
9905 > (unsigned) (GET_MODE_PRECISION (mode)
9906 - GET_MODE_PRECISION (orig_mode)))
9907 return mode;
9908 return orig_mode;
9910 case LSHIFTRT:
9911 /* Similarly here but with zero bits. */
9912 if (HWI_COMPUTABLE_MODE_P (mode)
9913 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9914 return mode;
9916 /* We can also widen if the bits brought in will be masked off. This
9917 operation is performed in ORIG_MODE. */
9918 if (outer_code == AND)
9920 int care_bits = low_bitmask_len (orig_mode, outer_const);
9922 if (care_bits >= 0
9923 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
9924 return mode;
9926 /* fall through */
9928 case ROTATE:
9929 return orig_mode;
9931 case ROTATERT:
9932 gcc_unreachable ();
9934 default:
9935 return mode;
9939 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
9940 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
9941 if we cannot simplify it. Otherwise, return a simplified value.
9943 The shift is normally computed in the widest mode we find in VAROP, as
9944 long as it isn't a different number of words than RESULT_MODE. Exceptions
9945 are ASHIFTRT and ROTATE, which are always done in their original mode. */
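/* For example, (lshiftrt (lshiftrt X 2) 3) becomes (lshiftrt X 5) in the
   nested-shift handling below: same-direction shift counts simply add.  */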
9947 static rtx
9948 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
9949 rtx varop, int orig_count)
9951 enum rtx_code orig_code = code;
9952 rtx orig_varop = varop;
9953 int count;
9954 machine_mode mode = result_mode;
9955 machine_mode shift_mode, tmode;
9956 unsigned int mode_words
9957 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9958 /* We form (outer_op (code varop count) (outer_const)). */
9959 enum rtx_code outer_op = UNKNOWN;
9960 HOST_WIDE_INT outer_const = 0;
9961 int complement_p = 0;
9962 rtx new_rtx, x;
9964 /* Make sure to truncate the "natural" shift count on the way in. We don't
9965 want to do this inside the loop as it makes it more difficult to
9966 combine shifts. */
9967 if (SHIFT_COUNT_TRUNCATED)
9968 orig_count &= GET_MODE_BITSIZE (mode) - 1;
9970 /* If we were given an invalid count, don't do anything except exactly
9971 what was requested. */
9973 if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
9974 return NULL_RTX;
9976 count = orig_count;
9978 /* Unless one of the branches of the `if' in this loop does a `continue',
9979 we will `break' the loop after the `if'. */
9981 while (count != 0)
9983 /* If we have an operand of (clobber (const_int 0)), fail. */
9984 if (GET_CODE (varop) == CLOBBER)
9985 return NULL_RTX;
9987 /* Convert ROTATERT to ROTATE. */
9988 if (code == ROTATERT)
9990 unsigned int bitsize = GET_MODE_PRECISION (result_mode);
9991 code = ROTATE;
9992 if (VECTOR_MODE_P (result_mode))
9993 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9994 else
9995 count = bitsize - count;
9998 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9999 mode, outer_op, outer_const);
10001 /* Handle cases where the count is greater than the size of the mode
10002 minus 1. For ASHIFT, use the size minus one as the count (this can
10003 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10004 take the count modulo the size. For other shifts, the result is
10005 zero.
10007 Since these shifts are being produced by the compiler by combining
10008 multiple operations, each of which are defined, we know what the
10009 result is supposed to be. */
10011 if (count > (GET_MODE_PRECISION (shift_mode) - 1))
10013 if (code == ASHIFTRT)
10014 count = GET_MODE_PRECISION (shift_mode) - 1;
10015 else if (code == ROTATE || code == ROTATERT)
10016 count %= GET_MODE_PRECISION (shift_mode);
10017 else
10019 /* We can't simply return zero because there may be an
10020 outer op. */
10021 varop = const0_rtx;
10022 count = 0;
10023 break;
10027 /* If we discovered we had to complement VAROP, leave. Making a NOT
10028 here would cause an infinite loop. */
10029 if (complement_p)
10030 break;
10032 /* An arithmetic right shift of a quantity known to be -1 or 0
10033 is a no-op. */
10034 if (code == ASHIFTRT
10035 && (num_sign_bit_copies (varop, shift_mode)
10036 == GET_MODE_PRECISION (shift_mode)))
10038 count = 0;
10039 break;
10042 /* If we are doing an arithmetic right shift and discarding all but
10043 the sign bit copies, this is equivalent to doing a shift by the
10044 bitsize minus one. Convert it into that shift because it will often
10045 allow other simplifications. */
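      /* For instance, in SImode a value with 20 sign-bit copies shifted
	 right arithmetically by 15 can only produce 0 or -1, which is the
	 same result as shifting it by 31.  */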
10047 if (code == ASHIFTRT
10048 && (count + num_sign_bit_copies (varop, shift_mode)
10049 >= GET_MODE_PRECISION (shift_mode)))
10050 count = GET_MODE_PRECISION (shift_mode) - 1;
10052 /* We simplify the tests below and elsewhere by converting
10053 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10054 `make_compound_operation' will convert it to an ASHIFTRT for
10055 those machines (such as VAX) that don't have an LSHIFTRT. */
10056 if (code == ASHIFTRT
10057 && val_signbit_known_clear_p (shift_mode,
10058 nonzero_bits (varop, shift_mode)))
10059 code = LSHIFTRT;
10061 if (((code == LSHIFTRT
10062 && HWI_COMPUTABLE_MODE_P (shift_mode)
10063 && !(nonzero_bits (varop, shift_mode) >> count))
10064 || (code == ASHIFT
10065 && HWI_COMPUTABLE_MODE_P (shift_mode)
10066 && !((nonzero_bits (varop, shift_mode) << count)
10067 & GET_MODE_MASK (shift_mode))))
10068 && !side_effects_p (varop))
10069 varop = const0_rtx;
10071 switch (GET_CODE (varop))
10073 case SIGN_EXTEND:
10074 case ZERO_EXTEND:
10075 case SIGN_EXTRACT:
10076 case ZERO_EXTRACT:
10077 new_rtx = expand_compound_operation (varop);
10078 if (new_rtx != varop)
10080 varop = new_rtx;
10081 continue;
10083 break;
10085 case MEM:
10086 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10087 minus the width of a smaller mode, we can do this with a
10088 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10089 if ((code == ASHIFTRT || code == LSHIFTRT)
10090 && ! mode_dependent_address_p (XEXP (varop, 0),
10091 MEM_ADDR_SPACE (varop))
10092 && ! MEM_VOLATILE_P (varop)
10093 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10094 MODE_INT, 1)) != BLKmode)
10096 new_rtx = adjust_address_nv (varop, tmode,
10097 BYTES_BIG_ENDIAN ? 0
10098 : count / BITS_PER_UNIT);
10100 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10101 : ZERO_EXTEND, mode, new_rtx);
10102 count = 0;
10103 continue;
10105 break;
10107 case SUBREG:
10108 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10109 the same number of words as what we've seen so far. Then store
10110 the widest mode in MODE. */
10111 if (subreg_lowpart_p (varop)
10112 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10113 > GET_MODE_SIZE (GET_MODE (varop)))
10114 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10115 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10116 == mode_words
10117 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10118 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10120 varop = SUBREG_REG (varop);
10121 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10122 mode = GET_MODE (varop);
10123 continue;
10125 break;
10127 case MULT:
10128 /* Some machines use MULT instead of ASHIFT because MULT
10129 is cheaper. But it is still better on those machines to
10130 merge two shifts into one. */
10131 if (CONST_INT_P (XEXP (varop, 1))
10132 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10134 varop
10135 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10136 XEXP (varop, 0),
10137 GEN_INT (exact_log2 (
10138 UINTVAL (XEXP (varop, 1)))));
10139 continue;
10141 break;
10143 case UDIV:
10144 /* Similar, for when divides are cheaper. */
10145 if (CONST_INT_P (XEXP (varop, 1))
10146 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10148 varop
10149 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10150 XEXP (varop, 0),
10151 GEN_INT (exact_log2 (
10152 UINTVAL (XEXP (varop, 1)))));
10153 continue;
10155 break;
10157 case ASHIFTRT:
10158 /* If we are extracting just the sign bit of an arithmetic
10159 right shift, that shift is not needed. However, the sign
10160 bit of a wider mode may be different from what would be
10161 interpreted as the sign bit in a narrower mode, so, if
10162 the result is narrower, don't discard the shift. */
10163 if (code == LSHIFTRT
10164 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10165 && (GET_MODE_BITSIZE (result_mode)
10166 >= GET_MODE_BITSIZE (GET_MODE (varop))))
10168 varop = XEXP (varop, 0);
10169 continue;
10172 /* ... fall through ... */
10174 case LSHIFTRT:
10175 case ASHIFT:
10176 case ROTATE:
10177 /* Here we have two nested shifts. The result is usually the
10178 AND of a new shift with a mask. We compute the result below. */
10179 if (CONST_INT_P (XEXP (varop, 1))
10180 && INTVAL (XEXP (varop, 1)) >= 0
10181 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10182 && HWI_COMPUTABLE_MODE_P (result_mode)
10183 && HWI_COMPUTABLE_MODE_P (mode)
10184 && !VECTOR_MODE_P (result_mode))
10186 enum rtx_code first_code = GET_CODE (varop);
10187 unsigned int first_count = INTVAL (XEXP (varop, 1));
10188 unsigned HOST_WIDE_INT mask;
10189 rtx mask_rtx;
10191 /* We have one common special case. We can't do any merging if
10192 the inner code is an ASHIFTRT of a smaller mode. However, if
10193 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10194 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10195 we can convert it to
10196 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10197 This simplifies certain SIGN_EXTEND operations. */
10198 if (code == ASHIFT && first_code == ASHIFTRT
10199 && count == (GET_MODE_PRECISION (result_mode)
10200 - GET_MODE_PRECISION (GET_MODE (varop))))
10202 /* C3 has the low-order C1 bits zero. */
10204 mask = GET_MODE_MASK (mode)
10205 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10207 varop = simplify_and_const_int (NULL_RTX, result_mode,
10208 XEXP (varop, 0), mask);
10209 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10210 varop, count);
10211 count = first_count;
10212 code = ASHIFTRT;
10213 continue;
10216 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10217 than C1 high-order bits equal to the sign bit, we can convert
10218 this to either an ASHIFT or an ASHIFTRT depending on the
10219 two counts.
10221 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10223 if (code == ASHIFTRT && first_code == ASHIFT
10224 && GET_MODE (varop) == shift_mode
10225 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10226 > first_count))
10228 varop = XEXP (varop, 0);
10229 count -= first_count;
10230 if (count < 0)
10232 count = -count;
10233 code = ASHIFT;
10236 continue;
10239 /* There are some cases we can't do. If CODE is ASHIFTRT,
10240 we can only do this if FIRST_CODE is also ASHIFTRT.
10242 We can't do the case when CODE is ROTATE and FIRST_CODE is
10243 ASHIFTRT.
10245 If the mode of this shift is not the mode of the outer shift,
10246 we can't do this if either shift is a right shift or ROTATE.
10248 Finally, we can't do any of these if the mode is too wide
10249 unless the codes are the same.
10251 Handle the case where the shift codes are the same
10252 first. */
10254 if (code == first_code)
10256 if (GET_MODE (varop) != result_mode
10257 && (code == ASHIFTRT || code == LSHIFTRT
10258 || code == ROTATE))
10259 break;
10261 count += first_count;
10262 varop = XEXP (varop, 0);
10263 continue;
10266 if (code == ASHIFTRT
10267 || (code == ROTATE && first_code == ASHIFTRT)
10268 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10269 || (GET_MODE (varop) != result_mode
10270 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10271 || first_code == ROTATE
10272 || code == ROTATE)))
10273 break;
10275 /* To compute the mask to apply after the shift, shift the
10276 nonzero bits of the inner shift the same way the
10277 outer shift will. */
10279 mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
10280 result_mode);
10282 mask_rtx
10283 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10284 GEN_INT (count));
10286 /* Give up if we can't compute an outer operation to use. */
10287 if (mask_rtx == 0
10288 || !CONST_INT_P (mask_rtx)
10289 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10290 INTVAL (mask_rtx),
10291 result_mode, &complement_p))
10292 break;
10294 /* If the shifts are in the same direction, we add the
10295 counts. Otherwise, we subtract them. */
10296 if ((code == ASHIFTRT || code == LSHIFTRT)
10297 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10298 count += first_count;
10299 else
10300 count -= first_count;
10302 /* If COUNT is positive, the new shift is usually CODE,
10303 except for the two exceptions below, in which case it is
10304 FIRST_CODE. If the count is negative, FIRST_CODE should
10305 always be used */
10306 if (count > 0
10307 && ((first_code == ROTATE && code == ASHIFT)
10308 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10309 code = first_code;
10310 else if (count < 0)
10311 code = first_code, count = -count;
10313 varop = XEXP (varop, 0);
10314 continue;
10317 /* If we have (A << B << C) for any shift, we can convert this to
10318 (A << C << B). This wins if A is a constant. Only try this if
10319 B is not a constant. */
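/* For example, (ashift (ashift (const_int 4) B) 2) becomes
   (ashift (const_int 16) B), folding the outer shift into the constant.  */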
10321 else if (GET_CODE (varop) == code
10322 && CONST_INT_P (XEXP (varop, 0))
10323 && !CONST_INT_P (XEXP (varop, 1)))
10325 rtx new_rtx = simplify_const_binary_operation (code, mode,
10326 XEXP (varop, 0),
10327 GEN_INT (count));
10328 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10329 count = 0;
10330 continue;
10332 break;
10334 case NOT:
10335 if (VECTOR_MODE_P (mode))
10336 break;
10338 /* Make this fit the case below. */
10339 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10340 continue;
10342 case IOR:
10343 case AND:
10344 case XOR:
10345 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10346 with C the size of VAROP - 1 and the shift is logical if
10347 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10348 we have an (le X 0) operation. If we have an arithmetic shift
10349 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10350 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
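/* The sign bit of (ior (plus X -1) X) is set exactly when X <= 0:
   for X == 0 the PLUS is -1, for X < 0 X itself is negative, and for
   X > 0 neither term has its sign bit set.  Shifting that bit down
   by C therefore produces the 0/1 (or 0/-1) flag value.  */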
10352 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10353 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10354 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10355 && (code == LSHIFTRT || code == ASHIFTRT)
10356 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10357 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10359 count = 0;
10360 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10361 const0_rtx);
10363 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10364 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10366 continue;
10369 /* If we have (shift (logical)), move the logical to the outside
10370 to allow it to possibly combine with another logical and the
10371 shift to combine with another shift. This also canonicalizes to
10372 what a ZERO_EXTRACT looks like. Also, some machines have
10373 (and (shift)) insns. */
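/* For example, (lshiftrt (and X 0xf0) 4) becomes
   (and (lshiftrt X 4) 0xf), leaving the AND outermost.  */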
10375 if (CONST_INT_P (XEXP (varop, 1))
10376 /* We can't do this if we have (ashiftrt (xor)) and the
10377 constant has its sign bit set in shift_mode with shift_mode
10378 wider than result_mode. */
10379 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10380 && result_mode != shift_mode
10381 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10382 shift_mode))
10383 && (new_rtx = simplify_const_binary_operation
10384 (code, result_mode,
10385 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10386 GEN_INT (count))) != 0
10387 && CONST_INT_P (new_rtx)
10388 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10389 INTVAL (new_rtx), result_mode, &complement_p))
10391 varop = XEXP (varop, 0);
10392 continue;
10395 /* If we can't do that, try to simplify the shift in each arm of the
10396 logical expression, make a new logical expression, and apply
10397 the inverse distributive law. This also can't be done for
10398 (ashiftrt (xor)) where we've widened the shift and the constant
10399 changes the sign bit. */
10400 if (CONST_INT_P (XEXP (varop, 1))
10401 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10402 && result_mode != shift_mode
10403 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10404 shift_mode)))
10406 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10407 XEXP (varop, 0), count);
10408 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10409 XEXP (varop, 1), count);
10411 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10412 lhs, rhs);
10413 varop = apply_distributive_law (varop);
10415 count = 0;
10416 continue;
10418 break;
10420 case EQ:
10421 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10422 says that the sign bit can be tested, FOO has mode MODE, C is
10423 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10424 that may be nonzero. */
10425 if (code == LSHIFTRT
10426 && XEXP (varop, 1) == const0_rtx
10427 && GET_MODE (XEXP (varop, 0)) == result_mode
10428 && count == (GET_MODE_PRECISION (result_mode) - 1)
10429 && HWI_COMPUTABLE_MODE_P (result_mode)
10430 && STORE_FLAG_VALUE == -1
10431 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10432 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10433 &complement_p))
10435 varop = XEXP (varop, 0);
10436 count = 0;
10437 continue;
10439 break;
10441 case NEG:
10442 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10443 than the number of bits in the mode is equivalent to A. */
10444 if (code == LSHIFTRT
10445 && count == (GET_MODE_PRECISION (result_mode) - 1)
10446 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10448 varop = XEXP (varop, 0);
10449 count = 0;
10450 continue;
10453 /* NEG commutes with ASHIFT since it is multiplication. Move the
10454 NEG outside to allow shifts to combine. */
10455 if (code == ASHIFT
10456 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10457 &complement_p))
10459 varop = XEXP (varop, 0);
10460 continue;
10462 break;
10464 case PLUS:
10465 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10466 is one less than the number of bits in the mode is
10467 equivalent to (xor A 1). */
10468 if (code == LSHIFTRT
10469 && count == (GET_MODE_PRECISION (result_mode) - 1)
10470 && XEXP (varop, 1) == constm1_rtx
10471 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10472 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10473 &complement_p))
10475 count = 0;
10476 varop = XEXP (varop, 0);
10477 continue;
10480 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10481 that might be nonzero in BAR are those being shifted out and those
10482 bits are known zero in FOO, we can replace the PLUS with FOO.
10483 Similarly in the other operand order. This code occurs when
10484 we are computing the size of a variable-size array. */
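/* For example, (lshiftrt (plus FOO 3) 2) where FOO is known to have
   its two low-order bits clear is just (lshiftrt FOO 2), since the
   addition only affects bits that the shift discards.  */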
10486 if ((code == ASHIFTRT || code == LSHIFTRT)
10487 && count < HOST_BITS_PER_WIDE_INT
10488 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10489 && (nonzero_bits (XEXP (varop, 1), result_mode)
10490 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10492 varop = XEXP (varop, 0);
10493 continue;
10495 else if ((code == ASHIFTRT || code == LSHIFTRT)
10496 && count < HOST_BITS_PER_WIDE_INT
10497 && HWI_COMPUTABLE_MODE_P (result_mode)
10498 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10499 >> count)
10500 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10501 & nonzero_bits (XEXP (varop, 1),
10502 result_mode)))
10504 varop = XEXP (varop, 1);
10505 continue;
10508 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
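/* For example, (ashift (plus FOO 1) 3) becomes
   (plus (ashift FOO 3) 8).  */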
10509 if (code == ASHIFT
10510 && CONST_INT_P (XEXP (varop, 1))
10511 && (new_rtx = simplify_const_binary_operation
10512 (ASHIFT, result_mode,
10513 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10514 GEN_INT (count))) != 0
10515 && CONST_INT_P (new_rtx)
10516 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10517 INTVAL (new_rtx), result_mode, &complement_p))
10519 varop = XEXP (varop, 0);
10520 continue;
10523 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10524 signbit', and attempt to change the PLUS to an XOR and move it to
10525 the outer operation as is done above in the AND/IOR/XOR case
10526 for shift (logical).  See the logical handling above
10527 for the reasoning behind doing so.  */
10528 if (code == LSHIFTRT
10529 && CONST_INT_P (XEXP (varop, 1))
10530 && mode_signbit_p (result_mode, XEXP (varop, 1))
10531 && (new_rtx = simplify_const_binary_operation
10532 (code, result_mode,
10533 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10534 GEN_INT (count))) != 0
10535 && CONST_INT_P (new_rtx)
10536 && merge_outer_ops (&outer_op, &outer_const, XOR,
10537 INTVAL (new_rtx), result_mode, &complement_p))
10539 varop = XEXP (varop, 0);
10540 continue;
10543 break;
10545 case MINUS:
10546 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10547 with C the size of VAROP - 1 and the shift is logical if
10548 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10549 we have a (gt X 0) operation. If the shift is arithmetic with
10550 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10551 we have a (neg (gt X 0)) operation. */
10553 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10554 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10555 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10556 && (code == LSHIFTRT || code == ASHIFTRT)
10557 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10558 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10559 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10561 count = 0;
10562 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10563 const0_rtx);
10565 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10566 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10568 continue;
10570 break;
10572 case TRUNCATE:
10573 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10574 if the truncate does not affect the value. */
10575 if (code == LSHIFTRT
10576 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10577 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10578 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10579 >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10580 - GET_MODE_PRECISION (GET_MODE (varop)))))
10582 rtx varop_inner = XEXP (varop, 0);
10584 varop_inner
10585 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10586 XEXP (varop_inner, 0),
10587 GEN_INT
10588 (count + INTVAL (XEXP (varop_inner, 1))));
10589 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10590 count = 0;
10591 continue;
10593 break;
10595 default:
10596 break;
10599 break;
10602 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10603 outer_op, outer_const);
10605 /* We have now finished analyzing the shift. The result should be
10606 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10607 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10608 to the result of the shift. OUTER_CONST is the relevant constant,
10609 but we must turn off all bits turned off in the shift. */
10611 if (outer_op == UNKNOWN
10612 && orig_code == code && orig_count == count
10613 && varop == orig_varop
10614 && shift_mode == GET_MODE (varop))
10615 return NULL_RTX;
10617 /* Make a SUBREG if necessary. If we can't make it, fail. */
10618 varop = gen_lowpart (shift_mode, varop);
10619 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10620 return NULL_RTX;
10622 /* If we have an outer operation and we just made a shift, it is
10623 possible that we could have simplified the shift were it not
10624 for the outer operation. So try to do the simplification
10625 recursively. */
10627 if (outer_op != UNKNOWN)
10628 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10629 else
10630 x = NULL_RTX;
10632 if (x == NULL_RTX)
10633 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10635 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10636 turn off all the bits that the shift would have turned off. */
10637 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10638 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10639 GET_MODE_MASK (result_mode) >> orig_count);
10641 /* Do the remainder of the processing in RESULT_MODE. */
10642 x = gen_lowpart_or_truncate (result_mode, x);
10644 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10645 operation. */
10646 if (complement_p)
10647 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10649 if (outer_op != UNKNOWN)
10651 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10652 && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10653 outer_const = trunc_int_for_mode (outer_const, result_mode);
10655 if (outer_op == AND)
10656 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10657 else if (outer_op == SET)
10659 /* This means that we have determined that the result is
10660 equivalent to a constant. This should be rare. */
10661 if (!side_effects_p (x))
10662 x = GEN_INT (outer_const);
10664 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10665 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10666 else
10667 x = simplify_gen_binary (outer_op, result_mode, x,
10668 GEN_INT (outer_const));
10671 return x;
10674 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10675 The result of the shift is RESULT_MODE. If we cannot simplify it,
10676 return X or, if it is NULL, synthesize the expression with
10677 simplify_gen_binary. Otherwise, return a simplified value.
10679 The shift is normally computed in the widest mode we find in VAROP, as
10680 long as it isn't a different number of words than RESULT_MODE. Exceptions
10681 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10683 static rtx
10684 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
10685 rtx varop, int count)
10687 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10688 if (tem)
10689 return tem;
10691 if (!x)
10692 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10693 if (GET_MODE (x) != result_mode)
10694 x = gen_lowpart (result_mode, x);
10695 return x;
10699 /* Like recog, but we receive the address of a pointer to a new pattern.
10700 We try to match the rtx that the pointer points to.
10701 If that fails, we may try to modify or replace the pattern,
10702 storing the replacement into the same pointer object.
10704 Modifications include deletion or addition of CLOBBERs.
10706 PNOTES is a pointer to a location where any REG_UNUSED notes added for
10707 the CLOBBERs are placed.
10709 The value is the final insn code from the pattern ultimately matched,
10710 or -1. */
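/* For example, try_combine invokes this roughly as
   insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
   and treats a negative result as a failure to match.  */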
10712 static int
10713 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
10715 rtx pat = *pnewpat;
10716 rtx pat_without_clobbers;
10717 int insn_code_number;
10718 int num_clobbers_to_add = 0;
10719 int i;
10720 rtx notes = NULL_RTX;
10721 rtx old_notes, old_pat;
10722 int old_icode;
10724 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10725 we use to indicate that something didn't match. If we find such a
10726 thing, force rejection. */
10727 if (GET_CODE (pat) == PARALLEL)
10728 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10729 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10730 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10731 return -1;
10733 old_pat = PATTERN (insn);
10734 old_notes = REG_NOTES (insn);
10735 PATTERN (insn) = pat;
10736 REG_NOTES (insn) = NULL_RTX;
10738 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10739 if (dump_file && (dump_flags & TDF_DETAILS))
10741 if (insn_code_number < 0)
10742 fputs ("Failed to match this instruction:\n", dump_file);
10743 else
10744 fputs ("Successfully matched this instruction:\n", dump_file);
10745 print_rtl_single (dump_file, pat);
10748 /* If the pattern isn't recognized, there is the possibility that we previously had an insn
10749 that clobbered some register as a side effect, but the combined
10750 insn doesn't need to do that. So try once more without the clobbers
10751 unless this represents an ASM insn. */
10753 if (insn_code_number < 0 && ! check_asm_operands (pat)
10754 && GET_CODE (pat) == PARALLEL)
10756 int pos;
10758 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10759 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10761 if (i != pos)
10762 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10763 pos++;
10766 SUBST_INT (XVECLEN (pat, 0), pos);
10768 if (pos == 1)
10769 pat = XVECEXP (pat, 0, 0);
10771 PATTERN (insn) = pat;
10772 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10773 if (dump_file && (dump_flags & TDF_DETAILS))
10775 if (insn_code_number < 0)
10776 fputs ("Failed to match this instruction:\n", dump_file);
10777 else
10778 fputs ("Successfully matched this instruction:\n", dump_file);
10779 print_rtl_single (dump_file, pat);
10783 pat_without_clobbers = pat;
10785 PATTERN (insn) = old_pat;
10786 REG_NOTES (insn) = old_notes;
10788 /* Recognize all noop sets; these will be killed by a followup pass.  */
10789 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10790 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10792 /* If we had any clobbers to add, make a new pattern that contains
10793 them. Then check to make sure that all of them are dead. */
10794 if (num_clobbers_to_add)
10796 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10797 rtvec_alloc (GET_CODE (pat) == PARALLEL
10798 ? (XVECLEN (pat, 0)
10799 + num_clobbers_to_add)
10800 : num_clobbers_to_add + 1));
10802 if (GET_CODE (pat) == PARALLEL)
10803 for (i = 0; i < XVECLEN (pat, 0); i++)
10804 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10805 else
10806 XVECEXP (newpat, 0, 0) = pat;
10808 add_clobbers (newpat, insn_code_number);
10810 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10811 i < XVECLEN (newpat, 0); i++)
10813 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10814 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10815 return -1;
10816 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10818 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10819 notes = alloc_reg_note (REG_UNUSED,
10820 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10823 pat = newpat;
10826 if (insn_code_number >= 0
10827 && insn_code_number != NOOP_MOVE_INSN_CODE)
10829 old_pat = PATTERN (insn);
10830 old_notes = REG_NOTES (insn);
10831 old_icode = INSN_CODE (insn);
10832 PATTERN (insn) = pat;
10833 REG_NOTES (insn) = notes;
10835 /* Allow targets to reject combined insn. */
10836 if (!targetm.legitimate_combined_insn (insn))
10838 if (dump_file && (dump_flags & TDF_DETAILS))
10839 fputs ("Instruction not appropriate for target.",
10840 dump_file);
10842 /* Callers expect recog_for_combine to strip
10843 clobbers from the pattern on failure. */
10844 pat = pat_without_clobbers;
10845 notes = NULL_RTX;
10847 insn_code_number = -1;
10850 PATTERN (insn) = old_pat;
10851 REG_NOTES (insn) = old_notes;
10852 INSN_CODE (insn) = old_icode;
10855 *pnewpat = pat;
10856 *pnotes = notes;
10858 return insn_code_number;
10861 /* Like gen_lowpart_general but for use by combine. In combine it
10862 is not possible to create any new pseudoregs. However, it is
10863 safe to create invalid memory addresses, because combine will
10864 try to recognize them and all they will do is make the combine
10865 attempt fail.
10867 If for some reason this cannot do its job, an rtx
10868 (clobber (const_int 0)) is returned.
10869 An insn containing that will not be recognized. */
10871 static rtx
10872 gen_lowpart_for_combine (machine_mode omode, rtx x)
10874 machine_mode imode = GET_MODE (x);
10875 unsigned int osize = GET_MODE_SIZE (omode);
10876 unsigned int isize = GET_MODE_SIZE (imode);
10877 rtx result;
10879 if (omode == imode)
10880 return x;
10882 /* We can only support MODE being wider than a word if X is a
10883 constant integer or has a mode the same size. */
10884 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10885 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
10886 goto fail;
10888 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
10889 won't know what to do. So we will strip off the SUBREG here and
10890 process normally. */
10891 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10893 x = SUBREG_REG (x);
10895 /* In case we fall through to the address adjustments further
10896 below, we need to update the known mode and size of x (imode
10897 and isize), since we just changed x.  */
10898 imode = GET_MODE (x);
10900 if (imode == omode)
10901 return x;
10903 isize = GET_MODE_SIZE (imode);
10906 result = gen_lowpart_common (omode, x);
10908 if (result)
10909 return result;
10911 if (MEM_P (x))
10913 int offset = 0;
10915 /* Refuse to work on a volatile memory ref or one with a mode-dependent
10916 address. */
10917 if (MEM_VOLATILE_P (x)
10918 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
10919 goto fail;
10921 /* If we want to refer to something bigger than the original memref,
10922 generate a paradoxical subreg instead. That will force a reload
10923 of the original memref X. */
10924 if (isize < osize)
10925 return gen_rtx_SUBREG (omode, x, 0);
10927 if (WORDS_BIG_ENDIAN)
10928 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10930 /* Adjust the address so that the address-after-the-data is
10931 unchanged. */
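/* For example, taking the QImode low part of a big-endian SImode MEM
   ends up with offset 3, the address of the byte holding the
   low-order bits.  */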
10932 if (BYTES_BIG_ENDIAN)
10933 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
10935 return adjust_address_nv (x, omode, offset);
10938 /* If X is a comparison operator, rewrite it in a new mode. This
10939 probably won't match, but may allow further simplifications. */
10940 else if (COMPARISON_P (x))
10941 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10943 /* If we couldn't simplify X any other way, just enclose it in a
10944 SUBREG. Normally, this SUBREG won't match, but some patterns may
10945 include an explicit SUBREG or we may simplify it further in combine. */
10946 else
10948 int offset = 0;
10949 rtx res;
10951 offset = subreg_lowpart_offset (omode, imode);
10952 if (imode == VOIDmode)
10954 imode = int_mode_for_mode (omode);
10955 x = gen_lowpart_common (imode, x);
10956 if (x == NULL)
10957 goto fail;
10959 res = simplify_gen_subreg (omode, x, imode, offset);
10960 if (res)
10961 return res;
10964 fail:
10965 return gen_rtx_CLOBBER (omode, const0_rtx);
10968 /* Try to simplify a comparison between OP0 and a constant OP1,
10969 where CODE is the comparison code that will be tested, into a
10970 (CODE OP0 const0_rtx) form.
10972 The result is a possibly different comparison code to use.
10973 *POP1 may be updated. */
10975 static enum rtx_code
10976 simplify_compare_const (enum rtx_code code, machine_mode mode,
10977 rtx op0, rtx *pop1)
10979 unsigned int mode_width = GET_MODE_PRECISION (mode);
10980 HOST_WIDE_INT const_op = INTVAL (*pop1);
10982 /* Get the constant we are comparing against and turn off all bits
10983 not on in our mode. */
10984 if (mode != VOIDmode)
10985 const_op = trunc_int_for_mode (const_op, mode);
10987 /* If we are comparing against a constant power of two and the value
10988 being compared can only have that single bit nonzero (e.g., it was
10989 `and'ed with that bit), we can replace this with a comparison
10990 with zero. */
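/* For example, if OP0 is known to have only bit 3 possibly nonzero,
   (geu OP0 (const_int 8)) becomes (ne OP0 (const_int 0)).  */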
10991 if (const_op
10992 && (code == EQ || code == NE || code == GE || code == GEU
10993 || code == LT || code == LTU)
10994 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
10995 && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0
10996 && (nonzero_bits (op0, mode)
10997 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
10999 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11000 const_op = 0;
11003 /* Similarly, if we are comparing a value known to be either -1 or
11004 0 with -1, change it to the opposite comparison against zero. */
11005 if (const_op == -1
11006 && (code == EQ || code == NE || code == GT || code == LE
11007 || code == GEU || code == LTU)
11008 && num_sign_bit_copies (op0, mode) == mode_width)
11010 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11011 const_op = 0;
11014 /* Do some canonicalizations based on the comparison code. We prefer
11015 comparisons against zero and then prefer equality comparisons.
11016 If we can reduce the size of a constant, we will do that too. */
11017 switch (code)
11019 case LT:
11020 /* < C is equivalent to <= (C - 1) */
11021 if (const_op > 0)
11023 const_op -= 1;
11024 code = LE;
11025 /* ... fall through to LE case below. */
11027 else
11028 break;
11030 case LE:
11031 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11032 if (const_op < 0)
11034 const_op += 1;
11035 code = LT;
11038 /* If we are doing a <= 0 comparison on a value known to have
11039 a zero sign bit, we can replace this with == 0. */
11040 else if (const_op == 0
11041 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11042 && (nonzero_bits (op0, mode)
11043 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11044 == 0)
11045 code = EQ;
11046 break;
11048 case GE:
11049 /* >= C is equivalent to > (C - 1). */
11050 if (const_op > 0)
11052 const_op -= 1;
11053 code = GT;
11054 /* ... fall through to GT below. */
11056 else
11057 break;
11059 case GT:
11060 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11061 if (const_op < 0)
11063 const_op += 1;
11064 code = GE;
11067 /* If we are doing a > 0 comparison on a value known to have
11068 a zero sign bit, we can replace this with != 0. */
11069 else if (const_op == 0
11070 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11071 && (nonzero_bits (op0, mode)
11072 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11073 == 0)
11074 code = NE;
11075 break;
11077 case LTU:
11078 /* < C is equivalent to <= (C - 1). */
11079 if (const_op > 0)
11081 const_op -= 1;
11082 code = LEU;
11083 /* ... fall through ... */
11085 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11086 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11087 && (unsigned HOST_WIDE_INT) const_op
11088 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11090 const_op = 0;
11091 code = GE;
11092 break;
11094 else
11095 break;
11097 case LEU:
11098 /* unsigned <= 0 is equivalent to == 0 */
11099 if (const_op == 0)
11100 code = EQ;
11101 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11102 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11103 && (unsigned HOST_WIDE_INT) const_op
11104 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11106 const_op = 0;
11107 code = GE;
11109 break;
11111 case GEU:
11112 /* >= C is equivalent to > (C - 1). */
11113 if (const_op > 1)
11115 const_op -= 1;
11116 code = GTU;
11117 /* ... fall through ... */
11120 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11121 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11122 && (unsigned HOST_WIDE_INT) const_op
11123 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11125 const_op = 0;
11126 code = LT;
11127 break;
11129 else
11130 break;
11132 case GTU:
11133 /* unsigned > 0 is equivalent to != 0 */
11134 if (const_op == 0)
11135 code = NE;
11136 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11137 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11138 && (unsigned HOST_WIDE_INT) const_op
11139 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11141 const_op = 0;
11142 code = LT;
11144 break;
11146 default:
11147 break;
11150 *pop1 = GEN_INT (const_op);
11151 return code;
11154 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11155 comparison code that will be tested.
11157 The result is a possibly different comparison code to use. *POP0 and
11158 *POP1 may be updated.
11160 It is possible that we might detect that a comparison is either always
11161 true or always false. However, we do not perform general constant
11162 folding in combine, so this knowledge isn't useful. Such tautologies
11163 should have been detected earlier. Hence we ignore all such cases. */
11165 static enum rtx_code
11166 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11168 rtx op0 = *pop0;
11169 rtx op1 = *pop1;
11170 rtx tem, tem1;
11171 int i;
11172 machine_mode mode, tmode;
11174 /* Try a few ways of applying the same transformation to both operands. */
11175 while (1)
11177 #ifndef WORD_REGISTER_OPERATIONS
11178 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11179 so check specially. */
11180 if (code != GTU && code != GEU && code != LTU && code != LEU
11181 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11182 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11183 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11184 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11185 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11186 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11187 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11188 && CONST_INT_P (XEXP (op0, 1))
11189 && XEXP (op0, 1) == XEXP (op1, 1)
11190 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11191 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11192 && (INTVAL (XEXP (op0, 1))
11193 == (GET_MODE_PRECISION (GET_MODE (op0))
11194 - (GET_MODE_PRECISION
11195 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11197 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11198 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11200 #endif
11202 /* If both operands are the same constant shift, see if we can ignore the
11203 shift. We can if the shift is a rotate or if the bits shifted out of
11204 this shift are known to be zero for both inputs and if the type of
11205 comparison is compatible with the shift. */
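/* For example, (eq (lshiftrt A 2) (lshiftrt B 2)) can be reduced to
   (eq A B) when the two low-order bits of both A and B are known to
   be zero.  */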
11206 if (GET_CODE (op0) == GET_CODE (op1)
11207 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11208 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11209 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11210 && (code != GT && code != LT && code != GE && code != LE))
11211 || (GET_CODE (op0) == ASHIFTRT
11212 && (code != GTU && code != LTU
11213 && code != GEU && code != LEU)))
11214 && CONST_INT_P (XEXP (op0, 1))
11215 && INTVAL (XEXP (op0, 1)) >= 0
11216 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11217 && XEXP (op0, 1) == XEXP (op1, 1))
11219 machine_mode mode = GET_MODE (op0);
11220 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11221 int shift_count = INTVAL (XEXP (op0, 1));
11223 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11224 mask &= (mask >> shift_count) << shift_count;
11225 else if (GET_CODE (op0) == ASHIFT)
11226 mask = (mask & (mask << shift_count)) >> shift_count;
11228 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11229 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11230 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11231 else
11232 break;
11235 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11236 SUBREGs are of the same mode, and, in both cases, the AND would
11237 be redundant if the comparison was done in the narrower mode,
11238 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11239 and the operand's possibly nonzero bits are 0xffffff01; in that case
11240 if we only care about QImode, we don't need the AND). This case
11241 occurs if the output mode of an scc insn is not SImode and
11242 STORE_FLAG_VALUE == 1 (e.g., the 386).
11244 Similarly, check for a case where the AND's are ZERO_EXTEND
11245 operations from some narrower mode even though a SUBREG is not
11246 present. */
11248 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11249 && CONST_INT_P (XEXP (op0, 1))
11250 && CONST_INT_P (XEXP (op1, 1)))
11252 rtx inner_op0 = XEXP (op0, 0);
11253 rtx inner_op1 = XEXP (op1, 0);
11254 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11255 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11256 int changed = 0;
11258 if (paradoxical_subreg_p (inner_op0)
11259 && GET_CODE (inner_op1) == SUBREG
11260 && (GET_MODE (SUBREG_REG (inner_op0))
11261 == GET_MODE (SUBREG_REG (inner_op1)))
11262 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11263 <= HOST_BITS_PER_WIDE_INT)
11264 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11265 GET_MODE (SUBREG_REG (inner_op0)))))
11266 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11267 GET_MODE (SUBREG_REG (inner_op1))))))
11269 op0 = SUBREG_REG (inner_op0);
11270 op1 = SUBREG_REG (inner_op1);
11272 /* The resulting comparison is always unsigned since we masked
11273 off the original sign bit. */
11274 code = unsigned_condition (code);
11276 changed = 1;
11279 else if (c0 == c1)
11280 for (tmode = GET_CLASS_NARROWEST_MODE
11281 (GET_MODE_CLASS (GET_MODE (op0)));
11282 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11283 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11285 op0 = gen_lowpart (tmode, inner_op0);
11286 op1 = gen_lowpart (tmode, inner_op1);
11287 code = unsigned_condition (code);
11288 changed = 1;
11289 break;
11292 if (! changed)
11293 break;
11296 /* If both operands are NOT, we can strip off the outer operation
11297 and adjust the comparison code for swapped operands; similarly for
11298 NEG, except that this must be an equality comparison. */
11299 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11300 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11301 && (code == EQ || code == NE)))
11302 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11304 else
11305 break;
11308 /* If the first operand is a constant, swap the operands and adjust the
11309 comparison code appropriately, but don't do this if the second operand
11310 is already a constant integer. */
11311 if (swap_commutative_operands_p (op0, op1))
11313 tem = op0, op0 = op1, op1 = tem;
11314 code = swap_condition (code);
11317 /* We now enter a loop during which we will try to simplify the comparison.
11318 For the most part, we only are concerned with comparisons with zero,
11319 but some things may really be comparisons with zero but not start
11320 out looking that way. */
11322 while (CONST_INT_P (op1))
11324 machine_mode mode = GET_MODE (op0);
11325 unsigned int mode_width = GET_MODE_PRECISION (mode);
11326 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11327 int equality_comparison_p;
11328 int sign_bit_comparison_p;
11329 int unsigned_comparison_p;
11330 HOST_WIDE_INT const_op;
11332 /* We only want to handle integral modes. This catches VOIDmode,
11333 CCmode, and the floating-point modes. An exception is that we
11334 can handle VOIDmode if OP0 is a COMPARE or a comparison
11335 operation. */
11337 if (GET_MODE_CLASS (mode) != MODE_INT
11338 && ! (mode == VOIDmode
11339 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11340 break;
11342 /* Try to simplify the compare to constant, possibly changing the
11343 comparison op, and/or changing op1 to zero. */
11344 code = simplify_compare_const (code, mode, op0, &op1);
11345 const_op = INTVAL (op1);
11347 /* Compute some predicates to simplify code below. */
11349 equality_comparison_p = (code == EQ || code == NE);
11350 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11351 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11352 || code == GEU);
11354 /* If this is a sign bit comparison and we can do arithmetic in
11355 MODE, say that we will only be needing the sign bit of OP0. */
11356 if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11357 op0 = force_to_mode (op0, mode,
11358 (unsigned HOST_WIDE_INT) 1
11359 << (GET_MODE_PRECISION (mode) - 1), 0);
11362 /* Now try cases based on the opcode of OP0. If none of the cases
11363 does a "continue", we exit this loop immediately after the
11364 switch. */
11366 switch (GET_CODE (op0))
11368 case ZERO_EXTRACT:
11369 /* If we are extracting a single bit from a variable position in
11370 a constant that has only a single bit set and are comparing it
11371 with zero, we can convert this into an equality comparison
11372 between the position and the location of the single bit. */
11373 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11374 have already reduced the shift count modulo the word size. */
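/* For example, on a !BITS_BIG_ENDIAN target,
   (eq (zero_extract (const_int 4) (const_int 1) POS) (const_int 0))
   becomes (ne POS (const_int 2)), since the extracted bit is nonzero
   exactly when POS selects bit 2.  */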
11375 if (!SHIFT_COUNT_TRUNCATED
11376 && CONST_INT_P (XEXP (op0, 0))
11377 && XEXP (op0, 1) == const1_rtx
11378 && equality_comparison_p && const_op == 0
11379 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11381 if (BITS_BIG_ENDIAN)
11382 i = BITS_PER_WORD - 1 - i;
11384 op0 = XEXP (op0, 2);
11385 op1 = GEN_INT (i);
11386 const_op = i;
11388 /* Result is nonzero iff shift count is equal to I. */
11389 code = reverse_condition (code);
11390 continue;
11393 /* ... fall through ... */
11395 case SIGN_EXTRACT:
11396 tem = expand_compound_operation (op0);
11397 if (tem != op0)
11399 op0 = tem;
11400 continue;
11402 break;
11404 case NOT:
11405 /* If testing for equality, we can take the NOT of the constant. */
11406 if (equality_comparison_p
11407 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11409 op0 = XEXP (op0, 0);
11410 op1 = tem;
11411 continue;
11414 /* If just looking at the sign bit, reverse the sense of the
11415 comparison. */
11416 if (sign_bit_comparison_p)
11418 op0 = XEXP (op0, 0);
11419 code = (code == GE ? LT : GE);
11420 continue;
11422 break;
11424 case NEG:
11425 /* If testing for equality, we can take the NEG of the constant. */
11426 if (equality_comparison_p
11427 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11429 op0 = XEXP (op0, 0);
11430 op1 = tem;
11431 continue;
11434 /* The remaining cases only apply to comparisons with zero. */
11435 if (const_op != 0)
11436 break;
11438 /* When X is ABS or is known positive,
11439 (neg X) is < 0 if and only if X != 0. */
11441 if (sign_bit_comparison_p
11442 && (GET_CODE (XEXP (op0, 0)) == ABS
11443 || (mode_width <= HOST_BITS_PER_WIDE_INT
11444 && (nonzero_bits (XEXP (op0, 0), mode)
11445 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11446 == 0)))
11448 op0 = XEXP (op0, 0);
11449 code = (code == LT ? NE : EQ);
11450 continue;
11453 /* If we have NEG of something whose two high-order bits are the
11454 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11455 if (num_sign_bit_copies (op0, mode) >= 2)
11457 op0 = XEXP (op0, 0);
11458 code = swap_condition (code);
11459 continue;
11461 break;
11463 case ROTATE:
11464 /* If we are testing equality and our count is a constant, we
11465 can perform the inverse operation on our RHS. */
11466 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11467 && (tem = simplify_binary_operation (ROTATERT, mode,
11468 op1, XEXP (op0, 1))) != 0)
11470 op0 = XEXP (op0, 0);
11471 op1 = tem;
11472 continue;
11475 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11476 a particular bit. Convert it to an AND of a constant of that
11477 bit. This will be converted into a ZERO_EXTRACT. */
11478 if (const_op == 0 && sign_bit_comparison_p
11479 && CONST_INT_P (XEXP (op0, 1))
11480 && mode_width <= HOST_BITS_PER_WIDE_INT)
11482 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11483 ((unsigned HOST_WIDE_INT) 1
11484 << (mode_width - 1
11485 - INTVAL (XEXP (op0, 1)))));
11486 code = (code == LT ? NE : EQ);
11487 continue;
11490 /* Fall through. */
11492 case ABS:
11493 /* ABS is ignorable inside an equality comparison with zero. */
11494 if (const_op == 0 && equality_comparison_p)
11496 op0 = XEXP (op0, 0);
11497 continue;
11499 break;
11501 case SIGN_EXTEND:
11502 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11503 (compare FOO CONST) if CONST fits in FOO's mode and we
11504 are either testing inequality or have an unsigned
11505 comparison with ZERO_EXTEND or a signed comparison with
11506 SIGN_EXTEND. But don't do it if we don't have a compare
11507 insn of the given mode, since we'd have to revert it
11508 later on, and then we wouldn't know whether to sign- or
11509 zero-extend. */
11510 mode = GET_MODE (XEXP (op0, 0));
11511 if (GET_MODE_CLASS (mode) == MODE_INT
11512 && ! unsigned_comparison_p
11513 && HWI_COMPUTABLE_MODE_P (mode)
11514 && trunc_int_for_mode (const_op, mode) == const_op
11515 && have_insn_for (COMPARE, mode))
11517 op0 = XEXP (op0, 0);
11518 continue;
11520 break;
11522 case SUBREG:
11523 /* Check for the case where we are comparing A - C1 with C2, that is
11525 (subreg:MODE (plus (A) (-C1))) op (C2)
11527 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11528 comparison in the wider mode. One of the following two conditions
11529 must be true in order for this to be valid:
11531 1. The mode extension results in the same bit pattern being added
11532 on both sides and the comparison is equality or unsigned. As
11533 C2 has been truncated to fit in MODE, the pattern can only be
11534 all 0s or all 1s.
11536 2. The mode extension results in the sign bit being copied on
11537 each side.
11539 The difficulty here is that we have predicates for A but not for
11540 (A - C1) so we need to check that C1 is within proper bounds so
11541 as to perturb A as little as possible.  */
11543 if (mode_width <= HOST_BITS_PER_WIDE_INT
11544 && subreg_lowpart_p (op0)
11545 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11546 && GET_CODE (SUBREG_REG (op0)) == PLUS
11547 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11549 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11550 rtx a = XEXP (SUBREG_REG (op0), 0);
11551 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11553 if ((c1 > 0
11554 && (unsigned HOST_WIDE_INT) c1
11555 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11556 && (equality_comparison_p || unsigned_comparison_p)
11557 /* (A - C1) zero-extends if it is positive and sign-extends
11558 if it is negative, C2 both zero- and sign-extends. */
11559 && ((0 == (nonzero_bits (a, inner_mode)
11560 & ~GET_MODE_MASK (mode))
11561 && const_op >= 0)
11562 /* (A - C1) sign-extends if it is positive and 1-extends
11563 if it is negative, C2 both sign- and 1-extends. */
11564 || (num_sign_bit_copies (a, inner_mode)
11565 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11566 - mode_width)
11567 && const_op < 0)))
11568 || ((unsigned HOST_WIDE_INT) c1
11569 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11570 /* (A - C1) always sign-extends, like C2. */
11571 && num_sign_bit_copies (a, inner_mode)
11572 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11573 - (mode_width - 1))))
11575 op0 = SUBREG_REG (op0);
11576 continue;
11580 /* If the inner mode is narrower and we are extracting the low part,
11581 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11582 if (subreg_lowpart_p (op0)
11583 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11584 /* Fall through */ ;
11585 else
11586 break;
11588 /* ... fall through ... */
11590 case ZERO_EXTEND:
11591 mode = GET_MODE (XEXP (op0, 0));
11592 if (GET_MODE_CLASS (mode) == MODE_INT
11593 && (unsigned_comparison_p || equality_comparison_p)
11594 && HWI_COMPUTABLE_MODE_P (mode)
11595 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11596 && const_op >= 0
11597 && have_insn_for (COMPARE, mode))
11599 op0 = XEXP (op0, 0);
11600 continue;
11602 break;
11604 case PLUS:
11605 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11606 this for equality comparisons due to pathological cases involving
11607 overflows. */
11608 if (equality_comparison_p
11609 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11610 op1, XEXP (op0, 1))))
11612 op0 = XEXP (op0, 0);
11613 op1 = tem;
11614 continue;
11617 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11618 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11619 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11621 op0 = XEXP (XEXP (op0, 0), 0);
11622 code = (code == LT ? EQ : NE);
11623 continue;
11625 break;
11627 case MINUS:
11628 /* We used to optimize signed comparisons against zero, but that
11629 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11630 arrive here as equality comparisons, or (GEU, LTU) are
11631 optimized away. No need to special-case them. */
11633 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11634 (eq B (minus A C)), whichever simplifies. We can only do
11635 this for equality comparisons due to pathological cases involving
11636 overflows. */
11637 if (equality_comparison_p
11638 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11639 XEXP (op0, 1), op1)))
11641 op0 = XEXP (op0, 0);
11642 op1 = tem;
11643 continue;
11646 if (equality_comparison_p
11647 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11648 XEXP (op0, 0), op1)))
11650 op0 = XEXP (op0, 1);
11651 op1 = tem;
11652 continue;
11655 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11656 of bits in X minus 1, is one iff X > 0. */
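/* In SImode, for instance, (ashiftrt X 31) is -1 when X < 0 and 0
   otherwise, so the MINUS evaluates to -1 - X or -X and is negative
   exactly when X > 0.  */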
11657 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11658 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11659 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11660 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11662 op0 = XEXP (op0, 1);
11663 code = (code == GE ? LE : GT);
11664 continue;
11666 break;
11668 case XOR:
11669 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11670 if C is zero or B is a constant. */
11671 if (equality_comparison_p
11672 && 0 != (tem = simplify_binary_operation (XOR, mode,
11673 XEXP (op0, 1), op1)))
11675 op0 = XEXP (op0, 0);
11676 op1 = tem;
11677 continue;
11679 break;
11681 case EQ: case NE:
11682 case UNEQ: case LTGT:
11683 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11684 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11685 case UNORDERED: case ORDERED:
11686 /* We can't do anything if OP0 is a condition code value, rather
11687 than an actual data value. */
11688 if (const_op != 0
11689 || CC0_P (XEXP (op0, 0))
11690 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11691 break;
11693 /* Get the two operands being compared. */
11694 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11695 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11696 else
11697 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11699 /* Check for the cases where we simply want the result of the
11700 earlier test or the opposite of that result. */
11701 if (code == NE || code == EQ
11702 || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
11703 && (code == LT || code == GE)))
11705 enum rtx_code new_code;
11706 if (code == LT || code == NE)
11707 new_code = GET_CODE (op0);
11708 else
11709 new_code = reversed_comparison_code (op0, NULL);
11711 if (new_code != UNKNOWN)
11713 code = new_code;
11714 op0 = tem;
11715 op1 = tem1;
11716 continue;
11719 break;
11721 case IOR:
11722 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11723 iff X <= 0. */
11724 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11725 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11726 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11728 op0 = XEXP (op0, 1);
11729 code = (code == GE ? GT : LE);
11730 continue;
11732 break;
11734 case AND:
11735 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11736 will be converted to a ZERO_EXTRACT later. */
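/* Both forms test bit X of Y: the original isolates that bit in
   place, while the replacement moves it down to bit 0 where it can
   be matched as a single-bit extraction.  */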
11737 if (const_op == 0 && equality_comparison_p
11738 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11739 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11741 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11742 XEXP (XEXP (op0, 0), 1));
11743 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11744 continue;
11747 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11748 zero and X is a comparison and C1 and C2 describe only bits set
11749 in STORE_FLAG_VALUE, we can compare with X. */
11750 if (const_op == 0 && equality_comparison_p
11751 && mode_width <= HOST_BITS_PER_WIDE_INT
11752 && CONST_INT_P (XEXP (op0, 1))
11753 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11754 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11755 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11756 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11758 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11759 << INTVAL (XEXP (XEXP (op0, 0), 1)));
11760 if ((~STORE_FLAG_VALUE & mask) == 0
11761 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11762 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11763 && COMPARISON_P (tem))))
11765 op0 = XEXP (XEXP (op0, 0), 0);
11766 continue;
11770 /* If we are doing an equality comparison of an AND of a bit equal
11771 to the sign bit, replace this with a LT or GE comparison of
11772 the underlying value. */
11773 if (equality_comparison_p
11774 && const_op == 0
11775 && CONST_INT_P (XEXP (op0, 1))
11776 && mode_width <= HOST_BITS_PER_WIDE_INT
11777 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11778 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11780 op0 = XEXP (op0, 0);
11781 code = (code == EQ ? GE : LT);
11782 continue;
11785 /* If this AND operation is really a ZERO_EXTEND from a narrower
11786 mode, the constant fits within that mode, and this is either an
11787 equality or unsigned comparison, try to do this comparison in
11788 the narrower mode.
11790 Note that in:
11792 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11793 -> (ne:DI (reg:SI 4) (const_int 0))
11795 unless TRULY_NOOP_TRUNCATION allows it or the register is
11796 known to hold a value of the required mode the
11797 transformation is invalid. */
11798 if ((equality_comparison_p || unsigned_comparison_p)
11799 && CONST_INT_P (XEXP (op0, 1))
11800 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11801 & GET_MODE_MASK (mode))
11802 + 1)) >= 0
11803 && const_op >> i == 0
11804 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11805 && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
11806 || (REG_P (XEXP (op0, 0))
11807 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11809 op0 = gen_lowpart (tmode, XEXP (op0, 0));
11810 continue;
11813 /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11814 fits in both M1 and M2 and the SUBREG is either paradoxical
11815 or represents the low part, permute the SUBREG and the AND
11816 and try again. */
11817 if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11819 unsigned HOST_WIDE_INT c1;
11820 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11821 /* Require an integral mode, to avoid creating something like
11822 (AND:SF ...). */
11823 if (SCALAR_INT_MODE_P (tmode)
11824 /* It is unsafe to commute the AND into the SUBREG if the
11825 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11826 not defined. As originally written the upper bits
11827 have a defined value due to the AND operation.
11828 However, if we commute the AND inside the SUBREG then
11829 they no longer have defined values and the meaning of
11830 the code has been changed. */
11831 && (0
11832 #ifdef WORD_REGISTER_OPERATIONS
11833 || (mode_width > GET_MODE_PRECISION (tmode)
11834 && mode_width <= BITS_PER_WORD)
11835 #endif
11836 || (mode_width <= GET_MODE_PRECISION (tmode)
11837 && subreg_lowpart_p (XEXP (op0, 0))))
11838 && CONST_INT_P (XEXP (op0, 1))
11839 && mode_width <= HOST_BITS_PER_WIDE_INT
11840 && HWI_COMPUTABLE_MODE_P (tmode)
11841 && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11842 && (c1 & ~GET_MODE_MASK (tmode)) == 0
11843 && c1 != mask
11844 && c1 != GET_MODE_MASK (tmode))
11846 op0 = simplify_gen_binary (AND, tmode,
11847 SUBREG_REG (XEXP (op0, 0)),
11848 gen_int_mode (c1, tmode));
11849 op0 = gen_lowpart (mode, op0);
11850 continue;
11854 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
11855 if (const_op == 0 && equality_comparison_p
11856 && XEXP (op0, 1) == const1_rtx
11857 && GET_CODE (XEXP (op0, 0)) == NOT)
11859 op0 = simplify_and_const_int (NULL_RTX, mode,
11860 XEXP (XEXP (op0, 0), 0), 1);
11861 code = (code == NE ? EQ : NE);
11862 continue;
11865 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11866 (eq (and (lshiftrt X) 1) 0).
11867 Also handle the case where (not X) is expressed using xor. */
11868 if (const_op == 0 && equality_comparison_p
11869 && XEXP (op0, 1) == const1_rtx
11870 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11872 rtx shift_op = XEXP (XEXP (op0, 0), 0);
11873 rtx shift_count = XEXP (XEXP (op0, 0), 1);
11875 if (GET_CODE (shift_op) == NOT
11876 || (GET_CODE (shift_op) == XOR
11877 && CONST_INT_P (XEXP (shift_op, 1))
11878 && CONST_INT_P (shift_count)
11879 && HWI_COMPUTABLE_MODE_P (mode)
11880 && (UINTVAL (XEXP (shift_op, 1))
11881 == (unsigned HOST_WIDE_INT) 1
11882 << INTVAL (shift_count))))
11885 op0 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11886 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11887 code = (code == NE ? EQ : NE);
11888 continue;
11891 break;
11893 case ASHIFT:
11894 /* If we have (compare (ashift FOO N) (const_int C)) and
11895 the high order N bits of FOO (N+1 if an inequality comparison)
11896 are known to be zero, we can do this by comparing FOO with C
11897 shifted right N bits so long as the low-order N bits of C are
11898 zero. */
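/* For example, (eq (ashift FOO 2) (const_int 20)) becomes
   (eq FOO (const_int 5)) when the two high-order bits of FOO are
   known to be zero.  */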
11899 if (CONST_INT_P (XEXP (op0, 1))
11900 && INTVAL (XEXP (op0, 1)) >= 0
11901 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11902 < HOST_BITS_PER_WIDE_INT)
11903 && (((unsigned HOST_WIDE_INT) const_op
11904 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11905 - 1)) == 0)
11906 && mode_width <= HOST_BITS_PER_WIDE_INT
11907 && (nonzero_bits (XEXP (op0, 0), mode)
11908 & ~(mask >> (INTVAL (XEXP (op0, 1))
11909 + ! equality_comparison_p))) == 0)
11911 /* We must perform a logical shift, not an arithmetic one,
11912 as we want the top N bits of C to be zero. */
11913 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11915 temp >>= INTVAL (XEXP (op0, 1));
11916 op1 = gen_int_mode (temp, mode);
11917 op0 = XEXP (op0, 0);
11918 continue;
11921 /* If we are doing a sign bit comparison, it means we are testing
11922 a particular bit. Convert it to the appropriate AND. */
11923 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11924 && mode_width <= HOST_BITS_PER_WIDE_INT)
11926 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11927 ((unsigned HOST_WIDE_INT) 1
11928 << (mode_width - 1
11929 - INTVAL (XEXP (op0, 1)))));
11930 code = (code == LT ? NE : EQ);
11931 continue;
11934 /* If this is an equality comparison with zero and we are shifting
11935 the low bit to the sign bit, we can convert this to an AND of the
11936 low-order bit. */
11937 if (const_op == 0 && equality_comparison_p
11938 && CONST_INT_P (XEXP (op0, 1))
11939 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11941 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11942 continue;
11944 break;
11946 case ASHIFTRT:
11947 /* If this is an equality comparison with zero, we can do this
11948 as a logical shift, which might be much simpler. */
11949 if (equality_comparison_p && const_op == 0
11950 && CONST_INT_P (XEXP (op0, 1)))
11952 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11953 XEXP (op0, 0),
11954 INTVAL (XEXP (op0, 1)));
11955 continue;
11958 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11959 do the comparison in a narrower mode. */
11960 if (! unsigned_comparison_p
11961 && CONST_INT_P (XEXP (op0, 1))
11962 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11963 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11964 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11965 MODE_INT, 1)) != BLKmode
11966 && (((unsigned HOST_WIDE_INT) const_op
11967 + (GET_MODE_MASK (tmode) >> 1) + 1)
11968 <= GET_MODE_MASK (tmode)))
11970 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11971 continue;
11974 /* Likewise if OP0 is a PLUS of a sign extension with a
11975 constant, which is usually represented with the PLUS
11976 between the shifts. */
11977 if (! unsigned_comparison_p
11978 && CONST_INT_P (XEXP (op0, 1))
11979 && GET_CODE (XEXP (op0, 0)) == PLUS
11980 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11981 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11982 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11983 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11984 MODE_INT, 1)) != BLKmode
11985 && (((unsigned HOST_WIDE_INT) const_op
11986 + (GET_MODE_MASK (tmode) >> 1) + 1)
11987 <= GET_MODE_MASK (tmode)))
11989 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11990 rtx add_const = XEXP (XEXP (op0, 0), 1);
11991 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11992 add_const, XEXP (op0, 1));
11994 op0 = simplify_gen_binary (PLUS, tmode,
11995 gen_lowpart (tmode, inner),
11996 new_const);
11997 continue;
12000 /* ... fall through ... */
12001 case LSHIFTRT:
12002 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12003 the low order N bits of FOO are known to be zero, we can do this
12004 by comparing FOO with C shifted left N bits so long as no
12005 overflow occurs.  Even if the low-order N bits of FOO aren't known
12006 to be zero, we can still use the same optimization when the
12007 comparison is >= or <, and for > or <= by also setting all the
12008 low-order N bits in the comparison constant.  */
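/* For example, (ltu (lshiftrt FOO 2) (const_int 5)) becomes
   (ltu FOO (const_int 20)).  */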
12009 if (CONST_INT_P (XEXP (op0, 1))
12010 && INTVAL (XEXP (op0, 1)) > 0
12011 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12012 && mode_width <= HOST_BITS_PER_WIDE_INT
12013 && (((unsigned HOST_WIDE_INT) const_op
12014 + (GET_CODE (op0) != LSHIFTRT
12015 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12016 + 1)
12017 : 0))
12018 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12020 unsigned HOST_WIDE_INT low_bits
12021 = (nonzero_bits (XEXP (op0, 0), mode)
12022 & (((unsigned HOST_WIDE_INT) 1
12023 << INTVAL (XEXP (op0, 1))) - 1));
12024 if (low_bits == 0 || !equality_comparison_p)
12026 /* If the shift was logical, then we must make the condition
12027 unsigned. */
12028 if (GET_CODE (op0) == LSHIFTRT)
12029 code = unsigned_condition (code);
12031 const_op <<= INTVAL (XEXP (op0, 1));
12032 if (low_bits != 0
12033 && (code == GT || code == GTU
12034 || code == LE || code == LEU))
12035 const_op
12036 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
12037 op1 = GEN_INT (const_op);
12038 op0 = XEXP (op0, 0);
12039 continue;
12043 /* If we are using this shift to extract just the sign bit, we
12044 can replace this with an LT or GE comparison. */
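/* Illustrative sketch (added commentary; SImode is assumed for the
   example):
   (ne (lshiftrt:SI X (const_int 31)) (const_int 0))
   tests exactly the sign bit of X and is rewritten below as
   (lt X (const_int 0)); the EQ / GE forms map correspondingly.  */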
12045 if (const_op == 0
12046 && (equality_comparison_p || sign_bit_comparison_p)
12047 && CONST_INT_P (XEXP (op0, 1))
12048 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12050 op0 = XEXP (op0, 0);
12051 code = (code == NE || code == GT ? LT : GE);
12052 continue;
12054 break;
12056 default:
12057 break;
12060 break;
12063 /* Now make any compound operations involved in this comparison. Then,
12064 check for an outermost SUBREG on OP0 that is not doing anything or is
12065 paradoxical. The latter transformation must only be performed when
12066 it is known that the "extra" bits will be the same in op0 and op1 or
12067 that they don't matter. There are three cases to consider:
12069 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12070 care bits and we can assume they have any convenient value. So
12071 making the transformation is safe.
12073 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
12074 In this case the upper bits of op0 are undefined. We should not make
12075 the simplification in that case as we do not know the contents of
12076 those bits.
12078 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
12079 UNKNOWN. In that case we know those bits are zeros or ones. We must
12080 also be sure that they are the same as the upper bits of op1.
12082 We can never remove a SUBREG for a non-equality comparison because
12083 the sign bit is in a different place in the underlying object. */
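/* Illustrative sketch (added commentary; QImode/SImode and the constant
   are assumptions for the example), for case 1 above:
   (eq (subreg:SI (reg:QI R) 0) (const_int C))
   has don't-care upper bits in the paradoxical SUBREG, so the code
   below drops the SUBREG and compares in the narrow mode,
   (eq (reg:QI R) (const_int C'))
   where C' is the QImode lowpart of C.  */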
12085 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
12086 op1 = make_compound_operation (op1, SET);
12088 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12089 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12090 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12091 && (code == NE || code == EQ))
12093 if (paradoxical_subreg_p (op0))
12095 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12096 implemented. */
12097 if (REG_P (SUBREG_REG (op0)))
12099 op0 = SUBREG_REG (op0);
12100 op1 = gen_lowpart (GET_MODE (op0), op1);
12103 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12104 <= HOST_BITS_PER_WIDE_INT)
12105 && (nonzero_bits (SUBREG_REG (op0),
12106 GET_MODE (SUBREG_REG (op0)))
12107 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12109 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12111 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12112 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12113 op0 = SUBREG_REG (op0), op1 = tem;
12117 /* We now do the opposite procedure: Some machines don't have compare
12118 insns in all modes. If OP0's mode is an integer mode smaller than a
12119 word and we can't do a compare in that mode, see if there is a larger
12120 mode for which we can do the compare. There are a number of cases in
12121 which we can use the wider mode. */
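/* Illustrative sketch (added commentary; QImode and SImode are
   assumptions for the example): on a target with no QImode compare
   pattern, (eq (reg:QI R) (const_int 5)) can be widened to
   (eq (zero_extend:SI (reg:QI R)) (const_int 5))
   when the nonzero bits of the register already fit in QImode; the loop
   below looks for the narrowest wider mode with a compare pattern.  */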
12123 mode = GET_MODE (op0);
12124 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12125 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12126 && ! have_insn_for (COMPARE, mode))
12127 for (tmode = GET_MODE_WIDER_MODE (mode);
12128 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12129 tmode = GET_MODE_WIDER_MODE (tmode))
12130 if (have_insn_for (COMPARE, tmode))
12132 int zero_extended;
12134 /* If this is a test for negative, we can make an explicit
12135 test of the sign bit. Test this first so we can use
12136 a paradoxical subreg to extend OP0. */
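/* Illustrative sketch (added commentary; QImode/SImode are assumptions
   for the example): a negativity test (lt (reg:QI R) (const_int 0))
   can be done in the wider mode as
   (ne (and:SI (subreg:SI (reg:QI R) 0) (const_int 128)) (const_int 0))
   i.e. by masking the QImode sign bit through a paradoxical subreg,
   which is the AND the code below constructs.  */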
12138 if (op1 == const0_rtx && (code == LT || code == GE)
12139 && HWI_COMPUTABLE_MODE_P (mode))
12141 unsigned HOST_WIDE_INT sign
12142 = (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1);
12143 op0 = simplify_gen_binary (AND, tmode,
12144 gen_lowpart (tmode, op0),
12145 gen_int_mode (sign, tmode));
12146 code = (code == LT) ? NE : EQ;
12147 break;
12150 /* If the only nonzero bits in OP0 and OP1 are those in the
12151 narrower mode and this is an equality or unsigned comparison,
12152 we can use the wider mode. Similarly for sign-extended
12153 values, in which case it is true for all comparisons. */
12154 zero_extended = ((code == EQ || code == NE
12155 || code == GEU || code == GTU
12156 || code == LEU || code == LTU)
12157 && (nonzero_bits (op0, tmode)
12158 & ~GET_MODE_MASK (mode)) == 0
12159 && ((CONST_INT_P (op1)
12160 || (nonzero_bits (op1, tmode)
12161 & ~GET_MODE_MASK (mode)) == 0)));
12163 if (zero_extended
12164 || ((num_sign_bit_copies (op0, tmode)
12165 > (unsigned int) (GET_MODE_PRECISION (tmode)
12166 - GET_MODE_PRECISION (mode)))
12167 && (num_sign_bit_copies (op1, tmode)
12168 > (unsigned int) (GET_MODE_PRECISION (tmode)
12169 - GET_MODE_PRECISION (mode)))))
12171 /* If OP0 is an AND and we don't have an AND in MODE either,
12172 make a new AND in the proper mode. */
12173 if (GET_CODE (op0) == AND
12174 && !have_insn_for (AND, mode))
12175 op0 = simplify_gen_binary (AND, tmode,
12176 gen_lowpart (tmode,
12177 XEXP (op0, 0)),
12178 gen_lowpart (tmode,
12179 XEXP (op0, 1)));
12180 else
12182 if (zero_extended)
12184 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12185 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12187 else
12189 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12190 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12192 break;
12197 /* We may have changed the comparison operands. Re-canonicalize. */
12198 if (swap_commutative_operands_p (op0, op1))
12200 tem = op0, op0 = op1, op1 = tem;
12201 code = swap_condition (code);
12204 /* If this machine only supports a subset of valid comparisons, see if we
12205 can convert an unsupported one into a supported one. */
12206 target_canonicalize_comparison (&code, &op0, &op1, 0);
12208 *pop0 = op0;
12209 *pop1 = op1;
12211 return code;
12214 /* Utility function for record_value_for_reg. Count number of
12215 rtxs in X. */
12216 static int
12217 count_rtxs (rtx x)
12219 enum rtx_code code = GET_CODE (x);
12220 const char *fmt;
12221 int i, j, ret = 1;
12223 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12224 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12226 rtx x0 = XEXP (x, 0);
12227 rtx x1 = XEXP (x, 1);
12229 if (x0 == x1)
12230 return 1 + 2 * count_rtxs (x0);
12232 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12233 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12234 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12235 return 2 + 2 * count_rtxs (x0)
12236 + count_rtxs (x == XEXP (x1, 0)
12237 ? XEXP (x1, 1) : XEXP (x1, 0));
12239 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12240 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12241 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12242 return 2 + 2 * count_rtxs (x1)
12243 + count_rtxs (x == XEXP (x0, 0)
12244 ? XEXP (x0, 1) : XEXP (x0, 0));
12247 fmt = GET_RTX_FORMAT (code);
12248 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12249 if (fmt[i] == 'e')
12250 ret += count_rtxs (XEXP (x, i));
12251 else if (fmt[i] == 'E')
12252 for (j = 0; j < XVECLEN (x, i); j++)
12253 ret += count_rtxs (XVECEXP (x, i, j));
12255 return ret;
12258 /* Utility function for the following routine. Called when X is part of a value
12259 being stored into last_set_value. Sets last_set_table_tick
12260 for each register mentioned. Similar to mention_regs in cse.c */
12262 static void
12263 update_table_tick (rtx x)
12265 enum rtx_code code = GET_CODE (x);
12266 const char *fmt = GET_RTX_FORMAT (code);
12267 int i, j;
12269 if (code == REG)
12271 unsigned int regno = REGNO (x);
12272 unsigned int endregno = END_REGNO (x);
12273 unsigned int r;
12275 for (r = regno; r < endregno; r++)
12277 reg_stat_type *rsp = &reg_stat[r];
12278 rsp->last_set_table_tick = label_tick;
12281 return;
12284 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12285 if (fmt[i] == 'e')
12287 /* Check for identical subexpressions. If x contains
12288 identical subexpression we only have to traverse one of
12289 them. */
12290 if (i == 0 && ARITHMETIC_P (x))
12292 /* Note that at this point x1 has already been
12293 processed. */
12294 rtx x0 = XEXP (x, 0);
12295 rtx x1 = XEXP (x, 1);
12297 /* If x0 and x1 are identical then there is no need to
12298 process x0. */
12299 if (x0 == x1)
12300 break;
12302 /* If x0 is identical to a subexpression of x1 then while
12303 processing x1, x0 has already been processed. Thus we
12304 are done with x. */
12305 if (ARITHMETIC_P (x1)
12306 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12307 break;
12309 /* If x1 is identical to a subexpression of x0 then we
12310 still have to process the rest of x0. */
12311 if (ARITHMETIC_P (x0)
12312 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12314 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12315 break;
12319 update_table_tick (XEXP (x, i));
12321 else if (fmt[i] == 'E')
12322 for (j = 0; j < XVECLEN (x, i); j++)
12323 update_table_tick (XVECEXP (x, i, j));
12326 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12327 are saying that the register is clobbered and we no longer know its
12328 value. If INSN is zero, don't update reg_stat[].last_set; this is
12329 only permitted with VALUE also zero and is used to invalidate the
12330 register. */
12332 static void
12333 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
12335 unsigned int regno = REGNO (reg);
12336 unsigned int endregno = END_REGNO (reg);
12337 unsigned int i;
12338 reg_stat_type *rsp;
12340 /* If VALUE contains REG and we have a previous value for REG, substitute
12341 the previous value. */
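/* Illustrative sketch (added commentary; the register numbers are
   hypothetical): if reg 100 was last set by
   (set (reg 100) (plus (reg 101) (const_int 4)))
   and the new insn is (set (reg 100) (mult (reg 100) (reg 102))), the
   value recorded below becomes
   (mult (plus (reg 101) (const_int 4)) (reg 102))
   so the stored value never refers to the register being set.  */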
12342 if (value && insn && reg_overlap_mentioned_p (reg, value))
12344 rtx tem;
12346 /* Set things up so get_last_value is allowed to see anything set up to
12347 our insn. */
12348 subst_low_luid = DF_INSN_LUID (insn);
12349 tem = get_last_value (reg);
12351 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12352 it isn't going to be useful and will take a lot of time to process,
12353 so just use the CLOBBER. */
12355 if (tem)
12357 if (ARITHMETIC_P (tem)
12358 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12359 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12360 tem = XEXP (tem, 0);
12361 else if (count_occurrences (value, reg, 1) >= 2)
12363 /* If there are two or more occurrences of REG in VALUE,
12364 prevent the value from growing too much. */
12365 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12366 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12369 value = replace_rtx (copy_rtx (value), reg, tem);
12373 /* For each register modified, show we don't know its value, that
12374 we don't know about its bitwise content, that its value has been
12375 updated, and that we don't know the location of the death of the
12376 register. */
12377 for (i = regno; i < endregno; i++)
12379 rsp = &reg_stat[i];
12381 if (insn)
12382 rsp->last_set = insn;
12384 rsp->last_set_value = 0;
12385 rsp->last_set_mode = VOIDmode;
12386 rsp->last_set_nonzero_bits = 0;
12387 rsp->last_set_sign_bit_copies = 0;
12388 rsp->last_death = 0;
12389 rsp->truncated_to_mode = VOIDmode;
12392 /* Mark registers that are being referenced in this value. */
12393 if (value)
12394 update_table_tick (value);
12396 /* Now update the status of each register being set.
12397 If someone is using this register in this block, set this register
12398 to invalid since we will get confused between the two lives in this
12399 basic block. This makes using this register always invalid. In cse, we
12400 scan the table to invalidate all entries using this register, but this
12401 is too much work for us. */
12403 for (i = regno; i < endregno; i++)
12405 rsp = &reg_stat[i];
12406 rsp->last_set_label = label_tick;
12407 if (!insn
12408 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12409 rsp->last_set_invalid = 1;
12410 else
12411 rsp->last_set_invalid = 0;
12414 /* The value being assigned might refer to X (like in "x++;"). In that
12415 case, we must replace it with (clobber (const_int 0)) to prevent
12416 infinite loops. */
12417 rsp = &reg_stat[regno];
12418 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12420 value = copy_rtx (value);
12421 if (!get_last_value_validate (&value, insn, label_tick, 1))
12422 value = 0;
12425 /* For the main register being modified, update the value, the mode, the
12426 nonzero bits, and the number of sign bit copies. */
12428 rsp->last_set_value = value;
12430 if (value)
12432 machine_mode mode = GET_MODE (reg);
12433 subst_low_luid = DF_INSN_LUID (insn);
12434 rsp->last_set_mode = mode;
12435 if (GET_MODE_CLASS (mode) == MODE_INT
12436 && HWI_COMPUTABLE_MODE_P (mode))
12437 mode = nonzero_bits_mode;
12438 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12439 rsp->last_set_sign_bit_copies
12440 = num_sign_bit_copies (value, GET_MODE (reg));
12444 /* Called via note_stores from record_dead_and_set_regs to handle one
12445 SET or CLOBBER in an insn. DATA is the instruction in which the
12446 set is occurring. */
12448 static void
12449 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12451 rtx_insn *record_dead_insn = (rtx_insn *) data;
12453 if (GET_CODE (dest) == SUBREG)
12454 dest = SUBREG_REG (dest);
12456 if (!record_dead_insn)
12458 if (REG_P (dest))
12459 record_value_for_reg (dest, NULL, NULL_RTX);
12460 return;
12463 if (REG_P (dest))
12465 /* If we are setting the whole register, we know its value. Otherwise
12466 show that we don't know the value. We can handle SUBREG in
12467 some cases. */
12468 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12469 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12470 else if (GET_CODE (setter) == SET
12471 && GET_CODE (SET_DEST (setter)) == SUBREG
12472 && SUBREG_REG (SET_DEST (setter)) == dest
12473 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12474 && subreg_lowpart_p (SET_DEST (setter)))
12475 record_value_for_reg (dest, record_dead_insn,
12476 gen_lowpart (GET_MODE (dest),
12477 SET_SRC (setter)));
12478 else
12479 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12481 else if (MEM_P (dest)
12482 /* Ignore pushes, they clobber nothing. */
12483 && ! push_operand (dest, GET_MODE (dest)))
12484 mem_last_set = DF_INSN_LUID (record_dead_insn);
12487 /* Update the records of when each REG was most recently set or killed
12488 for the things done by INSN. This is the last thing done in processing
12489 INSN in the combiner loop.
12491 We update reg_stat[], in particular fields last_set, last_set_value,
12492 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12493 last_death, and also the similar information mem_last_set (which insn
12494 most recently modified memory) and last_call_luid (which insn was the
12495 most recent subroutine call). */
12497 static void
12498 record_dead_and_set_regs (rtx_insn *insn)
12500 rtx link;
12501 unsigned int i;
12503 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12505 if (REG_NOTE_KIND (link) == REG_DEAD
12506 && REG_P (XEXP (link, 0)))
12508 unsigned int regno = REGNO (XEXP (link, 0));
12509 unsigned int endregno = END_REGNO (XEXP (link, 0));
12511 for (i = regno; i < endregno; i++)
12513 reg_stat_type *rsp;
12515 rsp = &reg_stat[i];
12516 rsp->last_death = insn;
12519 else if (REG_NOTE_KIND (link) == REG_INC)
12520 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12523 if (CALL_P (insn))
12525 hard_reg_set_iterator hrsi;
12526 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
12528 reg_stat_type *rsp;
12530 rsp = &reg_stat[i];
12531 rsp->last_set_invalid = 1;
12532 rsp->last_set = insn;
12533 rsp->last_set_value = 0;
12534 rsp->last_set_mode = VOIDmode;
12535 rsp->last_set_nonzero_bits = 0;
12536 rsp->last_set_sign_bit_copies = 0;
12537 rsp->last_death = 0;
12538 rsp->truncated_to_mode = VOIDmode;
12541 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12543 /* We can't combine into a call pattern. Remember, though, that
12544 the return value register is set at this LUID. We could
12545 still replace a register with the return value from the
12546 wrong subroutine call! */
12547 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12549 else
12550 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12553 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12554 register present in the SUBREG, so for each such SUBREG go back and
12555 adjust nonzero and sign bit information of the registers that are
12556 known to have some zero/sign bits set.
12558 This is needed because when combine blows the SUBREGs away, the
12559 information on zero/sign bits is lost and further combines can be
12560 missed because of that. */
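/* Illustrative sketch (added commentary; QImode/SImode are assumptions
   for the example): for a promoted access such as
   (subreg:QI (reg:SI R) 0) with SUBREG_PROMOTED_UNSIGNED_P set, R is
   known to hold the QImode value zero-extended to SImode, so the
   nonzero bits recorded at R's setter can be narrowed to the QImode
   mask, which is what record_promoted_value does below.  */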
12562 static void
12563 record_promoted_value (rtx_insn *insn, rtx subreg)
12565 struct insn_link *links;
12566 rtx set;
12567 unsigned int regno = REGNO (SUBREG_REG (subreg));
12568 machine_mode mode = GET_MODE (subreg);
12570 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12571 return;
12573 for (links = LOG_LINKS (insn); links;)
12575 reg_stat_type *rsp;
12577 insn = links->insn;
12578 set = single_set (insn);
12580 if (! set || !REG_P (SET_DEST (set))
12581 || REGNO (SET_DEST (set)) != regno
12582 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12584 links = links->next;
12585 continue;
12588 rsp = &reg_stat[regno];
12589 if (rsp->last_set == insn)
12591 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
12592 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12595 if (REG_P (SET_SRC (set)))
12597 regno = REGNO (SET_SRC (set));
12598 links = LOG_LINKS (insn);
12600 else
12601 break;
12605 /* Check if X, a register, is known to contain a value already
12606 truncated to MODE. In this case we can use a subreg to refer to
12607 the truncated value even though in the generic case we would need
12608 an explicit truncation. */
12610 static bool
12611 reg_truncated_to_mode (machine_mode mode, const_rtx x)
12613 reg_stat_type *rsp = &reg_stat[REGNO (x)];
12614 machine_mode truncated = rsp->truncated_to_mode;
12616 if (truncated == 0
12617 || rsp->truncation_label < label_tick_ebb_start)
12618 return false;
12619 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12620 return true;
12621 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12622 return true;
12623 return false;
12626 /* If X is a hard reg or a subreg record the mode that the register is
12627 accessed in. For non-TRULY_NOOP_TRUNCATION targets we might be able
12628 to turn a truncate into a subreg using this information. Return true
12629 if traversing X is complete. */
12631 static bool
12632 record_truncated_value (rtx x)
12634 machine_mode truncated_mode;
12635 reg_stat_type *rsp;
12637 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12639 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12640 truncated_mode = GET_MODE (x);
12642 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12643 return true;
12645 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
12646 return true;
12648 x = SUBREG_REG (x);
12650 /* ??? For hard-regs we now record everything. We might be able to
12651 optimize this using last_set_mode. */
12652 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12653 truncated_mode = GET_MODE (x);
12654 else
12655 return false;
12657 rsp = &reg_stat[REGNO (x)];
12658 if (rsp->truncated_to_mode == 0
12659 || rsp->truncation_label < label_tick_ebb_start
12660 || (GET_MODE_SIZE (truncated_mode)
12661 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12663 rsp->truncated_to_mode = truncated_mode;
12664 rsp->truncation_label = label_tick;
12667 return true;
12670 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12671 the modes they are used in. This can help turn TRUNCATEs into
12672 SUBREGs. */
12674 static void
12675 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
12677 subrtx_var_iterator::array_type array;
12678 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
12679 if (record_truncated_value (*iter))
12680 iter.skip_subrtxes ();
12683 /* Scan X for promoted SUBREGs. For each one found,
12684 note what it implies to the registers used in it. */
12686 static void
12687 check_promoted_subreg (rtx_insn *insn, rtx x)
12689 if (GET_CODE (x) == SUBREG
12690 && SUBREG_PROMOTED_VAR_P (x)
12691 && REG_P (SUBREG_REG (x)))
12692 record_promoted_value (insn, x);
12693 else
12695 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12696 int i, j;
12698 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12699 switch (format[i])
12701 case 'e':
12702 check_promoted_subreg (insn, XEXP (x, i));
12703 break;
12704 case 'V':
12705 case 'E':
12706 if (XVEC (x, i) != 0)
12707 for (j = 0; j < XVECLEN (x, i); j++)
12708 check_promoted_subreg (insn, XVECEXP (x, i, j));
12709 break;
12714 /* Verify that all the registers and memory references mentioned in *LOC are
12715 still valid. *LOC was part of a value set in INSN when label_tick was
12716 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12717 the invalid references with (clobber (const_int 0)) and return 1. This
12718 replacement is useful because we often can get useful information about
12719 the form of a value (e.g., if it was produced by a shift that always
12720 produces -1 or 0) even though we don't know exactly what registers it
12721 was produced from. */
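/* Illustrative sketch (added commentary; the register number is
   hypothetical): if the recorded value was
   (ashiftrt:SI (reg 100) (const_int 31)) and reg 100 is no longer
   valid, the REPLACE pass rewrites it as
   (ashiftrt:SI (clobber (const_int 0)) (const_int 31));
   the operand is lost, but the useful fact that the value is either 0
   or -1 is preserved.  */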
12723 static int
12724 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
12726 rtx x = *loc;
12727 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12728 int len = GET_RTX_LENGTH (GET_CODE (x));
12729 int i, j;
12731 if (REG_P (x))
12733 unsigned int regno = REGNO (x);
12734 unsigned int endregno = END_REGNO (x);
12735 unsigned int j;
12737 for (j = regno; j < endregno; j++)
12739 reg_stat_type *rsp = &reg_stat[j];
12740 if (rsp->last_set_invalid
12741 /* If this is a pseudo-register that was only set once and not
12742 live at the beginning of the function, it is always valid. */
12743 || (! (regno >= FIRST_PSEUDO_REGISTER
12744 && REG_N_SETS (regno) == 1
12745 && (!REGNO_REG_SET_P
12746 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
12747 regno)))
12748 && rsp->last_set_label > tick))
12750 if (replace)
12751 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12752 return replace;
12756 return 1;
12758 /* If this is a memory reference, make sure that there were no stores after
12759 it that might have clobbered the value. We don't have alias info, so we
12760 assume any store invalidates it. Moreover, we only have local UIDs, so
12761 we also assume that there were stores in the intervening basic blocks. */
12762 else if (MEM_P (x) && !MEM_READONLY_P (x)
12763 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12765 if (replace)
12766 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12767 return replace;
12770 for (i = 0; i < len; i++)
12772 if (fmt[i] == 'e')
12774 /* Check for identical subexpressions. If x contains
12775 identical subexpression we only have to traverse one of
12776 them. */
12777 if (i == 1 && ARITHMETIC_P (x))
12779 /* Note that at this point x0 has already been checked
12780 and found valid. */
12781 rtx x0 = XEXP (x, 0);
12782 rtx x1 = XEXP (x, 1);
12784 /* If x0 and x1 are identical then x is also valid. */
12785 if (x0 == x1)
12786 return 1;
12788 /* If x1 is identical to a subexpression of x0 then
12789 while checking x0, x1 has already been checked. Thus
12790 it is valid and so is x. */
12791 if (ARITHMETIC_P (x0)
12792 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12793 return 1;
12795 /* If x0 is identical to a subexpression of x1 then x is
12796 valid iff the rest of x1 is valid. */
12797 if (ARITHMETIC_P (x1)
12798 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12799 return
12800 get_last_value_validate (&XEXP (x1,
12801 x0 == XEXP (x1, 0) ? 1 : 0),
12802 insn, tick, replace);
12805 if (get_last_value_validate (&XEXP (x, i), insn, tick,
12806 replace) == 0)
12807 return 0;
12809 else if (fmt[i] == 'E')
12810 for (j = 0; j < XVECLEN (x, i); j++)
12811 if (get_last_value_validate (&XVECEXP (x, i, j),
12812 insn, tick, replace) == 0)
12813 return 0;
12816 /* If we haven't found a reason for it to be invalid, it is valid. */
12817 return 1;
12820 /* Get the last value assigned to X, if known. Some registers
12821 in the value may be replaced with (clobber (const_int 0)) if their value
12822 is no longer known reliably. */
12824 static rtx
12825 get_last_value (const_rtx x)
12827 unsigned int regno;
12828 rtx value;
12829 reg_stat_type *rsp;
12831 /* If this is a non-paradoxical SUBREG, get the value of its operand and
12832 then convert it to the desired mode. If this is a paradoxical SUBREG,
12833 we cannot predict what values the "extra" bits might have. */
12834 if (GET_CODE (x) == SUBREG
12835 && subreg_lowpart_p (x)
12836 && !paradoxical_subreg_p (x)
12837 && (value = get_last_value (SUBREG_REG (x))) != 0)
12838 return gen_lowpart (GET_MODE (x), value);
12840 if (!REG_P (x))
12841 return 0;
12843 regno = REGNO (x);
12844 rsp = &reg_stat[regno];
12845 value = rsp->last_set_value;
12847 /* If we don't have a value, or if it isn't for this basic block and
12848 it's either a hard register, set more than once, or it's live
12849 at the beginning of the function, return 0.
12851 Because if it's not live at the beginning of the function then the reg
12852 is always set before being used (is never used without being set).
12853 And, if it's set only once, and it's always set before use, then all
12854 uses must have the same last value, even if it's not from this basic
12855 block. */
12857 if (value == 0
12858 || (rsp->last_set_label < label_tick_ebb_start
12859 && (regno < FIRST_PSEUDO_REGISTER
12860 || REG_N_SETS (regno) != 1
12861 || REGNO_REG_SET_P
12862 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
12863 return 0;
12865 /* If the value was set in a later insn than the ones we are processing,
12866 we can't use it even if the register was only set once. */
12867 if (rsp->last_set_label == label_tick
12868 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12869 return 0;
12871 /* If the value has all its registers valid, return it. */
12872 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12873 return value;
12875 /* Otherwise, make a copy and replace any invalid register with
12876 (clobber (const_int 0)). If that fails for some reason, return 0. */
12878 value = copy_rtx (value);
12879 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12880 return value;
12882 return 0;
12885 /* Return nonzero if expression X refers to a REG or to memory
12886 that is set in an instruction more recent than FROM_LUID. */
12888 static int
12889 use_crosses_set_p (const_rtx x, int from_luid)
12891 const char *fmt;
12892 int i;
12893 enum rtx_code code = GET_CODE (x);
12895 if (code == REG)
12897 unsigned int regno = REGNO (x);
12898 unsigned endreg = END_REGNO (x);
12900 #ifdef PUSH_ROUNDING
12901 /* Don't allow uses of the stack pointer to be moved,
12902 because we don't know whether the move crosses a push insn. */
12903 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12904 return 1;
12905 #endif
12906 for (; regno < endreg; regno++)
12908 reg_stat_type *rsp = &reg_stat[regno];
12909 if (rsp->last_set
12910 && rsp->last_set_label == label_tick
12911 && DF_INSN_LUID (rsp->last_set) > from_luid)
12912 return 1;
12914 return 0;
12917 if (code == MEM && mem_last_set > from_luid)
12918 return 1;
12920 fmt = GET_RTX_FORMAT (code);
12922 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12924 if (fmt[i] == 'E')
12926 int j;
12927 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12928 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12929 return 1;
12931 else if (fmt[i] == 'e'
12932 && use_crosses_set_p (XEXP (x, i), from_luid))
12933 return 1;
12935 return 0;
12938 /* Define three variables used for communication between the following
12939 routines. */
12941 static unsigned int reg_dead_regno, reg_dead_endregno;
12942 static int reg_dead_flag;
12944 /* Function called via note_stores from reg_dead_at_p.
12946 If DEST is within [reg_dead_regno, reg_dead_endregno), set
12947 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
12949 static void
12950 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12952 unsigned int regno, endregno;
12954 if (!REG_P (dest))
12955 return;
12957 regno = REGNO (dest);
12958 endregno = END_REGNO (dest);
12959 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12960 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12963 /* Return nonzero if REG is known to be dead at INSN.
12965 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
12966 referencing REG, it is dead. If we hit a SET referencing REG, it is
12967 live. Otherwise, see if it is live or dead at the start of the basic
12968 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
12969 must be assumed to be always live. */
12971 static int
12972 reg_dead_at_p (rtx reg, rtx_insn *insn)
12974 basic_block block;
12975 unsigned int i;
12977 /* Set variables for reg_dead_at_p_1. */
12978 reg_dead_regno = REGNO (reg);
12979 reg_dead_endregno = END_REGNO (reg);
12981 reg_dead_flag = 0;
12983 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
12984 we allow the machine description to decide whether use-and-clobber
12985 patterns are OK. */
12986 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12988 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12989 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12990 return 0;
12993 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12994 beginning of basic block. */
12995 block = BLOCK_FOR_INSN (insn);
12996 for (;;)
12998 if (INSN_P (insn))
13000 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13001 return 1;
13003 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13004 if (reg_dead_flag)
13005 return reg_dead_flag == 1 ? 1 : 0;
13007 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13008 return 1;
13011 if (insn == BB_HEAD (block))
13012 break;
13014 insn = PREV_INSN (insn);
13017 /* Look at live-in sets for the basic block that we were in. */
13018 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13019 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13020 return 0;
13022 return 1;
13025 /* Note hard registers in X that are used. */
13027 static void
13028 mark_used_regs_combine (rtx x)
13030 RTX_CODE code = GET_CODE (x);
13031 unsigned int regno;
13032 int i;
13034 switch (code)
13036 case LABEL_REF:
13037 case SYMBOL_REF:
13038 case CONST:
13039 CASE_CONST_ANY:
13040 case PC:
13041 case ADDR_VEC:
13042 case ADDR_DIFF_VEC:
13043 case ASM_INPUT:
13044 #ifdef HAVE_cc0
13045 /* CC0 must die in the insn after it is set, so we don't need to take
13046 special note of it here. */
13047 case CC0:
13048 #endif
13049 return;
13051 case CLOBBER:
13052 /* If we are clobbering a MEM, mark any hard registers inside the
13053 address as used. */
13054 if (MEM_P (XEXP (x, 0)))
13055 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13056 return;
13058 case REG:
13059 regno = REGNO (x);
13060 /* A hard reg in a wide mode may really be multiple registers.
13061 If so, mark all of them just like the first. */
13062 if (regno < FIRST_PSEUDO_REGISTER)
13064 /* None of this applies to the stack, frame or arg pointers. */
13065 if (regno == STACK_POINTER_REGNUM
13066 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
13067 || regno == HARD_FRAME_POINTER_REGNUM
13068 #endif
13069 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13070 || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13071 #endif
13072 || regno == FRAME_POINTER_REGNUM)
13073 return;
13075 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13077 return;
13079 case SET:
13081 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13082 the address. */
13083 rtx testreg = SET_DEST (x);
13085 while (GET_CODE (testreg) == SUBREG
13086 || GET_CODE (testreg) == ZERO_EXTRACT
13087 || GET_CODE (testreg) == STRICT_LOW_PART)
13088 testreg = XEXP (testreg, 0);
13090 if (MEM_P (testreg))
13091 mark_used_regs_combine (XEXP (testreg, 0));
13093 mark_used_regs_combine (SET_SRC (x));
13095 return;
13097 default:
13098 break;
13101 /* Recursively scan the operands of this expression. */
13104 const char *fmt = GET_RTX_FORMAT (code);
13106 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13108 if (fmt[i] == 'e')
13109 mark_used_regs_combine (XEXP (x, i));
13110 else if (fmt[i] == 'E')
13112 int j;
13114 for (j = 0; j < XVECLEN (x, i); j++)
13115 mark_used_regs_combine (XVECEXP (x, i, j));
13121 /* Remove register number REGNO from the dead registers list of INSN.
13123 Return the note used to record the death, if there was one. */
13126 remove_death (unsigned int regno, rtx_insn *insn)
13128 rtx note = find_regno_note (insn, REG_DEAD, regno);
13130 if (note)
13131 remove_note (insn, note);
13133 return note;
13136 /* For each register (hardware or pseudo) used within expression X, if its
13137 death is in an instruction with luid between FROM_LUID (inclusive) and
13138 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13139 list headed by PNOTES.
13141 That said, don't move registers killed by maybe_kill_insn.
13143 This is done when X is being merged by combination into TO_INSN. These
13144 notes will then be distributed as needed. */
13146 static void
13147 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13148 rtx *pnotes)
13150 const char *fmt;
13151 int len, i;
13152 enum rtx_code code = GET_CODE (x);
13154 if (code == REG)
13156 unsigned int regno = REGNO (x);
13157 rtx_insn *where_dead = reg_stat[regno].last_death;
13159 /* Don't move the register if it gets killed in between from and to. */
13160 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13161 && ! reg_referenced_p (x, maybe_kill_insn))
13162 return;
13164 if (where_dead
13165 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13166 && DF_INSN_LUID (where_dead) >= from_luid
13167 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13169 rtx note = remove_death (regno, where_dead);
13171 /* It is possible for the call above to return 0. This can occur
13172 when last_death points to I2 or I1 that we combined with.
13173 In that case make a new note.
13175 We must also check for the case where X is a hard register
13176 and NOTE is a death note for a range of hard registers
13177 including X. In that case, we must put REG_DEAD notes for
13178 the remaining registers in place of NOTE. */
13180 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13181 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13182 > GET_MODE_SIZE (GET_MODE (x))))
13184 unsigned int deadregno = REGNO (XEXP (note, 0));
13185 unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
13186 unsigned int ourend = END_HARD_REGNO (x);
13187 unsigned int i;
13189 for (i = deadregno; i < deadend; i++)
13190 if (i < regno || i >= ourend)
13191 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13194 /* If we didn't find any note, or if we found a REG_DEAD note that
13195 covers only part of the given reg, and we have a multi-reg hard
13196 register, then to be safe we must check for REG_DEAD notes
13197 for each register other than the first. They could have
13198 their own REG_DEAD notes lying around. */
13199 else if ((note == 0
13200 || (note != 0
13201 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13202 < GET_MODE_SIZE (GET_MODE (x)))))
13203 && regno < FIRST_PSEUDO_REGISTER
13204 && hard_regno_nregs[regno][GET_MODE (x)] > 1)
13206 unsigned int ourend = END_HARD_REGNO (x);
13207 unsigned int i, offset;
13208 rtx oldnotes = 0;
13210 if (note)
13211 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13212 else
13213 offset = 1;
13215 for (i = regno + offset; i < ourend; i++)
13216 move_deaths (regno_reg_rtx[i],
13217 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13220 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13222 XEXP (note, 1) = *pnotes;
13223 *pnotes = note;
13225 else
13226 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13229 return;
13232 else if (GET_CODE (x) == SET)
13234 rtx dest = SET_DEST (x);
13236 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13238 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13239 that accesses one word of a multi-word item, some
13240 piece of every register in the expression is used by
13241 this insn, so remove any old death. */
13242 /* ??? So why do we test for equality of the sizes? */
13244 if (GET_CODE (dest) == ZERO_EXTRACT
13245 || GET_CODE (dest) == STRICT_LOW_PART
13246 || (GET_CODE (dest) == SUBREG
13247 && (((GET_MODE_SIZE (GET_MODE (dest))
13248 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13249 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13250 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13252 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13253 return;
13256 /* If this is some other SUBREG, we know it replaces the entire
13257 value, so use that as the destination. */
13258 if (GET_CODE (dest) == SUBREG)
13259 dest = SUBREG_REG (dest);
13261 /* If this is a MEM, adjust deaths of anything used in the address.
13262 For a REG (the only other possibility), the entire value is
13263 being replaced so the old value is not used in this insn. */
13265 if (MEM_P (dest))
13266 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13267 to_insn, pnotes);
13268 return;
13271 else if (GET_CODE (x) == CLOBBER)
13272 return;
13274 len = GET_RTX_LENGTH (code);
13275 fmt = GET_RTX_FORMAT (code);
13277 for (i = 0; i < len; i++)
13279 if (fmt[i] == 'E')
13281 int j;
13282 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13283 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13284 to_insn, pnotes);
13286 else if (fmt[i] == 'e')
13287 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13291 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13292 pattern of an insn. X must be a REG. */
13294 static int
13295 reg_bitfield_target_p (rtx x, rtx body)
13297 int i;
13299 if (GET_CODE (body) == SET)
13301 rtx dest = SET_DEST (body);
13302 rtx target;
13303 unsigned int regno, tregno, endregno, endtregno;
13305 if (GET_CODE (dest) == ZERO_EXTRACT)
13306 target = XEXP (dest, 0);
13307 else if (GET_CODE (dest) == STRICT_LOW_PART)
13308 target = SUBREG_REG (XEXP (dest, 0));
13309 else
13310 return 0;
13312 if (GET_CODE (target) == SUBREG)
13313 target = SUBREG_REG (target);
13315 if (!REG_P (target))
13316 return 0;
13318 tregno = REGNO (target), regno = REGNO (x);
13319 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13320 return target == x;
13322 endtregno = end_hard_regno (GET_MODE (target), tregno);
13323 endregno = end_hard_regno (GET_MODE (x), regno);
13325 return endregno > tregno && regno < endtregno;
13328 else if (GET_CODE (body) == PARALLEL)
13329 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13330 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13331 return 1;
13333 return 0;
13336 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13337 as appropriate. I3 and I2 are the insns resulting from the combination
13338 insns including FROM (I2 may be zero).
13340 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13341 not need REG_DEAD notes because they are being substituted for. This
13342 saves searching in the most common cases.
13344 Each note in the list is either ignored or placed on some insns, depending
13345 on the type of note. */
13347 static void
13348 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
13349 rtx elim_i2, rtx elim_i1, rtx elim_i0)
13351 rtx note, next_note;
13352 rtx tem_note;
13353 rtx_insn *tem_insn;
13355 for (note = notes; note; note = next_note)
13357 rtx_insn *place = 0, *place2 = 0;
13359 next_note = XEXP (note, 1);
13360 switch (REG_NOTE_KIND (note))
13362 case REG_BR_PROB:
13363 case REG_BR_PRED:
13364 /* Doesn't matter much where we put this, as long as it's somewhere.
13365 It is preferable to keep these notes on branches, which is most
13366 likely to be i3. */
13367 place = i3;
13368 break;
13370 case REG_NON_LOCAL_GOTO:
13371 if (JUMP_P (i3))
13372 place = i3;
13373 else
13375 gcc_assert (i2 && JUMP_P (i2));
13376 place = i2;
13378 break;
13380 case REG_EH_REGION:
13381 /* These notes must remain with the call or trapping instruction. */
13382 if (CALL_P (i3))
13383 place = i3;
13384 else if (i2 && CALL_P (i2))
13385 place = i2;
13386 else
13388 gcc_assert (cfun->can_throw_non_call_exceptions);
13389 if (may_trap_p (i3))
13390 place = i3;
13391 else if (i2 && may_trap_p (i2))
13392 place = i2;
13393 /* ??? Otherwise assume we've combined things such that we
13394 can now prove that the instructions can't trap. Drop the
13395 note in this case. */
13397 break;
13399 case REG_ARGS_SIZE:
13400 /* ??? How to distribute between i3-i1. Assume i3 contains the
13401 entire adjustment. Assert i3 contains at least some adjust. */
13402 if (!noop_move_p (i3))
13404 int old_size, args_size = INTVAL (XEXP (note, 0));
13405 /* fixup_args_size_notes looks at REG_NORETURN note,
13406 so ensure the note is placed there first. */
13407 if (CALL_P (i3))
13409 rtx *np;
13410 for (np = &next_note; *np; np = &XEXP (*np, 1))
13411 if (REG_NOTE_KIND (*np) == REG_NORETURN)
13413 rtx n = *np;
13414 *np = XEXP (n, 1);
13415 XEXP (n, 1) = REG_NOTES (i3);
13416 REG_NOTES (i3) = n;
13417 break;
13420 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13421 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
13422 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
13423 gcc_assert (old_size != args_size
13424 || (CALL_P (i3)
13425 && !ACCUMULATE_OUTGOING_ARGS
13426 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13428 break;
13430 case REG_NORETURN:
13431 case REG_SETJMP:
13432 case REG_TM:
13433 case REG_CALL_DECL:
13434 /* These notes must remain with the call. It should not be
13435 possible for both I2 and I3 to be a call. */
13436 if (CALL_P (i3))
13437 place = i3;
13438 else
13440 gcc_assert (i2 && CALL_P (i2));
13441 place = i2;
13443 break;
13445 case REG_UNUSED:
13446 /* Any clobbers for i3 may still exist, and so we must process
13447 REG_UNUSED notes from that insn.
13449 Any clobbers from i2 or i1 can only exist if they were added by
13450 recog_for_combine. In that case, recog_for_combine created the
13451 necessary REG_UNUSED notes. Trying to keep any original
13452 REG_UNUSED notes from these insns can cause incorrect output
13453 if it is for the same register as the original i3 dest.
13454 In that case, we will notice that the register is set in i3,
13455 and then add a REG_UNUSED note for the destination of i3, which
13456 is wrong. However, it is possible to have REG_UNUSED notes from
13457 i2 or i1 for registers which were both used and clobbered, so
13458 we keep notes from i2 or i1 if they will turn into REG_DEAD
13459 notes. */
13461 /* If this register is set or clobbered in I3, put the note there
13462 unless there is one already. */
13463 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13465 if (from_insn != i3)
13466 break;
13468 if (! (REG_P (XEXP (note, 0))
13469 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13470 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13471 place = i3;
13473 /* Otherwise, if this register is used by I3, then this register
13474 now dies here, so we must put a REG_DEAD note here unless there
13475 is one already. */
13476 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13477 && ! (REG_P (XEXP (note, 0))
13478 ? find_regno_note (i3, REG_DEAD,
13479 REGNO (XEXP (note, 0)))
13480 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13482 PUT_REG_NOTE_KIND (note, REG_DEAD);
13483 place = i3;
13485 break;
13487 case REG_EQUAL:
13488 case REG_EQUIV:
13489 case REG_NOALIAS:
13490 /* These notes say something about results of an insn. We can
13491 only support them if they used to be on I3 in which case they
13492 remain on I3. Otherwise they are ignored.
13494 If the note refers to an expression that is not a constant, we
13495 must also ignore the note since we cannot tell whether the
13496 equivalence is still true. It might be possible to do
13497 slightly better than this (we only have a problem if I2DEST
13498 or I1DEST is present in the expression), but it doesn't
13499 seem worth the trouble. */
13501 if (from_insn == i3
13502 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13503 place = i3;
13504 break;
13506 case REG_INC:
13507 /* These notes say something about how a register is used. They must
13508 be present on any use of the register in I2 or I3. */
13509 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13510 place = i3;
13512 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13514 if (place)
13515 place2 = i2;
13516 else
13517 place = i2;
13519 break;
13521 case REG_LABEL_TARGET:
13522 case REG_LABEL_OPERAND:
13523 /* This can show up in several ways -- either directly in the
13524 pattern, or hidden off in the constant pool with (or without?)
13525 a REG_EQUAL note. */
13526 /* ??? Ignore the without-reg_equal-note problem for now. */
13527 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13528 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13529 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13530 && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0)))
13531 place = i3;
13533 if (i2
13534 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13535 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13536 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13537 && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0))))
13539 if (place)
13540 place2 = i2;
13541 else
13542 place = i2;
13545 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13546 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13547 there. */
13548 if (place && JUMP_P (place)
13549 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13550 && (JUMP_LABEL (place) == NULL
13551 || JUMP_LABEL (place) == XEXP (note, 0)))
13553 rtx label = JUMP_LABEL (place);
13555 if (!label)
13556 JUMP_LABEL (place) = XEXP (note, 0);
13557 else if (LABEL_P (label))
13558 LABEL_NUSES (label)--;
13561 if (place2 && JUMP_P (place2)
13562 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13563 && (JUMP_LABEL (place2) == NULL
13564 || JUMP_LABEL (place2) == XEXP (note, 0)))
13566 rtx label = JUMP_LABEL (place2);
13568 if (!label)
13569 JUMP_LABEL (place2) = XEXP (note, 0);
13570 else if (LABEL_P (label))
13571 LABEL_NUSES (label)--;
13572 place2 = 0;
13574 break;
13576 case REG_NONNEG:
13577 /* This note says something about the value of a register prior
13578 to the execution of an insn. It is too much trouble to see
13579 if the note is still correct in all situations. It is better
13580 to simply delete it. */
13581 break;
13583 case REG_DEAD:
13584 /* If we replaced the right hand side of FROM_INSN with a
13585 REG_EQUAL note, the original use of the dying register
13586 will not have been combined into I3 and I2. In such cases,
13587 FROM_INSN is guaranteed to be the first of the combined
13588 instructions, so we simply need to search back before
13589 FROM_INSN for the previous use or set of this register,
13590 then alter the notes there appropriately.
13592 If the register is used as an input in I3, it dies there.
13593 Similarly for I2, if it is nonzero and adjacent to I3.
13595 If the register is not used as an input in either I3 or I2
13596 and it is not one of the registers we were supposed to eliminate,
13597 there are two possibilities. We might have a non-adjacent I2
13598 or we might have somehow eliminated an additional register
13599 from a computation. For example, we might have had A & B where
13600 we discover that B will always be zero. In this case we will
13601 eliminate the reference to A.
13603 In both cases, we must search to see if we can find a previous
13604 use of A and put the death note there. */
13606 if (from_insn
13607 && from_insn == i2mod
13608 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13609 tem_insn = from_insn;
13610 else
13612 if (from_insn
13613 && CALL_P (from_insn)
13614 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13615 place = from_insn;
13616 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13617 place = i3;
13618 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13619 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13620 place = i2;
13621 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13622 && !(i2mod
13623 && reg_overlap_mentioned_p (XEXP (note, 0),
13624 i2mod_old_rhs)))
13625 || rtx_equal_p (XEXP (note, 0), elim_i1)
13626 || rtx_equal_p (XEXP (note, 0), elim_i0))
13627 break;
13628 tem_insn = i3;
13631 if (place == 0)
13633 basic_block bb = this_basic_block;
13635 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
13637 if (!NONDEBUG_INSN_P (tem_insn))
13639 if (tem_insn == BB_HEAD (bb))
13640 break;
13641 continue;
13644 /* If the register is being set at TEM_INSN, see if that is all
13645 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
13646 into a REG_UNUSED note instead. Don't delete sets to
13647 global register vars. */
13648 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13649 || !global_regs[REGNO (XEXP (note, 0))])
13650 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
13652 rtx set = single_set (tem_insn);
13653 rtx inner_dest = 0;
13654 #ifdef HAVE_cc0
13655 rtx_insn *cc0_setter = NULL;
13656 #endif
13658 if (set != 0)
13659 for (inner_dest = SET_DEST (set);
13660 (GET_CODE (inner_dest) == STRICT_LOW_PART
13661 || GET_CODE (inner_dest) == SUBREG
13662 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13663 inner_dest = XEXP (inner_dest, 0))
13666 /* Verify that it was the set, and not a clobber that
13667 modified the register.
13669 CC0 targets must be careful to maintain setter/user
13670 pairs. If we cannot delete the setter due to side
13671 effects, mark the user with an UNUSED note instead
13672 of deleting it. */
13674 if (set != 0 && ! side_effects_p (SET_SRC (set))
13675 && rtx_equal_p (XEXP (note, 0), inner_dest)
13676 #ifdef HAVE_cc0
13677 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13678 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
13679 && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13680 #endif
13683 /* Move the notes and links of TEM_INSN elsewhere.
13684 This might delete other dead insns recursively.
13685 First set the pattern to something that won't use
13686 any register. */
13687 rtx old_notes = REG_NOTES (tem_insn);
13689 PATTERN (tem_insn) = pc_rtx;
13690 REG_NOTES (tem_insn) = NULL;
13692 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
13693 NULL_RTX, NULL_RTX, NULL_RTX);
13694 distribute_links (LOG_LINKS (tem_insn));
13696 SET_INSN_DELETED (tem_insn);
13697 if (tem_insn == i2)
13698 i2 = NULL;
13700 #ifdef HAVE_cc0
13701 /* Delete the setter too. */
13702 if (cc0_setter)
13704 PATTERN (cc0_setter) = pc_rtx;
13705 old_notes = REG_NOTES (cc0_setter);
13706 REG_NOTES (cc0_setter) = NULL;
13708 distribute_notes (old_notes, cc0_setter,
13709 cc0_setter, NULL,
13710 NULL_RTX, NULL_RTX, NULL_RTX);
13711 distribute_links (LOG_LINKS (cc0_setter));
13713 SET_INSN_DELETED (cc0_setter);
13714 if (cc0_setter == i2)
13715 i2 = NULL;
13717 #endif
13719 else
13721 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13723 /* If there isn't already a REG_UNUSED note, put one
13724 here. Do not place a REG_DEAD note, even if
13725 the register is also used here; that would not
13726 match the algorithm used in lifetime analysis
13727 and can cause the consistency check in the
13728 scheduler to fail. */
13729 if (! find_regno_note (tem_insn, REG_UNUSED,
13730 REGNO (XEXP (note, 0))))
13731 place = tem_insn;
13732 break;
13735 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
13736 || (CALL_P (tem_insn)
13737 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
13739 place = tem_insn;
13741 /* If we are doing a 3->2 combination, and we have a
13742 register which formerly died in i3 and was not used
13743 by i2, which now no longer dies in i3 and is used in
13744 i2 but does not die in i2, and place is between i2
13745 and i3, then we may need to move a link from place to
13746 i2. */
13747 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13748 && from_insn
13749 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13750 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13752 struct insn_link *links = LOG_LINKS (place);
13753 LOG_LINKS (place) = NULL;
13754 distribute_links (links);
13756 break;
13759 if (tem_insn == BB_HEAD (bb))
13760 break;
13765 /* If the register is set or already dead at PLACE, we needn't do
13766 anything with this note if it is still a REG_DEAD note.
13767 We check here if it is set at all, not if it is totally replaced,
13768 which is what `dead_or_set_p' checks, so also check for it being
13769 set partially. */
13771 if (place && REG_NOTE_KIND (note) == REG_DEAD)
13773 unsigned int regno = REGNO (XEXP (note, 0));
13774 reg_stat_type *rsp = &reg_stat[regno];
13776 if (dead_or_set_p (place, XEXP (note, 0))
13777 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13779 /* Unless the register previously died in PLACE, clear
13780 last_death. [I no longer understand why this is
13781 being done.] */
13782 if (rsp->last_death != place)
13783 rsp->last_death = 0;
13784 place = 0;
13786 else
13787 rsp->last_death = place;
13789 /* If this is a death note for a hard reg that is occupying
13790 multiple registers, ensure that we are still using all
13791 parts of the object. If we find a piece of the object
13792 that is unused, we must arrange for an appropriate REG_DEAD
13793 note to be added for it. However, we can't just emit a USE
13794 and tag the note to it, since the register might actually
13795 be dead; so we recurse, and the recursive call then finds
13796 the previous insn that used this register. */
13798 if (place && regno < FIRST_PSEUDO_REGISTER
13799 && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13801 unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13802 bool all_used = true;
13803 unsigned int i;
13805 for (i = regno; i < endregno; i++)
13806 if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13807 && ! find_regno_fusage (place, USE, i))
13808 || dead_or_set_regno_p (place, i))
13810 all_used = false;
13811 break;
13814 if (! all_used)
13816 /* Put only REG_DEAD notes for pieces that are
13817 not already dead or set. */
13819 for (i = regno; i < endregno;
13820 i += hard_regno_nregs[i][reg_raw_mode[i]])
13822 rtx piece = regno_reg_rtx[i];
13823 basic_block bb = this_basic_block;
13825 if (! dead_or_set_p (place, piece)
13826 && ! reg_bitfield_target_p (piece,
13827 PATTERN (place)))
13829 rtx new_note = alloc_reg_note (REG_DEAD, piece,
13830 NULL_RTX);
13832 distribute_notes (new_note, place, place,
13833 NULL, NULL_RTX, NULL_RTX,
13834 NULL_RTX);
13836 else if (! refers_to_regno_p (i, i + 1,
13837 PATTERN (place), 0)
13838 && ! find_regno_fusage (place, USE, i))
13839 for (tem_insn = PREV_INSN (place); ;
13840 tem_insn = PREV_INSN (tem_insn))
13842 if (!NONDEBUG_INSN_P (tem_insn))
13844 if (tem_insn == BB_HEAD (bb))
13845 break;
13846 continue;
13848 if (dead_or_set_p (tem_insn, piece)
13849 || reg_bitfield_target_p (piece,
13850 PATTERN (tem_insn)))
13852 add_reg_note (tem_insn, REG_UNUSED, piece);
13853 break;
13858 place = 0;
13862 break;
13864 default:
13865 /* Any other notes should not be present at this point in the
13866 compilation. */
13867 gcc_unreachable ();
13870 if (place)
13872 XEXP (note, 1) = REG_NOTES (place);
13873 REG_NOTES (place) = note;
13876 if (place2)
13877 add_shallow_copy_of_reg_note (place2, note);
13881 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13882 I3, I2, and I1 to new locations. This is also called to add a link
13883 pointing at I3 when I3's destination is changed. */
13885 static void
13886 distribute_links (struct insn_link *links)
13888 struct insn_link *link, *next_link;
13890 for (link = links; link; link = next_link)
13892 rtx_insn *place = 0;
13893 rtx_insn *insn;
13894 rtx set, reg;
13896 next_link = link->next;
13898 /* If the insn that this link points to is a NOTE, ignore it. */
13899 if (NOTE_P (link->insn))
13900 continue;
13902 set = 0;
13903 rtx pat = PATTERN (link->insn);
13904 if (GET_CODE (pat) == SET)
13905 set = pat;
13906 else if (GET_CODE (pat) == PARALLEL)
13908 int i;
13909 for (i = 0; i < XVECLEN (pat, 0); i++)
13911 set = XVECEXP (pat, 0, i);
13912 if (GET_CODE (set) != SET)
13913 continue;
13915 reg = SET_DEST (set);
13916 while (GET_CODE (reg) == ZERO_EXTRACT
13917 || GET_CODE (reg) == STRICT_LOW_PART
13918 || GET_CODE (reg) == SUBREG)
13919 reg = XEXP (reg, 0);
13921 if (!REG_P (reg))
13922 continue;
13924 if (REGNO (reg) == link->regno)
13925 break;
13927 if (i == XVECLEN (pat, 0))
13928 continue;
13930 else
13931 continue;
13933 reg = SET_DEST (set);
13935 while (GET_CODE (reg) == ZERO_EXTRACT
13936 || GET_CODE (reg) == STRICT_LOW_PART
13937 || GET_CODE (reg) == SUBREG)
13938 reg = XEXP (reg, 0);
13940 /* A LOG_LINK is defined as being placed on the first insn that uses
13941 a register and points to the insn that sets the register. Start
13942 searching at the next insn after the target of the link and stop
13943 when we reach a set of the register or the end of the basic block.
13945 Note that this correctly handles the link that used to point from
13946 I3 to I2. Also note that not much searching is typically done here
13947 since most links don't point very far away. */
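/* Illustration of the scan (insn and register numbers are made up):
   for a link that used to say "insn 42 gets (reg 100) from insn 37",
   the loop below starts at the insn after 37, skips debug insns, and
   stops at the first insn that mentions (reg 100); the link is attached
   there only if that insn actually reads the register (a call that uses
   it via CALL_INSN_FUNCTION_USAGE also counts), and the search gives up
   if (reg 100) is merely set again or the basic block ends first.  */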
13949 for (insn = NEXT_INSN (link->insn);
13950 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
13951 || BB_HEAD (this_basic_block->next_bb) != insn));
13952 insn = NEXT_INSN (insn))
13953 if (DEBUG_INSN_P (insn))
13954 continue;
13955 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13957 if (reg_referenced_p (reg, PATTERN (insn)))
13958 place = insn;
13959 break;
13961 else if (CALL_P (insn)
13962 && find_reg_fusage (insn, USE, reg))
13964 place = insn;
13965 break;
13967 else if (INSN_P (insn) && reg_set_p (reg, insn))
13968 break;
13970 /* If we found a place to put the link, place it there unless there
13971 is already a link to the same insn as LINK at that point. */
13973 if (place)
13975 struct insn_link *link2;
13977 FOR_EACH_LOG_LINK (link2, place)
13978 if (link2->insn == link->insn && link2->regno == link->regno)
13979 break;
13981 if (link2 == NULL)
13983 link->next = LOG_LINKS (place);
13984 LOG_LINKS (place) = link;
13986 /* Set added_links_insn to the earliest insn we added a
13987 link to. */
13988 if (added_links_insn == 0
13989 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13990 added_links_insn = place;
13996 /* Check for any register or memory mentioned in EQUIV that is not
13997 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
13998 of EXPR where some registers may have been replaced by constants. */
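/* Illustration (hypothetical RTL, not from the sources): with
   EXPR = (plus (reg 100) (reg 101)) and EQUIV = (plus (reg 100) (const_int 4)),
   every register or memory reference in EQUIV also appears in EXPR, so the
   function returns false; if EQUIV instead mentioned (reg 102), it would
   return true.  */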
14000 static bool
14001 unmentioned_reg_p (rtx equiv, rtx expr)
14003 subrtx_iterator::array_type array;
14004 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14006 const_rtx x = *iter;
14007 if ((REG_P (x) || MEM_P (x))
14008 && !reg_mentioned_p (x, expr))
14009 return true;
14011 return false;
14014 DEBUG_FUNCTION void
14015 dump_combine_stats (FILE *file)
14017 fprintf
14018 (file,
14019 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14020 combine_attempts, combine_merges, combine_extras, combine_successes);
14023 void
14024 dump_combine_total_stats (FILE *file)
14026 fprintf
14027 (file,
14028 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14029 total_attempts, total_merges, total_extras, total_successes);
14032 /* Try combining insns through substitution. */
14033 static unsigned int
14034 rest_of_handle_combine (void)
14036 int rebuild_jump_labels_after_combine;
14038 df_set_flags (DF_LR_RUN_DCE | DF_DEFER_INSN_RESCAN);
14039 df_note_add_problem ();
14040 df_analyze ();
14042 regstat_init_n_sets_and_refs ();
14044 rebuild_jump_labels_after_combine
14045 = combine_instructions (get_insns (), max_reg_num ());
14047 /* Combining insns may have turned an indirect jump into a
14048 direct jump. Rebuild the JUMP_LABEL fields of jumping
14049 instructions. */
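/* For instance (illustrative): if combining folded a load of a label's
   address into an indirect jump, that jump is now a direct jump to the
   label, so its JUMP_LABEL field (and possibly the CFG) is stale until
   rebuilt below.  */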
14050 if (rebuild_jump_labels_after_combine)
14052 timevar_push (TV_JUMP);
14053 rebuild_jump_labels (get_insns ());
14054 cleanup_cfg (0);
14055 timevar_pop (TV_JUMP);
14058 regstat_free_n_sets_and_refs ();
14059 return 0;
14062 namespace {
14064 const pass_data pass_data_combine =
14066 RTL_PASS, /* type */
14067 "combine", /* name */
14068 OPTGROUP_NONE, /* optinfo_flags */
14069 TV_COMBINE, /* tv_id */
14070 PROP_cfglayout, /* properties_required */
14071 0, /* properties_provided */
14072 0, /* properties_destroyed */
14073 0, /* todo_flags_start */
14074 TODO_df_finish, /* todo_flags_finish */
14077 class pass_combine : public rtl_opt_pass
14079 public:
14080 pass_combine (gcc::context *ctxt)
14081 : rtl_opt_pass (pass_data_combine, ctxt)
14084 /* opt_pass methods: */
14085 virtual bool gate (function *) { return (optimize > 0); }
14086 virtual unsigned int execute (function *)
14088 return rest_of_handle_combine ();
14091 }; // class pass_combine
14093 } // anon namespace
14095 rtl_opt_pass *
14096 make_pass_combine (gcc::context *ctxt)
14098 return new pass_combine (ctxt);
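/* Note (based on the usual GCC pass machinery): make_pass_combine is the
   factory the pass manager calls, via the pass_combine entry in passes.def,
   to instantiate this pass; the gate above keeps it from running when
   optimization is disabled.  */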