1 /* Decompose multiword subregs.
2 Copyright (C) 2007-2013 Free Software Foundation, Inc.
3 Contributed by Richard Henderson <rth@redhat.com>
4 Ian Lance Taylor <iant@google.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
30 #include "insn-config.h"
32 #include "basic-block.h"
39 #include "tree-pass.h"
41 #include "lower-subreg.h"
/* Normalize STACK_GROWS_DOWNWARD to 0 or 1 so it can be used in
   arithmetic comparisons (see the push handling in
   resolve_simple_move).  */
#ifdef STACK_GROWS_DOWNWARD
# undef STACK_GROWS_DOWNWARD
# define STACK_GROWS_DOWNWARD 1
#else
# define STACK_GROWS_DOWNWARD 0
#endif
/* Decompose multi-word pseudo-registers into individual
   pseudo-registers when possible and profitable.  This is possible
   when all the uses of a multi-word register are via SUBREG, or are
   copies of the register to another location.  Breaking apart the
   register permits more CSE and permits better register allocation.
   This is profitable if the machine does not have move instructions
   to do this.

   This pass only splits moves with modes that are wider than
   word_mode and ASHIFTs, LSHIFTRTs and ZERO_EXTENDs with integer
   modes that are twice the width of word_mode.  The latter could be
   generalized if there was a need to do this, but the trend in
   architectures is to not need this.

   There are two useful preprocessor defines for use by maintainers:

   #define LOG_COSTS 1

   if you wish to see the actual cost estimates that are being used
   for each mode wider than word mode and the cost estimates for zero
   extension and the shifts.   This can be useful when port maintainers
   are tuning insn rtx costs.

   #define FORCE_LOWERING 1

   if you wish to test the pass with all the transformation forced on.
   This can be useful for finding bugs in the transformations.  */

#define LOG_COSTS 0
#define FORCE_LOWERING 0
82 /* Bit N in this bitmap is set if regno N is used in a context in
83 which we can decompose it. */
84 static bitmap decomposable_context
;
86 /* Bit N in this bitmap is set if regno N is used in a context in
87 which it can not be decomposed. */
88 static bitmap non_decomposable_context
;
90 /* Bit N in this bitmap is set if regno N is used in a subreg
91 which changes the mode but not the size. This typically happens
92 when the register accessed as a floating-point value; we want to
93 avoid generating accesses to its subwords in integer modes. */
94 static bitmap subreg_context
;
96 /* Bit N in the bitmap in element M of this array is set if there is a
97 copy from reg M to reg N. */
98 static vec
<bitmap
> reg_copy_graph
;
100 struct target_lower_subreg default_target_lower_subreg
;
101 #if SWITCHABLE_TARGET
102 struct target_lower_subreg
*this_target_lower_subreg
103 = &default_target_lower_subreg
;
106 #define twice_word_mode \
107 this_target_lower_subreg->x_twice_word_mode
109 this_target_lower_subreg->x_choices
111 /* RTXes used while computing costs. */
113 /* Source and target registers. */
117 /* A twice_word_mode ZERO_EXTEND of SOURCE. */
120 /* A shift of SOURCE. */
123 /* A SET of TARGET. */
127 /* Return the cost of a CODE shift in mode MODE by OP1 bits, using the
128 rtxes in RTXES. SPEED_P selects between the speed and size cost. */
131 shift_cost (bool speed_p
, struct cost_rtxes
*rtxes
, enum rtx_code code
,
132 enum machine_mode mode
, int op1
)
134 PUT_CODE (rtxes
->shift
, code
);
135 PUT_MODE (rtxes
->shift
, mode
);
136 PUT_MODE (rtxes
->source
, mode
);
137 XEXP (rtxes
->shift
, 1) = GEN_INT (op1
);
138 return set_src_cost (rtxes
->shift
, speed_p
);
141 /* For each X in the range [0, BITS_PER_WORD), set SPLITTING[X]
142 to true if it is profitable to split a double-word CODE shift
143 of X + BITS_PER_WORD bits. SPEED_P says whether we are testing
144 for speed or size profitability.
146 Use the rtxes in RTXES to calculate costs. WORD_MOVE_ZERO_COST is
147 the cost of moving zero into a word-mode register. WORD_MOVE_COST
148 is the cost of moving between word registers. */
151 compute_splitting_shift (bool speed_p
, struct cost_rtxes
*rtxes
,
152 bool *splitting
, enum rtx_code code
,
153 int word_move_zero_cost
, int word_move_cost
)
155 int wide_cost
, narrow_cost
, i
;
157 for (i
= 0; i
< BITS_PER_WORD
; i
++)
159 wide_cost
= shift_cost (speed_p
, rtxes
, code
, twice_word_mode
,
162 narrow_cost
= word_move_cost
;
164 narrow_cost
= shift_cost (speed_p
, rtxes
, code
, word_mode
, i
);
167 fprintf (stderr
, "%s %s by %d: original cost %d, split cost %d + %d\n",
168 GET_MODE_NAME (twice_word_mode
), GET_RTX_NAME (code
),
169 i
+ BITS_PER_WORD
, wide_cost
, narrow_cost
,
170 word_move_zero_cost
);
172 if (FORCE_LOWERING
|| wide_cost
>= narrow_cost
+ word_move_zero_cost
)
177 /* Compute what we should do when optimizing for speed or size; SPEED_P
178 selects which. Use RTXES for computing costs. */
181 compute_costs (bool speed_p
, struct cost_rtxes
*rtxes
)
184 int word_move_zero_cost
, word_move_cost
;
186 PUT_MODE (rtxes
->target
, word_mode
);
187 SET_SRC (rtxes
->set
) = CONST0_RTX (word_mode
);
188 word_move_zero_cost
= set_rtx_cost (rtxes
->set
, speed_p
);
190 SET_SRC (rtxes
->set
) = rtxes
->source
;
191 word_move_cost
= set_rtx_cost (rtxes
->set
, speed_p
);
194 fprintf (stderr
, "%s move: from zero cost %d, from reg cost %d\n",
195 GET_MODE_NAME (word_mode
), word_move_zero_cost
, word_move_cost
);
197 for (i
= 0; i
< MAX_MACHINE_MODE
; i
++)
199 enum machine_mode mode
= (enum machine_mode
) i
;
200 int factor
= GET_MODE_SIZE (mode
) / UNITS_PER_WORD
;
205 PUT_MODE (rtxes
->target
, mode
);
206 PUT_MODE (rtxes
->source
, mode
);
207 mode_move_cost
= set_rtx_cost (rtxes
->set
, speed_p
);
210 fprintf (stderr
, "%s move: original cost %d, split cost %d * %d\n",
211 GET_MODE_NAME (mode
), mode_move_cost
,
212 word_move_cost
, factor
);
214 if (FORCE_LOWERING
|| mode_move_cost
>= word_move_cost
* factor
)
216 choices
[speed_p
].move_modes_to_split
[i
] = true;
217 choices
[speed_p
].something_to_do
= true;
222 /* For the moves and shifts, the only case that is checked is one
223 where the mode of the target is an integer mode twice the width
226 If it is not profitable to split a double word move then do not
227 even consider the shifts or the zero extension. */
228 if (choices
[speed_p
].move_modes_to_split
[(int) twice_word_mode
])
232 /* The only case here to check to see if moving the upper part with a
233 zero is cheaper than doing the zext itself. */
234 PUT_MODE (rtxes
->source
, word_mode
);
235 zext_cost
= set_src_cost (rtxes
->zext
, speed_p
);
238 fprintf (stderr
, "%s %s: original cost %d, split cost %d + %d\n",
239 GET_MODE_NAME (twice_word_mode
), GET_RTX_NAME (ZERO_EXTEND
),
240 zext_cost
, word_move_cost
, word_move_zero_cost
);
242 if (FORCE_LOWERING
|| zext_cost
>= word_move_cost
+ word_move_zero_cost
)
243 choices
[speed_p
].splitting_zext
= true;
245 compute_splitting_shift (speed_p
, rtxes
,
246 choices
[speed_p
].splitting_ashift
, ASHIFT
,
247 word_move_zero_cost
, word_move_cost
);
248 compute_splitting_shift (speed_p
, rtxes
,
249 choices
[speed_p
].splitting_lshiftrt
, LSHIFTRT
,
250 word_move_zero_cost
, word_move_cost
);
254 /* Do one-per-target initialisation. This involves determining
255 which operations on the machine are profitable. If none are found,
256 then the pass just returns when called. */
259 init_lower_subreg (void)
261 struct cost_rtxes rtxes
;
263 memset (this_target_lower_subreg
, 0, sizeof (*this_target_lower_subreg
));
265 twice_word_mode
= GET_MODE_2XWIDER_MODE (word_mode
);
267 rtxes
.target
= gen_rtx_REG (word_mode
, FIRST_PSEUDO_REGISTER
);
268 rtxes
.source
= gen_rtx_REG (word_mode
, FIRST_PSEUDO_REGISTER
+ 1);
269 rtxes
.set
= gen_rtx_SET (VOIDmode
, rtxes
.target
, rtxes
.source
);
270 rtxes
.zext
= gen_rtx_ZERO_EXTEND (twice_word_mode
, rtxes
.source
);
271 rtxes
.shift
= gen_rtx_ASHIFT (twice_word_mode
, rtxes
.source
, const0_rtx
);
274 fprintf (stderr
, "\nSize costs\n==========\n\n");
275 compute_costs (false, &rtxes
);
278 fprintf (stderr
, "\nSpeed costs\n===========\n\n");
279 compute_costs (true, &rtxes
);
283 simple_move_operand (rtx x
)
285 if (GET_CODE (x
) == SUBREG
)
291 if (GET_CODE (x
) == LABEL_REF
292 || GET_CODE (x
) == SYMBOL_REF
293 || GET_CODE (x
) == HIGH
294 || GET_CODE (x
) == CONST
)
298 && (MEM_VOLATILE_P (x
)
299 || mode_dependent_address_p (XEXP (x
, 0), MEM_ADDR_SPACE (x
))))
305 /* If INSN is a single set between two objects that we want to split,
306 return the single set. SPEED_P says whether we are optimizing
307 INSN for speed or size.
309 INSN should have been passed to recog and extract_insn before this
313 simple_move (rtx insn
, bool speed_p
)
317 enum machine_mode mode
;
319 if (recog_data
.n_operands
!= 2)
322 set
= single_set (insn
);
327 if (x
!= recog_data
.operand
[0] && x
!= recog_data
.operand
[1])
329 if (!simple_move_operand (x
))
333 if (x
!= recog_data
.operand
[0] && x
!= recog_data
.operand
[1])
335 /* For the src we can handle ASM_OPERANDS, and it is beneficial for
336 things like x86 rdtsc which returns a DImode value. */
337 if (GET_CODE (x
) != ASM_OPERANDS
338 && !simple_move_operand (x
))
341 /* We try to decompose in integer modes, to avoid generating
342 inefficient code copying between integer and floating point
343 registers. That means that we can't decompose if this is a
344 non-integer mode for which there is no integer mode of the same
346 mode
= GET_MODE (SET_SRC (set
));
347 if (!SCALAR_INT_MODE_P (mode
)
348 && (mode_for_size (GET_MODE_SIZE (mode
) * BITS_PER_UNIT
, MODE_INT
, 0)
352 /* Reject PARTIAL_INT modes. They are used for processor specific
353 purposes and it's probably best not to tamper with them. */
354 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
357 if (!choices
[speed_p
].move_modes_to_split
[(int) mode
])
363 /* If SET is a copy from one multi-word pseudo-register to another,
364 record that in reg_copy_graph. Return whether it is such a
368 find_pseudo_copy (rtx set
)
370 rtx dest
= SET_DEST (set
);
371 rtx src
= SET_SRC (set
);
375 if (!REG_P (dest
) || !REG_P (src
))
380 if (HARD_REGISTER_NUM_P (rd
) || HARD_REGISTER_NUM_P (rs
))
383 b
= reg_copy_graph
[rs
];
386 b
= BITMAP_ALLOC (NULL
);
387 reg_copy_graph
[rs
] = b
;
390 bitmap_set_bit (b
, rd
);
395 /* Look through the registers in DECOMPOSABLE_CONTEXT. For each case
396 where they are copied to another register, add the register to
397 which they are copied to DECOMPOSABLE_CONTEXT. Use
398 NON_DECOMPOSABLE_CONTEXT to limit this--we don't bother to track
399 copies of registers which are in NON_DECOMPOSABLE_CONTEXT. */
402 propagate_pseudo_copies (void)
404 bitmap queue
, propagate
;
406 queue
= BITMAP_ALLOC (NULL
);
407 propagate
= BITMAP_ALLOC (NULL
);
409 bitmap_copy (queue
, decomposable_context
);
412 bitmap_iterator iter
;
415 bitmap_clear (propagate
);
417 EXECUTE_IF_SET_IN_BITMAP (queue
, 0, i
, iter
)
419 bitmap b
= reg_copy_graph
[i
];
421 bitmap_ior_and_compl_into (propagate
, b
, non_decomposable_context
);
424 bitmap_and_compl (queue
, propagate
, decomposable_context
);
425 bitmap_ior_into (decomposable_context
, propagate
);
427 while (!bitmap_empty_p (queue
));
430 BITMAP_FREE (propagate
);
/* A pointer to one of these values is passed to
   find_decomposable_subregs via for_each_rtx.  */

enum classify_move_insn
{
  /* Not a simple move from one location to another.  */
  NOT_SIMPLE_MOVE,
  /* A simple move we want to decompose.  */
  DECOMPOSABLE_SIMPLE_MOVE,
  /* Any other simple move.  */
  SIMPLE_MOVE
};
446 /* This is called via for_each_rtx. If we find a SUBREG which we
447 could use to decompose a pseudo-register, set a bit in
448 DECOMPOSABLE_CONTEXT. If we find an unadorned register which is
449 not a simple pseudo-register copy, DATA will point at the type of
450 move, and we set a bit in DECOMPOSABLE_CONTEXT or
451 NON_DECOMPOSABLE_CONTEXT as appropriate. */
454 find_decomposable_subregs (rtx
*px
, void *data
)
456 enum classify_move_insn
*pcmi
= (enum classify_move_insn
*) data
;
462 if (GET_CODE (x
) == SUBREG
)
464 rtx inner
= SUBREG_REG (x
);
465 unsigned int regno
, outer_size
, inner_size
, outer_words
, inner_words
;
470 regno
= REGNO (inner
);
471 if (HARD_REGISTER_NUM_P (regno
))
474 outer_size
= GET_MODE_SIZE (GET_MODE (x
));
475 inner_size
= GET_MODE_SIZE (GET_MODE (inner
));
476 outer_words
= (outer_size
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
477 inner_words
= (inner_size
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
479 /* We only try to decompose single word subregs of multi-word
480 registers. When we find one, we return -1 to avoid iterating
481 over the inner register.
483 ??? This doesn't allow, e.g., DImode subregs of TImode values
484 on 32-bit targets. We would need to record the way the
485 pseudo-register was used, and only decompose if all the uses
486 were the same number and size of pieces. Hopefully this
487 doesn't happen much. */
489 if (outer_words
== 1 && inner_words
> 1)
491 bitmap_set_bit (decomposable_context
, regno
);
495 /* If this is a cast from one mode to another, where the modes
496 have the same size, and they are not tieable, then mark this
497 register as non-decomposable. If we decompose it we are
498 likely to mess up whatever the backend is trying to do. */
500 && outer_size
== inner_size
501 && !MODES_TIEABLE_P (GET_MODE (x
), GET_MODE (inner
)))
503 bitmap_set_bit (non_decomposable_context
, regno
);
504 bitmap_set_bit (subreg_context
, regno
);
512 /* We will see an outer SUBREG before we see the inner REG, so
513 when we see a plain REG here it means a direct reference to
516 If this is not a simple copy from one location to another,
517 then we can not decompose this register. If this is a simple
518 copy we want to decompose, and the mode is right,
519 then we mark the register as decomposable.
520 Otherwise we don't say anything about this register --
521 it could be decomposed, but whether that would be
522 profitable depends upon how it is used elsewhere.
524 We only set bits in the bitmap for multi-word
525 pseudo-registers, since those are the only ones we care about
526 and it keeps the size of the bitmaps down. */
529 if (!HARD_REGISTER_NUM_P (regno
)
530 && GET_MODE_SIZE (GET_MODE (x
)) > UNITS_PER_WORD
)
534 case NOT_SIMPLE_MOVE
:
535 bitmap_set_bit (non_decomposable_context
, regno
);
537 case DECOMPOSABLE_SIMPLE_MOVE
:
538 if (MODES_TIEABLE_P (GET_MODE (x
), word_mode
))
539 bitmap_set_bit (decomposable_context
, regno
);
550 enum classify_move_insn cmi_mem
= NOT_SIMPLE_MOVE
;
552 /* Any registers used in a MEM do not participate in a
553 SIMPLE_MOVE or DECOMPOSABLE_SIMPLE_MOVE. Do our own recursion
554 here, and return -1 to block the parent's recursion. */
555 for_each_rtx (&XEXP (x
, 0), find_decomposable_subregs
, &cmi_mem
);
562 /* Decompose REGNO into word-sized components. We smash the REG node
563 in place. This ensures that (1) something goes wrong quickly if we
564 fail to make some replacement, and (2) the debug information inside
565 the symbol table is automatically kept up to date. */
568 decompose_register (unsigned int regno
)
571 unsigned int words
, i
;
574 reg
= regno_reg_rtx
[regno
];
576 regno_reg_rtx
[regno
] = NULL_RTX
;
578 words
= GET_MODE_SIZE (GET_MODE (reg
));
579 words
= (words
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
581 v
= rtvec_alloc (words
);
582 for (i
= 0; i
< words
; ++i
)
583 RTVEC_ELT (v
, i
) = gen_reg_rtx_offset (reg
, word_mode
, i
* UNITS_PER_WORD
);
585 PUT_CODE (reg
, CONCATN
);
590 fprintf (dump_file
, "; Splitting reg %u ->", regno
);
591 for (i
= 0; i
< words
; ++i
)
592 fprintf (dump_file
, " %u", REGNO (XVECEXP (reg
, 0, i
)));
593 fputc ('\n', dump_file
);
597 /* Get a SUBREG of a CONCATN. */
600 simplify_subreg_concatn (enum machine_mode outermode
, rtx op
,
603 unsigned int inner_size
;
604 enum machine_mode innermode
, partmode
;
606 unsigned int final_offset
;
608 gcc_assert (GET_CODE (op
) == CONCATN
);
609 gcc_assert (byte
% GET_MODE_SIZE (outermode
) == 0);
611 innermode
= GET_MODE (op
);
612 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
613 gcc_assert (GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (innermode
));
615 inner_size
= GET_MODE_SIZE (innermode
) / XVECLEN (op
, 0);
616 part
= XVECEXP (op
, 0, byte
/ inner_size
);
617 partmode
= GET_MODE (part
);
619 /* VECTOR_CSTs in debug expressions are expanded into CONCATN instead of
620 regular CONST_VECTORs. They have vector or integer modes, depending
621 on the capabilities of the target. Cope with them. */
622 if (partmode
== VOIDmode
&& VECTOR_MODE_P (innermode
))
623 partmode
= GET_MODE_INNER (innermode
);
624 else if (partmode
== VOIDmode
)
626 enum mode_class mclass
= GET_MODE_CLASS (innermode
);
627 partmode
= mode_for_size (inner_size
* BITS_PER_UNIT
, mclass
, 0);
630 final_offset
= byte
% inner_size
;
631 if (final_offset
+ GET_MODE_SIZE (outermode
) > inner_size
)
634 return simplify_gen_subreg (outermode
, part
, partmode
, final_offset
);
637 /* Wrapper around simplify_gen_subreg which handles CONCATN. */
640 simplify_gen_subreg_concatn (enum machine_mode outermode
, rtx op
,
641 enum machine_mode innermode
, unsigned int byte
)
645 /* We have to handle generating a SUBREG of a SUBREG of a CONCATN.
646 If OP is a SUBREG of a CONCATN, then it must be a simple mode
647 change with the same size and offset 0, or it must extract a
648 part. We shouldn't see anything else here. */
649 if (GET_CODE (op
) == SUBREG
&& GET_CODE (SUBREG_REG (op
)) == CONCATN
)
653 if ((GET_MODE_SIZE (GET_MODE (op
))
654 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
655 && SUBREG_BYTE (op
) == 0)
656 return simplify_gen_subreg_concatn (outermode
, SUBREG_REG (op
),
657 GET_MODE (SUBREG_REG (op
)), byte
);
659 op2
= simplify_subreg_concatn (GET_MODE (op
), SUBREG_REG (op
),
663 /* We don't handle paradoxical subregs here. */
664 gcc_assert (GET_MODE_SIZE (outermode
)
665 <= GET_MODE_SIZE (GET_MODE (op
)));
666 gcc_assert (GET_MODE_SIZE (GET_MODE (op
))
667 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))));
668 op2
= simplify_subreg_concatn (outermode
, SUBREG_REG (op
),
669 byte
+ SUBREG_BYTE (op
));
670 gcc_assert (op2
!= NULL_RTX
);
675 gcc_assert (op
!= NULL_RTX
);
676 gcc_assert (innermode
== GET_MODE (op
));
679 if (GET_CODE (op
) == CONCATN
)
680 return simplify_subreg_concatn (outermode
, op
, byte
);
682 ret
= simplify_gen_subreg (outermode
, op
, innermode
, byte
);
684 /* If we see an insn like (set (reg:DI) (subreg:DI (reg:SI) 0)) then
685 resolve_simple_move will ask for the high part of the paradoxical
686 subreg, which does not have a value. Just return a zero. */
688 && GET_CODE (op
) == SUBREG
689 && SUBREG_BYTE (op
) == 0
690 && (GET_MODE_SIZE (innermode
)
691 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
)))))
692 return CONST0_RTX (outermode
);
694 gcc_assert (ret
!= NULL_RTX
);
698 /* Return whether we should resolve X into the registers into which it
702 resolve_reg_p (rtx x
)
704 return GET_CODE (x
) == CONCATN
;
707 /* Return whether X is a SUBREG of a register which we need to
711 resolve_subreg_p (rtx x
)
713 if (GET_CODE (x
) != SUBREG
)
715 return resolve_reg_p (SUBREG_REG (x
));
718 /* This is called via for_each_rtx. Look for SUBREGs which need to be
722 resolve_subreg_use (rtx
*px
, void *data
)
724 rtx insn
= (rtx
) data
;
730 if (resolve_subreg_p (x
))
732 x
= simplify_subreg_concatn (GET_MODE (x
), SUBREG_REG (x
),
735 /* It is possible for a note to contain a reference which we can
736 decompose. In this case, return 1 to the caller to indicate
737 that the note must be removed. */
744 validate_change (insn
, px
, x
, 1);
748 if (resolve_reg_p (x
))
750 /* Return 1 to the caller to indicate that we found a direct
751 reference to a register which is being decomposed. This can
752 happen inside notes, multiword shift or zero-extend
760 /* This is called via for_each_rtx. Look for SUBREGs which can be
761 decomposed and decomposed REGs that need copying. */
764 adjust_decomposed_uses (rtx
*px
, void *data ATTRIBUTE_UNUSED
)
771 if (resolve_subreg_p (x
))
773 x
= simplify_subreg_concatn (GET_MODE (x
), SUBREG_REG (x
),
782 if (resolve_reg_p (x
))
788 /* Resolve any decomposed registers which appear in register notes on
792 resolve_reg_notes (rtx insn
)
796 note
= find_reg_equal_equiv_note (insn
);
799 int old_count
= num_validated_changes ();
800 if (for_each_rtx (&XEXP (note
, 0), resolve_subreg_use
, NULL
))
801 remove_note (insn
, note
);
803 if (old_count
!= num_validated_changes ())
804 df_notes_rescan (insn
);
807 pnote
= ®_NOTES (insn
);
808 while (*pnote
!= NULL_RTX
)
813 switch (REG_NOTE_KIND (note
))
817 if (resolve_reg_p (XEXP (note
, 0)))
826 *pnote
= XEXP (note
, 1);
828 pnote
= &XEXP (note
, 1);
832 /* Return whether X can be decomposed into subwords. */
835 can_decompose_p (rtx x
)
839 unsigned int regno
= REGNO (x
);
841 if (HARD_REGISTER_NUM_P (regno
))
843 unsigned int byte
, num_bytes
;
845 num_bytes
= GET_MODE_SIZE (GET_MODE (x
));
846 for (byte
= 0; byte
< num_bytes
; byte
+= UNITS_PER_WORD
)
847 if (simplify_subreg_regno (regno
, GET_MODE (x
), byte
, word_mode
) < 0)
852 return !bitmap_bit_p (subreg_context
, regno
);
858 /* Decompose the registers used in a simple move SET within INSN. If
859 we don't change anything, return INSN, otherwise return the start
860 of the sequence of moves. */
863 resolve_simple_move (rtx set
, rtx insn
)
865 rtx src
, dest
, real_dest
, insns
;
866 enum machine_mode orig_mode
, dest_mode
;
871 dest
= SET_DEST (set
);
872 orig_mode
= GET_MODE (dest
);
874 words
= (GET_MODE_SIZE (orig_mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
875 gcc_assert (words
> 1);
879 /* We have to handle copying from a SUBREG of a decomposed reg where
880 the SUBREG is larger than word size. Rather than assume that we
881 can take a word_mode SUBREG of the destination, we copy to a new
882 register and then copy that to the destination. */
884 real_dest
= NULL_RTX
;
886 if (GET_CODE (src
) == SUBREG
887 && resolve_reg_p (SUBREG_REG (src
))
888 && (SUBREG_BYTE (src
) != 0
889 || (GET_MODE_SIZE (orig_mode
)
890 != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src
))))))
893 dest
= gen_reg_rtx (orig_mode
);
894 if (REG_P (real_dest
))
895 REG_ATTRS (dest
) = REG_ATTRS (real_dest
);
898 /* Similarly if we are copying to a SUBREG of a decomposed reg where
899 the SUBREG is larger than word size. */
901 if (GET_CODE (dest
) == SUBREG
902 && resolve_reg_p (SUBREG_REG (dest
))
903 && (SUBREG_BYTE (dest
) != 0
904 || (GET_MODE_SIZE (orig_mode
)
905 != GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest
))))))
907 rtx reg
, minsn
, smove
;
909 reg
= gen_reg_rtx (orig_mode
);
910 minsn
= emit_move_insn (reg
, src
);
911 smove
= single_set (minsn
);
912 gcc_assert (smove
!= NULL_RTX
);
913 resolve_simple_move (smove
, minsn
);
917 /* If we didn't have any big SUBREGS of decomposed registers, and
918 neither side of the move is a register we are decomposing, then
919 we don't have to do anything here. */
921 if (src
== SET_SRC (set
)
922 && dest
== SET_DEST (set
)
923 && !resolve_reg_p (src
)
924 && !resolve_subreg_p (src
)
925 && !resolve_reg_p (dest
)
926 && !resolve_subreg_p (dest
))
932 /* It's possible for the code to use a subreg of a decomposed
933 register while forming an address. We need to handle that before
934 passing the address to emit_move_insn. We pass NULL_RTX as the
935 insn parameter to resolve_subreg_use because we can not validate
937 if (MEM_P (src
) || MEM_P (dest
))
942 for_each_rtx (&XEXP (src
, 0), resolve_subreg_use
, NULL_RTX
);
944 for_each_rtx (&XEXP (dest
, 0), resolve_subreg_use
, NULL_RTX
);
945 acg
= apply_change_group ();
949 /* If SRC is a register which we can't decompose, or has side
950 effects, we need to move via a temporary register. */
952 if (!can_decompose_p (src
)
953 || side_effects_p (src
)
954 || GET_CODE (src
) == ASM_OPERANDS
)
958 reg
= gen_reg_rtx (orig_mode
);
959 emit_move_insn (reg
, src
);
963 /* If DEST is a register which we can't decompose, or has side
964 effects, we need to first move to a temporary register. We
965 handle the common case of pushing an operand directly. We also
966 go through a temporary register if it holds a floating point
967 value. This gives us better code on systems which can't move
968 data easily between integer and floating point registers. */
970 dest_mode
= orig_mode
;
971 pushing
= push_operand (dest
, dest_mode
);
972 if (!can_decompose_p (dest
)
973 || (side_effects_p (dest
) && !pushing
)
974 || (!SCALAR_INT_MODE_P (dest_mode
)
975 && !resolve_reg_p (dest
)
976 && !resolve_subreg_p (dest
)))
978 if (real_dest
== NULL_RTX
)
980 if (!SCALAR_INT_MODE_P (dest_mode
))
982 dest_mode
= mode_for_size (GET_MODE_SIZE (dest_mode
) * BITS_PER_UNIT
,
984 gcc_assert (dest_mode
!= BLKmode
);
986 dest
= gen_reg_rtx (dest_mode
);
987 if (REG_P (real_dest
))
988 REG_ATTRS (dest
) = REG_ATTRS (real_dest
);
993 unsigned int i
, j
, jinc
;
995 gcc_assert (GET_MODE_SIZE (orig_mode
) % UNITS_PER_WORD
== 0);
996 gcc_assert (GET_CODE (XEXP (dest
, 0)) != PRE_MODIFY
);
997 gcc_assert (GET_CODE (XEXP (dest
, 0)) != POST_MODIFY
);
999 if (WORDS_BIG_ENDIAN
== STACK_GROWS_DOWNWARD
)
1010 for (i
= 0; i
< words
; ++i
, j
+= jinc
)
1014 temp
= copy_rtx (XEXP (dest
, 0));
1015 temp
= adjust_automodify_address_nv (dest
, word_mode
, temp
,
1016 j
* UNITS_PER_WORD
);
1017 emit_move_insn (temp
,
1018 simplify_gen_subreg_concatn (word_mode
, src
,
1020 j
* UNITS_PER_WORD
));
1027 if (REG_P (dest
) && !HARD_REGISTER_NUM_P (REGNO (dest
)))
1028 emit_clobber (dest
);
1030 for (i
= 0; i
< words
; ++i
)
1031 emit_move_insn (simplify_gen_subreg_concatn (word_mode
, dest
,
1033 i
* UNITS_PER_WORD
),
1034 simplify_gen_subreg_concatn (word_mode
, src
,
1036 i
* UNITS_PER_WORD
));
1039 if (real_dest
!= NULL_RTX
)
1041 rtx mdest
, minsn
, smove
;
1043 if (dest_mode
== orig_mode
)
1046 mdest
= simplify_gen_subreg (orig_mode
, dest
, GET_MODE (dest
), 0);
1047 minsn
= emit_move_insn (real_dest
, mdest
);
1049 smove
= single_set (minsn
);
1050 gcc_assert (smove
!= NULL_RTX
);
1052 resolve_simple_move (smove
, minsn
);
1055 insns
= get_insns ();
1058 copy_reg_eh_region_note_forward (insn
, insns
, NULL_RTX
);
1060 emit_insn_before (insns
, insn
);
1067 /* Change a CLOBBER of a decomposed register into a CLOBBER of the
1068 component registers. Return whether we changed something. */
1071 resolve_clobber (rtx pat
, rtx insn
)
1074 enum machine_mode orig_mode
;
1075 unsigned int words
, i
;
1078 reg
= XEXP (pat
, 0);
1079 if (!resolve_reg_p (reg
) && !resolve_subreg_p (reg
))
1082 orig_mode
= GET_MODE (reg
);
1083 words
= GET_MODE_SIZE (orig_mode
);
1084 words
= (words
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
1086 ret
= validate_change (NULL_RTX
, &XEXP (pat
, 0),
1087 simplify_gen_subreg_concatn (word_mode
, reg
,
1090 df_insn_rescan (insn
);
1091 gcc_assert (ret
!= 0);
1093 for (i
= words
- 1; i
> 0; --i
)
1097 x
= simplify_gen_subreg_concatn (word_mode
, reg
, orig_mode
,
1098 i
* UNITS_PER_WORD
);
1099 x
= gen_rtx_CLOBBER (VOIDmode
, x
);
1100 emit_insn_after (x
, insn
);
1103 resolve_reg_notes (insn
);
1108 /* A USE of a decomposed register is no longer meaningful. Return
1109 whether we changed something. */
1112 resolve_use (rtx pat
, rtx insn
)
1114 if (resolve_reg_p (XEXP (pat
, 0)) || resolve_subreg_p (XEXP (pat
, 0)))
1120 resolve_reg_notes (insn
);
1125 /* A VAR_LOCATION can be simplified. */
1128 resolve_debug (rtx insn
)
1130 for_each_rtx (&PATTERN (insn
), adjust_decomposed_uses
, NULL_RTX
);
1132 df_insn_rescan (insn
);
1134 resolve_reg_notes (insn
);
1137 /* Check if INSN is a decomposable multiword-shift or zero-extend and
1138 set the decomposable_context bitmap accordingly. SPEED_P is true
1139 if we are optimizing INSN for speed rather than size. Return true
1140 if INSN is decomposable. */
1143 find_decomposable_shift_zext (rtx insn
, bool speed_p
)
1149 set
= single_set (insn
);
1154 if (GET_CODE (op
) != ASHIFT
1155 && GET_CODE (op
) != LSHIFTRT
1156 && GET_CODE (op
) != ZERO_EXTEND
)
1159 op_operand
= XEXP (op
, 0);
1160 if (!REG_P (SET_DEST (set
)) || !REG_P (op_operand
)
1161 || HARD_REGISTER_NUM_P (REGNO (SET_DEST (set
)))
1162 || HARD_REGISTER_NUM_P (REGNO (op_operand
))
1163 || GET_MODE (op
) != twice_word_mode
)
1166 if (GET_CODE (op
) == ZERO_EXTEND
)
1168 if (GET_MODE (op_operand
) != word_mode
1169 || !choices
[speed_p
].splitting_zext
)
1172 else /* left or right shift */
1174 bool *splitting
= (GET_CODE (op
) == ASHIFT
1175 ? choices
[speed_p
].splitting_ashift
1176 : choices
[speed_p
].splitting_lshiftrt
);
1177 if (!CONST_INT_P (XEXP (op
, 1))
1178 || !IN_RANGE (INTVAL (XEXP (op
, 1)), BITS_PER_WORD
,
1179 2 * BITS_PER_WORD
- 1)
1180 || !splitting
[INTVAL (XEXP (op
, 1)) - BITS_PER_WORD
])
1183 bitmap_set_bit (decomposable_context
, REGNO (op_operand
));
1186 bitmap_set_bit (decomposable_context
, REGNO (SET_DEST (set
)));
1191 /* Decompose a more than word wide shift (in INSN) of a multiword
1192 pseudo or a multiword zero-extend of a wordmode pseudo into a move
1193 and 'set to zero' insn. Return a pointer to the new insn when a
1194 replacement was done. */
1197 resolve_shift_zext (rtx insn
)
1203 rtx src_reg
, dest_reg
, dest_zero
;
1204 int src_reg_num
, dest_reg_num
, offset1
, offset2
, src_offset
;
1206 set
= single_set (insn
);
1211 if (GET_CODE (op
) != ASHIFT
1212 && GET_CODE (op
) != LSHIFTRT
1213 && GET_CODE (op
) != ZERO_EXTEND
)
1216 op_operand
= XEXP (op
, 0);
1218 /* We can tear this operation apart only if the regs were already
1220 if (!resolve_reg_p (SET_DEST (set
)) && !resolve_reg_p (op_operand
))
1223 /* src_reg_num is the number of the word mode register which we
1224 are operating on. For a left shift and a zero_extend on little
1225 endian machines this is register 0. */
1226 src_reg_num
= GET_CODE (op
) == LSHIFTRT
? 1 : 0;
1228 if (WORDS_BIG_ENDIAN
1229 && GET_MODE_SIZE (GET_MODE (op_operand
)) > UNITS_PER_WORD
)
1230 src_reg_num
= 1 - src_reg_num
;
1232 if (GET_CODE (op
) == ZERO_EXTEND
)
1233 dest_reg_num
= WORDS_BIG_ENDIAN
? 1 : 0;
1235 dest_reg_num
= 1 - src_reg_num
;
1237 offset1
= UNITS_PER_WORD
* dest_reg_num
;
1238 offset2
= UNITS_PER_WORD
* (1 - dest_reg_num
);
1239 src_offset
= UNITS_PER_WORD
* src_reg_num
;
1243 dest_reg
= simplify_gen_subreg_concatn (word_mode
, SET_DEST (set
),
1244 GET_MODE (SET_DEST (set
)),
1246 dest_zero
= simplify_gen_subreg_concatn (word_mode
, SET_DEST (set
),
1247 GET_MODE (SET_DEST (set
)),
1249 src_reg
= simplify_gen_subreg_concatn (word_mode
, op_operand
,
1250 GET_MODE (op_operand
),
1252 if (GET_CODE (op
) != ZERO_EXTEND
)
1254 int shift_count
= INTVAL (XEXP (op
, 1));
1255 if (shift_count
> BITS_PER_WORD
)
1256 src_reg
= expand_shift (GET_CODE (op
) == ASHIFT
?
1257 LSHIFT_EXPR
: RSHIFT_EXPR
,
1259 shift_count
- BITS_PER_WORD
,
1263 if (dest_reg
!= src_reg
)
1264 emit_move_insn (dest_reg
, src_reg
);
1265 emit_move_insn (dest_zero
, CONST0_RTX (word_mode
));
1266 insns
= get_insns ();
1270 emit_insn_before (insns
, insn
);
1275 fprintf (dump_file
, "; Replacing insn: %d with insns: ", INSN_UID (insn
));
1276 for (in
= insns
; in
!= insn
; in
= NEXT_INSN (in
))
1277 fprintf (dump_file
, "%d ", INSN_UID (in
));
1278 fprintf (dump_file
, "\n");
1285 /* Print to dump_file a description of what we're doing with shift code CODE.
1286 SPLITTING[X] is true if we are splitting shifts by X + BITS_PER_WORD. */
1289 dump_shift_choices (enum rtx_code code
, bool *splitting
)
1295 " Splitting mode %s for %s lowering with shift amounts = ",
1296 GET_MODE_NAME (twice_word_mode
), GET_RTX_NAME (code
));
1298 for (i
= 0; i
< BITS_PER_WORD
; i
++)
1301 fprintf (dump_file
, "%s%d", sep
, i
+ BITS_PER_WORD
);
1304 fprintf (dump_file
, "\n");
1307 /* Print to dump_file a description of what we're doing when optimizing
1308 for speed or size; SPEED_P says which. DESCRIPTION is a description
1309 of the SPEED_P choice. */
1312 dump_choices (bool speed_p
, const char *description
)
1316 fprintf (dump_file
, "Choices when optimizing for %s:\n", description
);
1318 for (i
= 0; i
< MAX_MACHINE_MODE
; i
++)
1319 if (GET_MODE_SIZE (i
) > UNITS_PER_WORD
)
1320 fprintf (dump_file
, " %s mode %s for copy lowering.\n",
1321 choices
[speed_p
].move_modes_to_split
[i
]
1324 GET_MODE_NAME ((enum machine_mode
) i
));
1326 fprintf (dump_file
, " %s mode %s for zero_extend lowering.\n",
1327 choices
[speed_p
].splitting_zext
? "Splitting" : "Skipping",
1328 GET_MODE_NAME (twice_word_mode
));
1330 dump_shift_choices (ASHIFT
, choices
[speed_p
].splitting_ashift
);
1331 dump_shift_choices (LSHIFTRT
, choices
[speed_p
].splitting_ashift
);
1332 fprintf (dump_file
, "\n");
1335 /* Look for registers which are always accessed via word-sized SUBREGs
1336 or -if DECOMPOSE_COPIES is true- via copies. Decompose these
1337 registers into several word-sized pseudo-registers. */
1340 decompose_multiword_subregs (bool decompose_copies
)
1348 dump_choices (false, "size");
1349 dump_choices (true, "speed");
1352 /* Check if this target even has any modes to consider lowering. */
1353 if (!choices
[false].something_to_do
&& !choices
[true].something_to_do
)
1356 fprintf (dump_file
, "Nothing to do!\n");
1360 max
= max_reg_num ();
1362 /* First see if there are any multi-word pseudo-registers. If there
1363 aren't, there is nothing we can do. This should speed up this
1364 pass in the normal case, since it should be faster than scanning
1368 bool useful_modes_seen
= false;
1370 for (i
= FIRST_PSEUDO_REGISTER
; i
< max
; ++i
)
1371 if (regno_reg_rtx
[i
] != NULL
)
1373 enum machine_mode mode
= GET_MODE (regno_reg_rtx
[i
]);
1374 if (choices
[false].move_modes_to_split
[(int) mode
]
1375 || choices
[true].move_modes_to_split
[(int) mode
])
1377 useful_modes_seen
= true;
1382 if (!useful_modes_seen
)
1385 fprintf (dump_file
, "Nothing to lower in this function.\n");
1392 df_set_flags (DF_DEFER_INSN_RESCAN
);
1396 /* FIXME: It may be possible to change this code to look for each
1397 multi-word pseudo-register and to find each insn which sets or
1398 uses that register. That should be faster than scanning all the
1401 decomposable_context
= BITMAP_ALLOC (NULL
);
1402 non_decomposable_context
= BITMAP_ALLOC (NULL
);
1403 subreg_context
= BITMAP_ALLOC (NULL
);
1405 reg_copy_graph
.create (max
);
1406 reg_copy_graph
.safe_grow_cleared (max
);
1407 memset (reg_copy_graph
.address (), 0, sizeof (bitmap
) * max
);
1409 speed_p
= optimize_function_for_speed_p (cfun
);
1414 FOR_BB_INSNS (bb
, insn
)
1417 enum classify_move_insn cmi
;
1421 || GET_CODE (PATTERN (insn
)) == CLOBBER
1422 || GET_CODE (PATTERN (insn
)) == USE
)
1425 recog_memoized (insn
);
1427 if (find_decomposable_shift_zext (insn
, speed_p
))
1430 extract_insn (insn
);
1432 set
= simple_move (insn
, speed_p
);
1435 cmi
= NOT_SIMPLE_MOVE
;
1438 /* We mark pseudo-to-pseudo copies as decomposable during the
1439 second pass only. The first pass is so early that there is
1440 good chance such moves will be optimized away completely by
1441 subsequent optimizations anyway.
1443 However, we call find_pseudo_copy even during the first pass
1444 so as to properly set up the reg_copy_graph. */
1445 if (find_pseudo_copy (set
))
1446 cmi
= decompose_copies
? DECOMPOSABLE_SIMPLE_MOVE
: SIMPLE_MOVE
;
1451 n
= recog_data
.n_operands
;
1452 for (i
= 0; i
< n
; ++i
)
1454 for_each_rtx (&recog_data
.operand
[i
],
1455 find_decomposable_subregs
,
1458 /* We handle ASM_OPERANDS as a special case to support
1459 things like x86 rdtsc which returns a DImode value.
1460 We can decompose the output, which will certainly be
1461 operand 0, but not the inputs. */
1463 if (cmi
== SIMPLE_MOVE
1464 && GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1466 gcc_assert (i
== 0);
1467 cmi
= NOT_SIMPLE_MOVE
;
1473 bitmap_and_compl_into (decomposable_context
, non_decomposable_context
);
1474 if (!bitmap_empty_p (decomposable_context
))
1478 sbitmap_iterator sbi
;
1479 bitmap_iterator iter
;
1482 propagate_pseudo_copies ();
1484 sub_blocks
= sbitmap_alloc (last_basic_block
);
1485 bitmap_clear (sub_blocks
);
1487 EXECUTE_IF_SET_IN_BITMAP (decomposable_context
, 0, regno
, iter
)
1488 decompose_register (regno
);
1494 FOR_BB_INSNS (bb
, insn
)
1501 pat
= PATTERN (insn
);
1502 if (GET_CODE (pat
) == CLOBBER
)
1503 resolve_clobber (pat
, insn
);
1504 else if (GET_CODE (pat
) == USE
)
1505 resolve_use (pat
, insn
);
1506 else if (DEBUG_INSN_P (insn
))
1507 resolve_debug (insn
);
1513 recog_memoized (insn
);
1514 extract_insn (insn
);
1516 set
= simple_move (insn
, speed_p
);
1519 rtx orig_insn
= insn
;
1520 bool cfi
= control_flow_insn_p (insn
);
1522 /* We can end up splitting loads to multi-word pseudos
1523 into separate loads to machine word size pseudos.
1524 When this happens, we first had one load that can
1525 throw, and after resolve_simple_move we'll have a
1526 bunch of loads (at least two). All those loads may
1527 trap if we can have non-call exceptions, so they
1528 all will end the current basic block. We split the
1529 block after the outer loop over all insns, but we
1530 make sure here that we will be able to split the
1531 basic block and still produce the correct control
1532 flow graph for it. */
1534 || (cfun
->can_throw_non_call_exceptions
1535 && can_throw_internal (insn
)));
1537 insn
= resolve_simple_move (set
, insn
);
1538 if (insn
!= orig_insn
)
1540 recog_memoized (insn
);
1541 extract_insn (insn
);
1544 bitmap_set_bit (sub_blocks
, bb
->index
);
1549 rtx decomposed_shift
;
1551 decomposed_shift
= resolve_shift_zext (insn
);
1552 if (decomposed_shift
!= NULL_RTX
)
1554 insn
= decomposed_shift
;
1555 recog_memoized (insn
);
1556 extract_insn (insn
);
1560 for (i
= recog_data
.n_operands
- 1; i
>= 0; --i
)
1561 for_each_rtx (recog_data
.operand_loc
[i
],
1565 resolve_reg_notes (insn
);
1567 if (num_validated_changes () > 0)
1569 for (i
= recog_data
.n_dups
- 1; i
>= 0; --i
)
1571 rtx
*pl
= recog_data
.dup_loc
[i
];
1572 int dup_num
= recog_data
.dup_num
[i
];
1573 rtx
*px
= recog_data
.operand_loc
[dup_num
];
1575 validate_unshare_change (insn
, pl
, *px
, 1);
1578 i
= apply_change_group ();
1585 /* If we had insns to split that caused control flow insns in the middle
1586 of a basic block, split those blocks now. Note that we only handle
1587 the case where splitting a load has caused multiple possibly trapping
1589 EXECUTE_IF_SET_IN_BITMAP (sub_blocks
, 0, i
, sbi
)
1594 bb
= BASIC_BLOCK (i
);
1595 insn
= BB_HEAD (bb
);
1600 if (control_flow_insn_p (insn
))
1602 /* Split the block after insn. There will be a fallthru
1603 edge, which is OK so we keep it. We have to create the
1604 exception edges ourselves. */
1605 fallthru
= split_block (bb
, insn
);
1606 rtl_make_eh_edge (NULL
, bb
, BB_END (bb
));
1607 bb
= fallthru
->dest
;
1608 insn
= BB_HEAD (bb
);
1611 insn
= NEXT_INSN (insn
);
1615 sbitmap_free (sub_blocks
);
1622 FOR_EACH_VEC_ELT (reg_copy_graph
, i
, b
)
1627 reg_copy_graph
.release ();
1629 BITMAP_FREE (decomposable_context
);
1630 BITMAP_FREE (non_decomposable_context
);
1631 BITMAP_FREE (subreg_context
);
1634 /* Gate function for lower subreg pass. */
1637 gate_handle_lower_subreg (void)
1639 return flag_split_wide_types
!= 0;
1642 /* Implement first lower subreg pass. */
1645 rest_of_handle_lower_subreg (void)
1647 decompose_multiword_subregs (false);
1651 /* Implement second lower subreg pass. */
1654 rest_of_handle_lower_subreg2 (void)
1656 decompose_multiword_subregs (true);
1660 struct rtl_opt_pass pass_lower_subreg
=
1664 "subreg1", /* name */
1665 OPTGROUP_NONE
, /* optinfo_flags */
1666 gate_handle_lower_subreg
, /* gate */
1667 rest_of_handle_lower_subreg
, /* execute */
1670 0, /* static_pass_number */
1671 TV_LOWER_SUBREG
, /* tv_id */
1672 0, /* properties_required */
1673 0, /* properties_provided */
1674 0, /* properties_destroyed */
1675 0, /* todo_flags_start */
1677 TODO_verify_flow
/* todo_flags_finish */
1681 struct rtl_opt_pass pass_lower_subreg2
=
1685 "subreg2", /* name */
1686 OPTGROUP_NONE
, /* optinfo_flags */
1687 gate_handle_lower_subreg
, /* gate */
1688 rest_of_handle_lower_subreg2
, /* execute */
1691 0, /* static_pass_number */
1692 TV_LOWER_SUBREG
, /* tv_id */
1693 0, /* properties_required */
1694 0, /* properties_provided */
1695 0, /* properties_destroyed */
1696 0, /* todo_flags_start */
1697 TODO_df_finish
| TODO_verify_rtl_sharing
|
1699 TODO_verify_flow
/* todo_flags_finish */