/* Decompose multiword subregs.
   Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
                  Ian Lance Taylor <iant@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "basic-block.h"
#include "tree-pass.h"
#include "lower-subreg.h"
#ifdef STACK_GROWS_DOWNWARD
# undef STACK_GROWS_DOWNWARD
# define STACK_GROWS_DOWNWARD 1
#else
# define STACK_GROWS_DOWNWARD 0
#endif
DEF_VEC_ALLOC_P (bitmap, heap);
/* Decompose multi-word pseudo-registers into individual
   pseudo-registers when possible and profitable.  This is possible
   when all the uses of a multi-word register are via SUBREG, or are
   copies of the register to another location.  Breaking apart the
   register permits more CSE and permits better register allocation.
   This is profitable if the machine does not have move instructions
   to do this.

   This pass only splits moves with modes that are wider than
   word_mode and ASHIFTs, LSHIFTRTs and ZERO_EXTENDs with integer
   modes that are twice the width of word_mode.  The latter could be
   generalized if there was a need to do this, but the trend in
   architectures is to not need this.

   There are two useful preprocessor defines for use by maintainers:

   #define LOG_COSTS 1

   if you wish to see the actual cost estimates that are being used
   for each mode wider than word mode and the cost estimates for zero
   extension and the shifts.  This can be useful when port maintainers
   are tuning insn rtx costs.

   #define FORCE_LOWERING 1

   if you wish to test the pass with all the transformation forced on.
   This can be useful for finding bugs in the transformations.  */

#define LOG_COSTS 0
#define FORCE_LOWERING 0
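
/* For illustration (assuming a 32-bit target where word_mode is SImode,
   with invented pseudo-register numbers): a double-word copy such as

	(set (reg:DI 100) (reg:DI 101))

   can, once regs 100 and 101 are decomposed into word-sized pieces,
   be lowered into two independent word-mode copies

	(set (reg:SI 102) (reg:SI 104))
	(set (reg:SI 103) (reg:SI 105))

   which CSE and the register allocator can then handle separately.  */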
/* Bit N in this bitmap is set if regno N is used in a context in
   which we can decompose it.  */
static bitmap decomposable_context;

/* Bit N in this bitmap is set if regno N is used in a context in
   which it cannot be decomposed.  */
static bitmap non_decomposable_context;

/* Bit N in this bitmap is set if regno N is used in a subreg
   which changes the mode but not the size.  This typically happens
   when the register is accessed as a floating-point value; we want to
   avoid generating accesses to its subwords in integer modes.  */
static bitmap subreg_context;

/* Bit N in the bitmap in element M of this array is set if there is a
   copy from reg M to reg N.  */
static VEC(bitmap,heap) *reg_copy_graph;
struct target_lower_subreg default_target_lower_subreg;
#if SWITCHABLE_TARGET
struct target_lower_subreg *this_target_lower_subreg
  = &default_target_lower_subreg;
#endif

#define twice_word_mode \
  this_target_lower_subreg->x_twice_word_mode
#define choices \
  this_target_lower_subreg->x_choices
/* RTXes used while computing costs.  */
struct cost_rtxes {
  /* Source and target registers.  */
  rtx source;
  rtx target;
  /* A twice_word_mode ZERO_EXTEND of SOURCE.  */
  rtx zext;
  /* A shift of SOURCE.  */
  rtx shift;
  /* A SET of TARGET.  */
  rtx set;
};
/* Return the cost of a CODE shift in mode MODE by OP1 bits, using the
   rtxes in RTXES.  SPEED_P selects between the speed and size cost.  */

static int
shift_cost (bool speed_p, struct cost_rtxes *rtxes, enum rtx_code code,
            enum machine_mode mode, int op1)
{
  PUT_CODE (rtxes->shift, code);
  PUT_MODE (rtxes->shift, mode);
  PUT_MODE (rtxes->source, mode);
  XEXP (rtxes->shift, 1) = GEN_INT (op1);
  return set_src_cost (rtxes->shift, speed_p);
}
/* For each X in the range [0, BITS_PER_WORD), set SPLITTING[X]
   to true if it is profitable to split a double-word CODE shift
   of X + BITS_PER_WORD bits.  SPEED_P says whether we are testing
   for speed or size profitability.

   Use the rtxes in RTXES to calculate costs.  WORD_MOVE_ZERO_COST is
   the cost of moving zero into a word-mode register.  WORD_MOVE_COST
   is the cost of moving between word registers.  */

static void
compute_splitting_shift (bool speed_p, struct cost_rtxes *rtxes,
                         bool *splitting, enum rtx_code code,
                         int word_move_zero_cost, int word_move_cost)
{
  int wide_cost, narrow_cost, i;

  for (i = 0; i < BITS_PER_WORD; i++)
    {
      wide_cost = shift_cost (speed_p, rtxes, code, twice_word_mode,
                              i + BITS_PER_WORD);
      if (i == 0)
        narrow_cost = word_move_cost;
      else
        narrow_cost = shift_cost (speed_p, rtxes, code, word_mode, i);

      fprintf (stderr, "%s %s by %d: original cost %d, split cost %d + %d\n",
               GET_MODE_NAME (twice_word_mode), GET_RTX_NAME (code),
               i + BITS_PER_WORD, wide_cost, narrow_cost,
               word_move_zero_cost);

      if (FORCE_LOWERING || wide_cost >= narrow_cost + word_move_zero_cost)
        splitting[i] = true;
    }
}
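
/* A worked example of the test above, with invented costs: if a
   twice_word_mode ASHIFT by BITS_PER_WORD + 8 has cost 8, a word_mode
   ASHIFT by 8 has cost 4, and moving zero into a word register has
   cost 2, then 8 >= 4 + 2 holds and splitting[8] is set, so such
   shifts will be lowered to a word shift plus a zeroing move.  */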
/* Compute what we should do when optimizing for speed or size; SPEED_P
   selects which.  Use RTXES for computing costs.  */

static void
compute_costs (bool speed_p, struct cost_rtxes *rtxes)
{
  int i;
  int word_move_zero_cost, word_move_cost;

  PUT_MODE (rtxes->target, word_mode);
  SET_SRC (rtxes->set) = CONST0_RTX (word_mode);
  word_move_zero_cost = set_rtx_cost (rtxes->set, speed_p);

  SET_SRC (rtxes->set) = rtxes->source;
  word_move_cost = set_rtx_cost (rtxes->set, speed_p);

  fprintf (stderr, "%s move: from zero cost %d, from reg cost %d\n",
           GET_MODE_NAME (word_mode), word_move_zero_cost, word_move_cost);

  for (i = 0; i < MAX_MACHINE_MODE; i++)
    {
      enum machine_mode mode = (enum machine_mode) i;
      int factor = GET_MODE_SIZE (mode) / UNITS_PER_WORD;

      if (factor > 1)
        {
          int mode_move_cost;

          PUT_MODE (rtxes->target, mode);
          PUT_MODE (rtxes->source, mode);
          mode_move_cost = set_rtx_cost (rtxes->set, speed_p);

          fprintf (stderr, "%s move: original cost %d, split cost %d * %d\n",
                   GET_MODE_NAME (mode), mode_move_cost,
                   word_move_cost, factor);

          if (FORCE_LOWERING || mode_move_cost >= word_move_cost * factor)
            {
              choices[speed_p].move_modes_to_split[i] = true;
              choices[speed_p].something_to_do = true;
            }
        }
    }

  /* For the moves and shifts, the only case that is checked is one
     where the mode of the target is an integer mode twice the width
     of word_mode.

     If it is not profitable to split a double word move then do not
     even consider the shifts or the zero extension.  */
  if (choices[speed_p].move_modes_to_split[(int) twice_word_mode])
    {
      int zext_cost;

      /* The only case to check here is whether moving the upper part
         with a zero is cheaper than doing the zext itself.  */
      PUT_MODE (rtxes->source, word_mode);
      zext_cost = set_src_cost (rtxes->zext, speed_p);

      fprintf (stderr, "%s %s: original cost %d, split cost %d + %d\n",
               GET_MODE_NAME (twice_word_mode), GET_RTX_NAME (ZERO_EXTEND),
               zext_cost, word_move_cost, word_move_zero_cost);

      if (FORCE_LOWERING || zext_cost >= word_move_cost + word_move_zero_cost)
        choices[speed_p].splitting_zext = true;

      compute_splitting_shift (speed_p, rtxes,
                               choices[speed_p].splitting_ashift, ASHIFT,
                               word_move_zero_cost, word_move_cost);
      compute_splitting_shift (speed_p, rtxes,
                               choices[speed_p].splitting_lshiftrt, LSHIFTRT,
                               word_move_zero_cost, word_move_cost);
    }
}
/* Do one-per-target initialisation.  This involves determining
   which operations on the machine are profitable.  If none are found,
   then the pass just returns when called.  */

void
init_lower_subreg (void)
{
  struct cost_rtxes rtxes;

  memset (this_target_lower_subreg, 0, sizeof (*this_target_lower_subreg));

  twice_word_mode = GET_MODE_2XWIDER_MODE (word_mode);

  rtxes.target = gen_rtx_REG (word_mode, FIRST_PSEUDO_REGISTER);
  rtxes.source = gen_rtx_REG (word_mode, FIRST_PSEUDO_REGISTER + 1);
  rtxes.set = gen_rtx_SET (VOIDmode, rtxes.target, rtxes.source);
  rtxes.zext = gen_rtx_ZERO_EXTEND (twice_word_mode, rtxes.source);
  rtxes.shift = gen_rtx_ASHIFT (twice_word_mode, rtxes.source, const0_rtx);

  fprintf (stderr, "\nSize costs\n==========\n\n");
  compute_costs (false, &rtxes);

  fprintf (stderr, "\nSpeed costs\n===========\n\n");
  compute_costs (true, &rtxes);
}
static bool
simple_move_operand (rtx x)
{
  if (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  if (GET_CODE (x) == LABEL_REF
      || GET_CODE (x) == SYMBOL_REF
      || GET_CODE (x) == HIGH
      || GET_CODE (x) == CONST)
    return false;

  if (MEM_P (x)
      && (MEM_VOLATILE_P (x)
          || mode_dependent_address_p (XEXP (x, 0))))
    return false;

  return true;
}
/* If INSN is a single set between two objects that we want to split,
   return the single set.  SPEED_P says whether we are optimizing
   INSN for speed or size.

   INSN should have been passed to recog and extract_insn before this
   is called.  */

static rtx
simple_move (rtx insn, bool speed_p)
{
  rtx x;
  rtx set;
  enum machine_mode mode;

  if (recog_data.n_operands != 2)
    return NULL_RTX;

  set = single_set (insn);
  if (!set)
    return NULL_RTX;

  x = SET_DEST (set);
  if (x != recog_data.operand[0] && x != recog_data.operand[1])
    return NULL_RTX;
  if (!simple_move_operand (x))
    return NULL_RTX;

  x = SET_SRC (set);
  if (x != recog_data.operand[0] && x != recog_data.operand[1])
    return NULL_RTX;
  /* For the src we can handle ASM_OPERANDS, and it is beneficial for
     things like x86 rdtsc which returns a DImode value.  */
  if (GET_CODE (x) != ASM_OPERANDS
      && !simple_move_operand (x))
    return NULL_RTX;

  /* We try to decompose in integer modes, to avoid generating
     inefficient code copying between integer and floating point
     registers.  That means that we can't decompose if this is a
     non-integer mode for which there is no integer mode of the same
     size.  */
  mode = GET_MODE (SET_SRC (set));
  if (!SCALAR_INT_MODE_P (mode)
      && (mode_for_size (GET_MODE_SIZE (mode) * BITS_PER_UNIT, MODE_INT, 0)
          == BLKmode))
    return NULL_RTX;

  /* Reject PARTIAL_INT modes.  They are used for processor specific
     purposes and it's probably best not to tamper with them.  */
  if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
    return NULL_RTX;

  if (!choices[speed_p].move_modes_to_split[(int) mode])
    return NULL_RTX;

  return set;
}
/* If SET is a copy from one multi-word pseudo-register to another,
   record that in reg_copy_graph.  Return whether it is such a
   copy.  */

static bool
find_pseudo_copy (rtx set)
{
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);
  unsigned int rd, rs;
  bitmap b;

  if (!REG_P (dest) || !REG_P (src))
    return false;

  rd = REGNO (dest);
  rs = REGNO (src);
  if (HARD_REGISTER_NUM_P (rd) || HARD_REGISTER_NUM_P (rs))
    return false;

  b = VEC_index (bitmap, reg_copy_graph, rs);
  if (b == NULL)
    {
      b = BITMAP_ALLOC (NULL);
      VEC_replace (bitmap, reg_copy_graph, rs, b);
    }

  bitmap_set_bit (b, rd);
  return true;
}
/* Look through the registers in DECOMPOSABLE_CONTEXT.  For each case
   where they are copied to another register, add the register to
   which they are copied to DECOMPOSABLE_CONTEXT.  Use
   NON_DECOMPOSABLE_CONTEXT to limit this--we don't bother to track
   copies of registers which are in NON_DECOMPOSABLE_CONTEXT.  */

static void
propagate_pseudo_copies (void)
{
  bitmap queue, propagate;

  queue = BITMAP_ALLOC (NULL);
  propagate = BITMAP_ALLOC (NULL);

  bitmap_copy (queue, decomposable_context);
  do
    {
      bitmap_iterator iter;
      unsigned int i;

      bitmap_clear (propagate);

      EXECUTE_IF_SET_IN_BITMAP (queue, 0, i, iter)
        {
          bitmap b = VEC_index (bitmap, reg_copy_graph, i);
          if (b)
            bitmap_ior_and_compl_into (propagate, b, non_decomposable_context);
        }

      bitmap_and_compl (queue, propagate, decomposable_context);
      bitmap_ior_into (decomposable_context, propagate);
    }
  while (!bitmap_empty_p (queue));

  BITMAP_FREE (queue);
  BITMAP_FREE (propagate);
}
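
/* A small example of the propagation above, with invented register
   numbers: if reg 100 is in decomposable_context and reg_copy_graph
   records the copies 100 -> 101 and 101 -> 102, the loop first adds
   reg 101 and then reg 102 to decomposable_context, unless one of
   them is also in non_decomposable_context.  */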
/* A pointer to one of these values is passed to
   find_decomposable_subregs via for_each_rtx.  */

enum classify_move_insn
{
  /* Not a simple move from one location to another.  */
  NOT_SIMPLE_MOVE,
  /* A simple move from one pseudo-register to another.  */
  SIMPLE_PSEUDO_REG_MOVE,
  /* A simple move involving a non-pseudo-register.  */
  SIMPLE_MOVE
};
/* This is called via for_each_rtx.  If we find a SUBREG which we
   could use to decompose a pseudo-register, set a bit in
   DECOMPOSABLE_CONTEXT.  If we find an unadorned register which is
   not a simple pseudo-register copy, DATA will point at the type of
   move, and we set a bit in DECOMPOSABLE_CONTEXT or
   NON_DECOMPOSABLE_CONTEXT as appropriate.  */

static int
find_decomposable_subregs (rtx *px, void *data)
{
  enum classify_move_insn *pcmi = (enum classify_move_insn *) data;
  rtx x = *px;

  if (x == NULL_RTX)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx inner = SUBREG_REG (x);
      unsigned int regno, outer_size, inner_size, outer_words, inner_words;

      if (!REG_P (inner))
        return 0;

      regno = REGNO (inner);
      if (HARD_REGISTER_NUM_P (regno))
        return -1;

      outer_size = GET_MODE_SIZE (GET_MODE (x));
      inner_size = GET_MODE_SIZE (GET_MODE (inner));
      outer_words = (outer_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
      inner_words = (inner_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

      /* We only try to decompose single word subregs of multi-word
         registers.  When we find one, we return -1 to avoid iterating
         over the inner register.

         ??? This doesn't allow, e.g., DImode subregs of TImode values
         on 32-bit targets.  We would need to record the way the
         pseudo-register was used, and only decompose if all the uses
         were the same number and size of pieces.  Hopefully this
         doesn't happen much.  */

      if (outer_words == 1 && inner_words > 1)
        {
          bitmap_set_bit (decomposable_context, regno);
          return -1;
        }

      /* If this is a cast from one mode to another, where the modes
         have the same size, and they are not tieable, then mark this
         register as non-decomposable.  If we decompose it we are
         likely to mess up whatever the backend is trying to do.  */
      if (outer_words > 1
          && outer_size == inner_size
          && !MODES_TIEABLE_P (GET_MODE (x), GET_MODE (inner)))
        {
          bitmap_set_bit (non_decomposable_context, regno);
          bitmap_set_bit (subreg_context, regno);
          return -1;
        }
    }
  else if (REG_P (x))
    {
      unsigned int regno;

      /* We will see an outer SUBREG before we see the inner REG, so
         when we see a plain REG here it means a direct reference to
         the register.

         If this is not a simple copy from one location to another,
         then we can not decompose this register.  If this is a simple
         copy from one pseudo-register to another, and the mode is right,
         then we mark the register as decomposable.
         Otherwise we don't say anything about this register --
         it could be decomposed, but whether that would be
         profitable depends upon how it is used elsewhere.

         We only set bits in the bitmap for multi-word
         pseudo-registers, since those are the only ones we care about
         and it keeps the size of the bitmaps down.  */

      regno = REGNO (x);
      if (!HARD_REGISTER_NUM_P (regno)
          && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
        switch (*pcmi)
          {
          case NOT_SIMPLE_MOVE:
            bitmap_set_bit (non_decomposable_context, regno);
            break;
          case SIMPLE_PSEUDO_REG_MOVE:
            if (MODES_TIEABLE_P (GET_MODE (x), word_mode))
              bitmap_set_bit (decomposable_context, regno);
            break;
          default:
            break;
          }
    }
  else if (MEM_P (x))
    {
      enum classify_move_insn cmi_mem = NOT_SIMPLE_MOVE;

      /* Any registers used in a MEM do not participate in a
         SIMPLE_MOVE or SIMPLE_PSEUDO_REG_MOVE.  Do our own recursion
         here, and return -1 to block the parent's recursion.  */
      for_each_rtx (&XEXP (x, 0), find_decomposable_subregs, &cmi_mem);
      return -1;
    }

  return 0;
}
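
/* For example, on a 32-bit target (subreg:SI (reg:DI 100) 4) is a
   single-word reference to one word of a double-word pseudo, so reg
   100 is marked as decomposable above; a direct use of (reg:DI 100)
   in something other than a simple move marks it as non-decomposable
   instead.  The register number is invented for the illustration.  */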
/* Decompose REGNO into word-sized components.  We smash the REG node
   in place.  This ensures that (1) something goes wrong quickly if we
   fail to make some replacement, and (2) the debug information inside
   the symbol table is automatically kept up to date.  */

static void
decompose_register (unsigned int regno)
{
  rtx reg;
  unsigned int words, i;
  rtvec v;

  reg = regno_reg_rtx[regno];

  regno_reg_rtx[regno] = NULL_RTX;

  words = GET_MODE_SIZE (GET_MODE (reg));
  words = (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  v = rtvec_alloc (words);
  for (i = 0; i < words; ++i)
    RTVEC_ELT (v, i) = gen_reg_rtx_offset (reg, word_mode, i * UNITS_PER_WORD);

  PUT_CODE (reg, CONCATN);
  XVEC (reg, 0) = v;

  if (dump_file)
    {
      fprintf (dump_file, "; Splitting reg %u ->", regno);
      for (i = 0; i < words; ++i)
        fprintf (dump_file, " %u", REGNO (XVECEXP (reg, 0, i)));
      fputc ('\n', dump_file);
    }
}
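
/* After decompose_register, a DImode pseudo on a 32-bit target looks
   like

	(concatn:DI [(reg:SI 102) (reg:SI 103)])

   and later references to its subwords are resolved to the individual
   word-mode pseudos.  The register numbers are invented for the
   illustration.  */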
/* Get a SUBREG of a CONCATN.  */

static rtx
simplify_subreg_concatn (enum machine_mode outermode, rtx op,
                         unsigned int byte)
{
  unsigned int inner_size;
  enum machine_mode innermode, partmode;
  rtx part;
  unsigned int final_offset;

  gcc_assert (GET_CODE (op) == CONCATN);
  gcc_assert (byte % GET_MODE_SIZE (outermode) == 0);

  innermode = GET_MODE (op);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  gcc_assert (GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (innermode));

  inner_size = GET_MODE_SIZE (innermode) / XVECLEN (op, 0);
  part = XVECEXP (op, 0, byte / inner_size);
  partmode = GET_MODE (part);

  /* VECTOR_CSTs in debug expressions are expanded into CONCATN instead of
     regular CONST_VECTORs.  They have vector or integer modes, depending
     on the capabilities of the target.  Cope with them.  */
  if (partmode == VOIDmode && VECTOR_MODE_P (innermode))
    partmode = GET_MODE_INNER (innermode);
  else if (partmode == VOIDmode)
    {
      enum mode_class mclass = GET_MODE_CLASS (innermode);
      partmode = mode_for_size (inner_size * BITS_PER_UNIT, mclass, 0);
    }

  final_offset = byte % inner_size;
  if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
    return NULL_RTX;

  return simplify_gen_subreg (outermode, part, partmode, final_offset);
}
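
/* For instance, given op = (concatn:DI [(reg:SI 102) (reg:SI 103)]) on
   a 32-bit target, asking for the SImode piece at byte offset 4 selects
   the second element and simply yields (reg:SI 103).  The register
   numbers are invented for the illustration.  */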
/* Wrapper around simplify_gen_subreg which handles CONCATN.  */

static rtx
simplify_gen_subreg_concatn (enum machine_mode outermode, rtx op,
                             enum machine_mode innermode, unsigned int byte)
{
  rtx ret;

  /* We have to handle generating a SUBREG of a SUBREG of a CONCATN.
     If OP is a SUBREG of a CONCATN, then it must be a simple mode
     change with the same size and offset 0, or it must extract a
     part.  We shouldn't see anything else here.  */
  if (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == CONCATN)
    {
      rtx op2;

      if ((GET_MODE_SIZE (GET_MODE (op))
           == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && SUBREG_BYTE (op) == 0)
        return simplify_gen_subreg_concatn (outermode, SUBREG_REG (op),
                                            GET_MODE (SUBREG_REG (op)), byte);

      op2 = simplify_subreg_concatn (GET_MODE (op), SUBREG_REG (op),
                                     SUBREG_BYTE (op));
      if (op2 == NULL_RTX)
        {
          /* We don't handle paradoxical subregs here.  */
          gcc_assert (GET_MODE_SIZE (outermode)
                      <= GET_MODE_SIZE (GET_MODE (op)));
          gcc_assert (GET_MODE_SIZE (GET_MODE (op))
                      <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))));
          op2 = simplify_subreg_concatn (outermode, SUBREG_REG (op),
                                         byte + SUBREG_BYTE (op));
          gcc_assert (op2 != NULL_RTX);
          return op2;
        }

      op = op2;
    }

  gcc_assert (op != NULL_RTX);
  gcc_assert (innermode == GET_MODE (op));

  if (GET_CODE (op) == CONCATN)
    return simplify_subreg_concatn (outermode, op, byte);

  ret = simplify_gen_subreg (outermode, op, innermode, byte);

  /* If we see an insn like (set (reg:DI) (subreg:DI (reg:SI) 0)) then
     resolve_simple_move will ask for the high part of the paradoxical
     subreg, which does not have a value.  Just return a zero.  */
  if (ret == NULL_RTX
      && GET_CODE (op) == SUBREG
      && SUBREG_BYTE (op) == 0
      && (GET_MODE_SIZE (innermode)
          > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op)))))
    return CONST0_RTX (outermode);

  gcc_assert (ret != NULL_RTX);
  return ret;
}
/* Return whether we should resolve X into the registers into which it
   was decomposed.  */

static bool
resolve_reg_p (rtx x)
{
  return GET_CODE (x) == CONCATN;
}

/* Return whether X is a SUBREG of a register which we need to
   resolve.  */

static bool
resolve_subreg_p (rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  return resolve_reg_p (SUBREG_REG (x));
}
/* This is called via for_each_rtx.  Look for SUBREGs which need to be
   decomposed.  */

static int
resolve_subreg_use (rtx *px, void *data)
{
  rtx insn = (rtx) data;
  rtx x = *px;

  if (x == NULL_RTX)
    return 0;

  if (resolve_subreg_p (x))
    {
      x = simplify_subreg_concatn (GET_MODE (x), SUBREG_REG (x),
                                   SUBREG_BYTE (x));

      /* It is possible for a note to contain a reference which we can
         decompose.  In this case, return 1 to the caller to indicate
         that the note must be removed.  */
      if (!x)
        return 1;

      validate_change (insn, px, x, 1);
      return -1;
    }

  if (resolve_reg_p (x))
    /* Return 1 to the caller to indicate that we found a direct
       reference to a register which is being decomposed.  This can
       happen inside notes, multiword shift or zero-extend
       instructions.  */
    return 1;

  return 0;
}
/* This is called via for_each_rtx.  Look for SUBREGs which can be
   decomposed and decomposed REGs that need copying.  */

static int
adjust_decomposed_uses (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (resolve_subreg_p (x))
    x = simplify_subreg_concatn (GET_MODE (x), SUBREG_REG (x),
                                 SUBREG_BYTE (x));

  if (resolve_reg_p (x))
/* Resolve any decomposed registers which appear in register notes on
   INSN.  */

static void
resolve_reg_notes (rtx insn)
{
  rtx *pnote, note;

  note = find_reg_equal_equiv_note (insn);
  if (note)
    {
      int old_count = num_validated_changes ();
      if (for_each_rtx (&XEXP (note, 0), resolve_subreg_use, NULL))
        remove_note (insn, note);
      else if (old_count != num_validated_changes ())
        df_notes_rescan (insn);
    }

  pnote = &REG_NOTES (insn);
  while (*pnote != NULL_RTX)
    {
      bool del = false;

      note = *pnote;
      switch (REG_NOTE_KIND (note))
        {
        case REG_DEAD:
        case REG_UNUSED:
          if (resolve_reg_p (XEXP (note, 0)))
            del = true;
          break;

        default:
          break;
        }

      if (del)
        *pnote = XEXP (note, 1);
      else
        pnote = &XEXP (note, 1);
    }
}
/* Return whether X can be decomposed into subwords.  */

static bool
can_decompose_p (rtx x)
{
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);

      if (HARD_REGISTER_NUM_P (regno))
        {
          unsigned int byte, num_bytes;

          num_bytes = GET_MODE_SIZE (GET_MODE (x));
          for (byte = 0; byte < num_bytes; byte += UNITS_PER_WORD)
            if (simplify_subreg_regno (regno, GET_MODE (x), byte, word_mode) < 0)
              return false;
          return true;
        }
      else
        return !bitmap_bit_p (subreg_context, regno);
    }

  return true;
}
/* Decompose the registers used in a simple move SET within INSN.  If
   we don't change anything, return INSN, otherwise return the start
   of the sequence of moves.  */

static rtx
resolve_simple_move (rtx set, rtx insn)
{
  rtx src, dest, real_dest, insns;
  enum machine_mode orig_mode, dest_mode;
  unsigned int words;
  bool pushing;

  src = SET_SRC (set);
  dest = SET_DEST (set);
  orig_mode = GET_MODE (dest);

  words = (GET_MODE_SIZE (orig_mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  gcc_assert (words > 1);

  start_sequence ();

  /* We have to handle copying from a SUBREG of a decomposed reg where
     the SUBREG is larger than word size.  Rather than assume that we
     can take a word_mode SUBREG of the destination, we copy to a new
     register and then copy that to the destination.  */

  real_dest = NULL_RTX;

  if (GET_CODE (src) == SUBREG
      && resolve_reg_p (SUBREG_REG (src))
      && (SUBREG_BYTE (src) != 0
          || (GET_MODE_SIZE (orig_mode)
              != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))))
    {
      real_dest = dest;
      dest = gen_reg_rtx (orig_mode);
      if (REG_P (real_dest))
        REG_ATTRS (dest) = REG_ATTRS (real_dest);
    }

  /* Similarly if we are copying to a SUBREG of a decomposed reg where
     the SUBREG is larger than word size.  */

  if (GET_CODE (dest) == SUBREG
      && resolve_reg_p (SUBREG_REG (dest))
      && (SUBREG_BYTE (dest) != 0
          || (GET_MODE_SIZE (orig_mode)
              != GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))))
    {
      rtx reg, minsn, smove;

      reg = gen_reg_rtx (orig_mode);
      minsn = emit_move_insn (reg, src);
      smove = single_set (minsn);
      gcc_assert (smove != NULL_RTX);
      resolve_simple_move (smove, minsn);
      src = reg;
    }

  /* If we didn't have any big SUBREGS of decomposed registers, and
     neither side of the move is a register we are decomposing, then
     we don't have to do anything here.  */

  if (src == SET_SRC (set)
      && dest == SET_DEST (set)
      && !resolve_reg_p (src)
      && !resolve_subreg_p (src)
      && !resolve_reg_p (dest)
      && !resolve_subreg_p (dest))
    {
      end_sequence ();
      return insn;
    }

  /* It's possible for the code to use a subreg of a decomposed
     register while forming an address.  We need to handle that before
     passing the address to emit_move_insn.  We pass NULL_RTX as the
     insn parameter to resolve_subreg_use because we can not validate
     the insn.  */
  if (MEM_P (src) || MEM_P (dest))
    {
      int acg;

      if (MEM_P (src))
        for_each_rtx (&XEXP (src, 0), resolve_subreg_use, NULL_RTX);
      if (MEM_P (dest))
        for_each_rtx (&XEXP (dest, 0), resolve_subreg_use, NULL_RTX);
      acg = apply_change_group ();
      gcc_assert (acg);
    }

  /* If SRC is a register which we can't decompose, or has side
     effects, we need to move via a temporary register.  */

  if (!can_decompose_p (src)
      || side_effects_p (src)
      || GET_CODE (src) == ASM_OPERANDS)
    {
      rtx reg;

      reg = gen_reg_rtx (orig_mode);
      emit_move_insn (reg, src);
      src = reg;
    }

  /* If DEST is a register which we can't decompose, or has side
     effects, we need to first move to a temporary register.  We
     handle the common case of pushing an operand directly.  We also
     go through a temporary register if it holds a floating point
     value.  This gives us better code on systems which can't move
     data easily between integer and floating point registers.  */

  dest_mode = orig_mode;
  pushing = push_operand (dest, dest_mode);
  if (!can_decompose_p (dest)
      || (side_effects_p (dest) && !pushing)
      || (!SCALAR_INT_MODE_P (dest_mode)
          && !resolve_reg_p (dest)
          && !resolve_subreg_p (dest)))
    {
      if (real_dest == NULL_RTX)
        real_dest = dest;
      if (!SCALAR_INT_MODE_P (dest_mode))
        {
          dest_mode = mode_for_size (GET_MODE_SIZE (dest_mode) * BITS_PER_UNIT,
                                     MODE_INT, 0);
          gcc_assert (dest_mode != BLKmode);
        }
      dest = gen_reg_rtx (dest_mode);
      if (REG_P (real_dest))
        REG_ATTRS (dest) = REG_ATTRS (real_dest);
    }

  if (pushing)
    {
      unsigned int i, j, jinc;

      gcc_assert (GET_MODE_SIZE (orig_mode) % UNITS_PER_WORD == 0);
      gcc_assert (GET_CODE (XEXP (dest, 0)) != PRE_MODIFY);
      gcc_assert (GET_CODE (XEXP (dest, 0)) != POST_MODIFY);

      if (WORDS_BIG_ENDIAN == STACK_GROWS_DOWNWARD)
        {
          j = 0;
          jinc = 1;
        }
      else
        {
          j = words - 1;
          jinc = -1;
        }

      for (i = 0; i < words; ++i, j += jinc)
        {
          rtx temp;

          temp = copy_rtx (XEXP (dest, 0));
          temp = adjust_automodify_address_nv (dest, word_mode, temp,
                                               j * UNITS_PER_WORD);
          emit_move_insn (temp,
                          simplify_gen_subreg_concatn (word_mode, src,
                                                       orig_mode,
                                                       j * UNITS_PER_WORD));
        }
    }
  else
    {
      unsigned int i;

      if (REG_P (dest) && !HARD_REGISTER_NUM_P (REGNO (dest)))
        emit_clobber (dest);

      for (i = 0; i < words; ++i)
        emit_move_insn (simplify_gen_subreg_concatn (word_mode, dest,
                                                     orig_mode,
                                                     i * UNITS_PER_WORD),
                        simplify_gen_subreg_concatn (word_mode, src,
                                                     orig_mode,
                                                     i * UNITS_PER_WORD));
    }

  if (real_dest != NULL_RTX)
    {
      rtx mdest, minsn, smove;

      if (dest_mode == orig_mode)
        mdest = dest;
      else
        mdest = simplify_gen_subreg (orig_mode, dest, GET_MODE (dest), 0);
      minsn = emit_move_insn (real_dest, mdest);

      smove = single_set (minsn);
      gcc_assert (smove != NULL_RTX);

      resolve_simple_move (smove, minsn);
    }

  insns = get_insns ();
  end_sequence ();

  copy_reg_eh_region_note_forward (insn, insns, NULL_RTX);

  emit_insn_before (insns, insn);

  delete_insn (insn);

  return insns;
}
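
/* For example, on a 32-bit target a simple move

	(set (mem:DI (reg:SI 99)) (reg:DI 100))

   is resolved above into two word-mode stores of the two words of
   reg 100, one at the original address and one at the address plus 4
   (which word goes where depends on WORDS_BIG_ENDIAN).  The register
   numbers are invented for the illustration.  */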
/* Change a CLOBBER of a decomposed register into a CLOBBER of the
   component registers.  Return whether we changed something.  */

static bool
resolve_clobber (rtx pat, rtx insn)
{
  rtx reg;
  enum machine_mode orig_mode;
  unsigned int words, i;
  int ret;

  reg = XEXP (pat, 0);
  if (!resolve_reg_p (reg) && !resolve_subreg_p (reg))
    return false;

  orig_mode = GET_MODE (reg);
  words = GET_MODE_SIZE (orig_mode);
  words = (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  ret = validate_change (NULL_RTX, &XEXP (pat, 0),
                         simplify_gen_subreg_concatn (word_mode, reg,
                                                      orig_mode, 0),
                         0);
  df_insn_rescan (insn);
  gcc_assert (ret != 0);

  for (i = words - 1; i > 0; --i)
    {
      rtx x;

      x = simplify_gen_subreg_concatn (word_mode, reg, orig_mode,
                                       i * UNITS_PER_WORD);
      x = gen_rtx_CLOBBER (VOIDmode, x);
      emit_insn_after (x, insn);
    }

  resolve_reg_notes (insn);

  return true;
}
/* A USE of a decomposed register is no longer meaningful.  Return
   whether we changed something.  */

static bool
resolve_use (rtx pat, rtx insn)
{
  if (resolve_reg_p (XEXP (pat, 0)) || resolve_subreg_p (XEXP (pat, 0)))
    {
      delete_insn (insn);
      return true;
    }

  resolve_reg_notes (insn);

  return false;
}

/* A VAR_LOCATION can be simplified.  */

static void
resolve_debug (rtx insn)
{
  for_each_rtx (&PATTERN (insn), adjust_decomposed_uses, NULL_RTX);

  df_insn_rescan (insn);

  resolve_reg_notes (insn);
}
/* Check if INSN is a decomposable multiword-shift or zero-extend and
   set the decomposable_context bitmap accordingly.  SPEED_P is true
   if we are optimizing INSN for speed rather than size.  Return true
   if INSN is decomposable.  */

static bool
find_decomposable_shift_zext (rtx insn, bool speed_p)
{
  rtx set, op, op_operand;

  set = single_set (insn);
  if (!set)
    return false;

  op = SET_SRC (set);
  if (GET_CODE (op) != ASHIFT
      && GET_CODE (op) != LSHIFTRT
      && GET_CODE (op) != ZERO_EXTEND)
    return false;

  op_operand = XEXP (op, 0);
  if (!REG_P (SET_DEST (set)) || !REG_P (op_operand)
      || HARD_REGISTER_NUM_P (REGNO (SET_DEST (set)))
      || HARD_REGISTER_NUM_P (REGNO (op_operand))
      || GET_MODE (op) != twice_word_mode)
    return false;

  if (GET_CODE (op) == ZERO_EXTEND)
    {
      if (GET_MODE (op_operand) != word_mode
          || !choices[speed_p].splitting_zext)
        return false;
    }
  else /* left or right shift */
    {
      bool *splitting = (GET_CODE (op) == ASHIFT
                         ? choices[speed_p].splitting_ashift
                         : choices[speed_p].splitting_lshiftrt);
      if (!CONST_INT_P (XEXP (op, 1))
          || !IN_RANGE (INTVAL (XEXP (op, 1)), BITS_PER_WORD,
                        2 * BITS_PER_WORD - 1)
          || !splitting[INTVAL (XEXP (op, 1)) - BITS_PER_WORD])
        return false;

      bitmap_set_bit (decomposable_context, REGNO (op_operand));
    }

  bitmap_set_bit (decomposable_context, REGNO (SET_DEST (set)));

  return true;
}
/* Decompose a more than word wide shift (in INSN) of a multiword
   pseudo or a multiword zero-extend of a wordmode pseudo into a move
   and 'set to zero' insn.  Return a pointer to the new insn when a
   replacement was done.  */

static rtx
resolve_shift_zext (rtx insn)
{
  rtx set, op, op_operand, insns;
  rtx src_reg, dest_reg, dest_zero;
  int src_reg_num, dest_reg_num, offset1, offset2, src_offset;

  set = single_set (insn);
  if (!set)
    return NULL_RTX;

  op = SET_SRC (set);
  if (GET_CODE (op) != ASHIFT
      && GET_CODE (op) != LSHIFTRT
      && GET_CODE (op) != ZERO_EXTEND)
    return NULL_RTX;

  op_operand = XEXP (op, 0);

  /* We can tear this operation apart only if the regs were already
     torn apart.  */
  if (!resolve_reg_p (SET_DEST (set)) && !resolve_reg_p (op_operand))
    return NULL_RTX;

  /* src_reg_num is the number of the word mode register which we
     are operating on.  For a left shift and a zero_extend on little
     endian machines this is register 0.  */
  src_reg_num = GET_CODE (op) == LSHIFTRT ? 1 : 0;

  if (WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (GET_MODE (op_operand)) > UNITS_PER_WORD)
    src_reg_num = 1 - src_reg_num;

  if (GET_CODE (op) == ZERO_EXTEND)
    dest_reg_num = WORDS_BIG_ENDIAN ? 1 : 0;
  else
    dest_reg_num = 1 - src_reg_num;

  offset1 = UNITS_PER_WORD * dest_reg_num;
  offset2 = UNITS_PER_WORD * (1 - dest_reg_num);
  src_offset = UNITS_PER_WORD * src_reg_num;

  start_sequence ();

  dest_reg = simplify_gen_subreg_concatn (word_mode, SET_DEST (set),
                                          GET_MODE (SET_DEST (set)),
                                          offset1);
  dest_zero = simplify_gen_subreg_concatn (word_mode, SET_DEST (set),
                                           GET_MODE (SET_DEST (set)),
                                           offset2);
  src_reg = simplify_gen_subreg_concatn (word_mode, op_operand,
                                         GET_MODE (op_operand),
                                         src_offset);
  if (GET_CODE (op) != ZERO_EXTEND)
    {
      int shift_count = INTVAL (XEXP (op, 1));
      if (shift_count > BITS_PER_WORD)
        src_reg = expand_shift (GET_CODE (op) == ASHIFT ?
                                LSHIFT_EXPR : RSHIFT_EXPR,
                                word_mode, src_reg,
                                shift_count - BITS_PER_WORD,
                                dest_reg, 1);
    }

  if (dest_reg != src_reg)
    emit_move_insn (dest_reg, src_reg);
  emit_move_insn (dest_zero, CONST0_RTX (word_mode));
  insns = get_insns ();

  end_sequence ();

  emit_insn_before (insns, insn);

  if (dump_file)
    {
      rtx in;

      fprintf (dump_file, "; Replacing insn: %d with insns: ", INSN_UID (insn));
      for (in = insns; in != insn; in = NEXT_INSN (in))
        fprintf (dump_file, "%d ", INSN_UID (in));
      fprintf (dump_file, "\n");
    }

  delete_insn (insn);
  return insns;
}
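
/* For example, on a 32-bit little-endian target an insn such as

	(set (reg:DI 100) (lshiftrt:DI (reg:DI 101) (const_int 40)))

   is replaced, once regs 100 and 101 are decomposed, by a word-mode
   right shift by 8 of the high word of reg 101 into the low word of
   reg 100, plus a move of zero into the high word of reg 100.  The
   register numbers are invented for the illustration.  */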
/* Print to dump_file a description of what we're doing with shift code CODE.
   SPLITTING[X] is true if we are splitting shifts by X + BITS_PER_WORD.  */

static void
dump_shift_choices (enum rtx_code code, bool *splitting)
{
  int i;
  const char *sep;

  fprintf (dump_file,
           "  Splitting mode %s for %s lowering with shift amounts = ",
           GET_MODE_NAME (twice_word_mode), GET_RTX_NAME (code));
  sep = "";
  for (i = 0; i < BITS_PER_WORD; i++)
    if (splitting[i])
      {
        fprintf (dump_file, "%s%d", sep, i + BITS_PER_WORD);
        sep = ", ";
      }
  fprintf (dump_file, "\n");
}
/* Print to dump_file a description of what we're doing when optimizing
   for speed or size; SPEED_P says which.  DESCRIPTION is a description
   of the SPEED_P choice.  */

static void
dump_choices (bool speed_p, const char *description)
{
  unsigned int i;

  fprintf (dump_file, "Choices when optimizing for %s:\n", description);

  for (i = 0; i < MAX_MACHINE_MODE; i++)
    if (GET_MODE_SIZE (i) > UNITS_PER_WORD)
      fprintf (dump_file, "  %s mode %s for copy lowering.\n",
               choices[speed_p].move_modes_to_split[i]
               ? "Splitting" : "Skipping",
               GET_MODE_NAME ((enum machine_mode) i));

  fprintf (dump_file, "  %s mode %s for zero_extend lowering.\n",
           choices[speed_p].splitting_zext ? "Splitting" : "Skipping",
           GET_MODE_NAME (twice_word_mode));

  dump_shift_choices (ASHIFT, choices[speed_p].splitting_ashift);
  dump_shift_choices (LSHIFTRT, choices[speed_p].splitting_lshiftrt);
  fprintf (dump_file, "\n");
}
/* Look for registers which are always accessed via word-sized SUBREGs
   or via copies.  Decompose these registers into several word-sized
   pseudo-registers.  */

static void
decompose_multiword_subregs (void)
{
  unsigned int max;
  basic_block bb;
  bool speed_p;

  if (dump_file)
    {
      dump_choices (false, "size");
      dump_choices (true, "speed");
    }

  /* Check if this target even has any modes to consider lowering.  */
  if (!choices[false].something_to_do && !choices[true].something_to_do)
    {
      if (dump_file)
        fprintf (dump_file, "Nothing to do!\n");
      return;
    }

  max = max_reg_num ();

  /* First see if there are any multi-word pseudo-registers.  If there
     aren't, there is nothing we can do.  This should speed up this
     pass in the normal case, since it should be faster than scanning
     all the insns.  */
  {
    unsigned int i;
    bool useful_modes_seen = false;

    for (i = FIRST_PSEUDO_REGISTER; i < max; ++i)
      if (regno_reg_rtx[i] != NULL)
        {
          enum machine_mode mode = GET_MODE (regno_reg_rtx[i]);
          if (choices[false].move_modes_to_split[(int) mode]
              || choices[true].move_modes_to_split[(int) mode])
            {
              useful_modes_seen = true;
              break;
            }
        }

    if (!useful_modes_seen)
      {
        if (dump_file)
          fprintf (dump_file, "Nothing to lower in this function.\n");
        return;
      }
  }

  df_set_flags (DF_DEFER_INSN_RESCAN);

  /* FIXME: It may be possible to change this code to look for each
     multi-word pseudo-register and to find each insn which sets or
     uses that register.  That should be faster than scanning all the
     insns.  */

  decomposable_context = BITMAP_ALLOC (NULL);
  non_decomposable_context = BITMAP_ALLOC (NULL);
  subreg_context = BITMAP_ALLOC (NULL);

  reg_copy_graph = VEC_alloc (bitmap, heap, max);
  VEC_safe_grow (bitmap, heap, reg_copy_graph, max);
  memset (VEC_address (bitmap, reg_copy_graph), 0, sizeof (bitmap) * max);

  speed_p = optimize_function_for_speed_p (cfun);

  FOR_EACH_BB (bb)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
        {
          rtx set;
          enum classify_move_insn cmi;
          int i, n;

          if (!INSN_P (insn)
              || GET_CODE (PATTERN (insn)) == CLOBBER
              || GET_CODE (PATTERN (insn)) == USE)
            continue;

          recog_memoized (insn);

          if (find_decomposable_shift_zext (insn, speed_p))
            continue;

          extract_insn (insn);

          set = simple_move (insn, speed_p);

          if (!set)
            cmi = NOT_SIMPLE_MOVE;
          else
            {
              if (find_pseudo_copy (set))
                cmi = SIMPLE_PSEUDO_REG_MOVE;
              else
                cmi = SIMPLE_MOVE;
            }

          n = recog_data.n_operands;
          for (i = 0; i < n; ++i)
            {
              for_each_rtx (&recog_data.operand[i],
                            find_decomposable_subregs,
                            &cmi);

              /* We handle ASM_OPERANDS as a special case to support
                 things like x86 rdtsc which returns a DImode value.
                 We can decompose the output, which will certainly be
                 operand 0, but not the inputs.  */
              if (cmi == SIMPLE_MOVE
                  && GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
                {
                  gcc_assert (i == 0);
                  cmi = NOT_SIMPLE_MOVE;
                }
            }
        }
    }

  bitmap_and_compl_into (decomposable_context, non_decomposable_context);
  if (!bitmap_empty_p (decomposable_context))
    {
      sbitmap sub_blocks;
      unsigned int i;
      unsigned int regno;
      sbitmap_iterator sbi;
      bitmap_iterator iter;

      propagate_pseudo_copies ();

      sub_blocks = sbitmap_alloc (last_basic_block);
      sbitmap_zero (sub_blocks);

      EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
        decompose_register (regno);

      FOR_EACH_BB (bb)
        {
          rtx insn;

          FOR_BB_INSNS (bb, insn)
            {
              rtx pat;

              if (!INSN_P (insn))
                continue;

              pat = PATTERN (insn);
              if (GET_CODE (pat) == CLOBBER)
                resolve_clobber (pat, insn);
              else if (GET_CODE (pat) == USE)
                resolve_use (pat, insn);
              else if (DEBUG_INSN_P (insn))
                resolve_debug (insn);
              else
                {
                  rtx set;
                  int i;

                  recog_memoized (insn);
                  extract_insn (insn);

                  set = simple_move (insn, speed_p);
                  if (set)
                    {
                      rtx orig_insn = insn;
                      bool cfi = control_flow_insn_p (insn);

                      /* We can end up splitting loads to multi-word pseudos
                         into separate loads to machine word size pseudos.
                         When this happens, we first had one load that can
                         throw, and after resolve_simple_move we'll have a
                         bunch of loads (at least two).  All those loads may
                         trap if we can have non-call exceptions, so they
                         all will end the current basic block.  We split the
                         block after the outer loop over all insns, but we
                         make sure here that we will be able to split the
                         basic block and still produce the correct control
                         flow graph for it.  */
                      gcc_assert (!cfi
                                  || (cfun->can_throw_non_call_exceptions
                                      && can_throw_internal (insn)));

                      insn = resolve_simple_move (set, insn);
                      if (insn != orig_insn)
                        {
                          recog_memoized (insn);
                          extract_insn (insn);

                          if (cfi)
                            SET_BIT (sub_blocks, bb->index);
                        }
                    }
                  else
                    {
                      rtx decomposed_shift;

                      decomposed_shift = resolve_shift_zext (insn);
                      if (decomposed_shift != NULL_RTX)
                        {
                          insn = decomposed_shift;
                          recog_memoized (insn);
                          extract_insn (insn);
                        }
                    }

                  for (i = recog_data.n_operands - 1; i >= 0; --i)
                    for_each_rtx (recog_data.operand_loc[i],
                                  resolve_subreg_use, insn);

                  resolve_reg_notes (insn);

                  if (num_validated_changes () > 0)
                    {
                      for (i = recog_data.n_dups - 1; i >= 0; --i)
                        {
                          rtx *pl = recog_data.dup_loc[i];
                          int dup_num = recog_data.dup_num[i];
                          rtx *px = recog_data.operand_loc[dup_num];

                          validate_unshare_change (insn, pl, *px, 1);
                        }

                      i = apply_change_group ();
                      gcc_assert (i);
                    }
                }
            }
        }

      /* If we had insns to split that caused control flow insns in the middle
         of a basic block, split those blocks now.  Note that we only handle
         the case where splitting a load has caused multiple possibly trapping
         loads to appear.  */
      EXECUTE_IF_SET_IN_SBITMAP (sub_blocks, 0, i, sbi)
        {
          rtx insn, end;
          edge fallthru;

          bb = BASIC_BLOCK (i);
          insn = BB_HEAD (bb);
          end = BB_END (bb);

          while (insn != end)
            {
              if (control_flow_insn_p (insn))
                {
                  /* Split the block after insn.  There will be a fallthru
                     edge, which is OK so we keep it.  We have to create the
                     exception edges ourselves.  */
                  fallthru = split_block (bb, insn);
                  rtl_make_eh_edge (NULL, bb, BB_END (bb));
                  bb = fallthru->dest;
                  insn = BB_HEAD (bb);
                }
              else
                insn = NEXT_INSN (insn);
            }
        }

      sbitmap_free (sub_blocks);
    }

  {
    unsigned int i;
    bitmap b;

    FOR_EACH_VEC_ELT (bitmap, reg_copy_graph, i, b)
      if (b)
        BITMAP_FREE (b);
  }

  VEC_free (bitmap, heap, reg_copy_graph);

  BITMAP_FREE (decomposable_context);
  BITMAP_FREE (non_decomposable_context);
  BITMAP_FREE (subreg_context);
}
/* Gate function for lower subreg pass.  */

static bool
gate_handle_lower_subreg (void)
{
  return flag_split_wide_types != 0;
}

/* Implement first lower subreg pass.  */

static unsigned int
rest_of_handle_lower_subreg (void)
{
  decompose_multiword_subregs ();
  return 0;
}

/* Implement second lower subreg pass.  */

static unsigned int
rest_of_handle_lower_subreg2 (void)
{
  decompose_multiword_subregs ();
  return 0;
}
struct rtl_opt_pass pass_lower_subreg =
{
 {
  RTL_PASS,
  "subreg1",                            /* name */
  gate_handle_lower_subreg,             /* gate */
  rest_of_handle_lower_subreg,          /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_LOWER_SUBREG,                      /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_verify_flow                      /* todo_flags_finish */
 }
};

struct rtl_opt_pass pass_lower_subreg2 =
{
 {
  RTL_PASS,
  "subreg2",                            /* name */
  gate_handle_lower_subreg,             /* gate */
  rest_of_handle_lower_subreg2,         /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_LOWER_SUBREG,                      /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_verify_flow                      /* todo_flags_finish */
 }
};