1 /* Decompose multiword subregs.
2 Copyright (C) 2007-2014 Free Software Foundation, Inc.
3 Contributed by Richard Henderson <rth@redhat.com>
4 Ian Lance Taylor <iant@google.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "insn-config.h"
33 #include "basic-block.h"
40 #include "tree-pass.h"
42 #include "lower-subreg.h"
44 #ifdef STACK_GROWS_DOWNWARD
45 # undef STACK_GROWS_DOWNWARD
46 # define STACK_GROWS_DOWNWARD 1
48 # define STACK_GROWS_DOWNWARD 0
52 /* Decompose multi-word pseudo-registers into individual
53 pseudo-registers when possible and profitable. This is possible
54 when all the uses of a multi-word register are via SUBREG, or are
55 copies of the register to another location. Breaking apart the
56 register permits more CSE and permits better register allocation.
57 This is profitable if the machine does not have move instructions
60 This pass only splits moves with modes that are wider than
61 word_mode and ASHIFTs, LSHIFTRTs, ASHIFTRTs and ZERO_EXTENDs with
62 integer modes that are twice the width of word_mode. The latter
63 could be generalized if there was a need to do this, but the trend in
64 architectures is to not need this.
66 There are two useful preprocessor defines for use by maintainers:
70 if you wish to see the actual cost estimates that are being used
71 for each mode wider than word mode and the cost estimates for zero
72 extension and the shifts. This can be useful when port maintainers
73 are tuning insn rtx costs.
75 #define FORCE_LOWERING 1
77 if you wish to test the pass with all the transformation forced on.
78 This can be useful for finding bugs in the transformations. */
81 #define FORCE_LOWERING 0
83 /* Bit N in this bitmap is set if regno N is used in a context in
84 which we can decompose it. */
85 static bitmap decomposable_context
;
87 /* Bit N in this bitmap is set if regno N is used in a context in
88 which it cannot be decomposed.  */
89 static bitmap non_decomposable_context
;
91 /* Bit N in this bitmap is set if regno N is used in a subreg
92 which changes the mode but not the size. This typically happens
93 when the register is accessed as a floating-point value; we want to
94 avoid generating accesses to its subwords in integer modes. */
95 static bitmap subreg_context
;
97 /* Bit N in the bitmap in element M of this array is set if there is a
98 copy from reg M to reg N. */
99 static vec
<bitmap
> reg_copy_graph
;
101 struct target_lower_subreg default_target_lower_subreg
;
102 #if SWITCHABLE_TARGET
103 struct target_lower_subreg
*this_target_lower_subreg
104 = &default_target_lower_subreg
;
107 #define twice_word_mode \
108 this_target_lower_subreg->x_twice_word_mode
110 this_target_lower_subreg->x_choices
112 /* RTXes used while computing costs. */
114 /* Source and target registers. */
118 /* A twice_word_mode ZERO_EXTEND of SOURCE. */
121 /* A shift of SOURCE. */
124 /* A SET of TARGET. */
128 /* Return the cost of a CODE shift in mode MODE by OP1 bits, using the
129 rtxes in RTXES. SPEED_P selects between the speed and size cost. */
132 shift_cost (bool speed_p
, struct cost_rtxes
*rtxes
, enum rtx_code code
,
133 enum machine_mode mode
, int op1
)
135 PUT_CODE (rtxes
->shift
, code
);
136 PUT_MODE (rtxes
->shift
, mode
);
137 PUT_MODE (rtxes
->source
, mode
);
138 XEXP (rtxes
->shift
, 1) = GEN_INT (op1
);
139 return set_src_cost (rtxes
->shift
, speed_p
);
142 /* For each X in the range [0, BITS_PER_WORD), set SPLITTING[X]
143 to true if it is profitable to split a double-word CODE shift
144 of X + BITS_PER_WORD bits. SPEED_P says whether we are testing
145 for speed or size profitability.
147 Use the rtxes in RTXES to calculate costs. WORD_MOVE_ZERO_COST is
148 the cost of moving zero into a word-mode register. WORD_MOVE_COST
149 is the cost of moving between word registers. */
152 compute_splitting_shift (bool speed_p
, struct cost_rtxes
*rtxes
,
153 bool *splitting
, enum rtx_code code
,
154 int word_move_zero_cost
, int word_move_cost
)
156 int wide_cost
, narrow_cost
, upper_cost
, i
;
158 for (i
= 0; i
< BITS_PER_WORD
; i
++)
160 wide_cost
= shift_cost (speed_p
, rtxes
, code
, twice_word_mode
,
163 narrow_cost
= word_move_cost
;
165 narrow_cost
= shift_cost (speed_p
, rtxes
, code
, word_mode
, i
);
167 if (code
!= ASHIFTRT
)
168 upper_cost
= word_move_zero_cost
;
169 else if (i
== BITS_PER_WORD
- 1)
170 upper_cost
= word_move_cost
;
172 upper_cost
= shift_cost (speed_p
, rtxes
, code
, word_mode
,
176 fprintf (stderr
, "%s %s by %d: original cost %d, split cost %d + %d\n",
177 GET_MODE_NAME (twice_word_mode
), GET_RTX_NAME (code
),
178 i
+ BITS_PER_WORD
, wide_cost
, narrow_cost
, upper_cost
);
180 if (FORCE_LOWERING
|| wide_cost
>= narrow_cost
+ upper_cost
)
185 /* Compute what we should do when optimizing for speed or size; SPEED_P
186 selects which. Use RTXES for computing costs. */
189 compute_costs (bool speed_p
, struct cost_rtxes
*rtxes
)
192 int word_move_zero_cost
, word_move_cost
;
194 PUT_MODE (rtxes
->target
, word_mode
);
195 SET_SRC (rtxes
->set
) = CONST0_RTX (word_mode
);
196 word_move_zero_cost
= set_rtx_cost (rtxes
->set
, speed_p
);
198 SET_SRC (rtxes
->set
) = rtxes
->source
;
199 word_move_cost
= set_rtx_cost (rtxes
->set
, speed_p
);
202 fprintf (stderr
, "%s move: from zero cost %d, from reg cost %d\n",
203 GET_MODE_NAME (word_mode
), word_move_zero_cost
, word_move_cost
);
205 for (i
= 0; i
< MAX_MACHINE_MODE
; i
++)
207 enum machine_mode mode
= (enum machine_mode
) i
;
208 int factor
= GET_MODE_SIZE (mode
) / UNITS_PER_WORD
;
213 PUT_MODE (rtxes
->target
, mode
);
214 PUT_MODE (rtxes
->source
, mode
);
215 mode_move_cost
= set_rtx_cost (rtxes
->set
, speed_p
);
218 fprintf (stderr
, "%s move: original cost %d, split cost %d * %d\n",
219 GET_MODE_NAME (mode
), mode_move_cost
,
220 word_move_cost
, factor
);
222 if (FORCE_LOWERING
|| mode_move_cost
>= word_move_cost
* factor
)
224 choices
[speed_p
].move_modes_to_split
[i
] = true;
225 choices
[speed_p
].something_to_do
= true;
230 /* For the moves and shifts, the only case that is checked is one
231 where the mode of the target is an integer mode twice the width
234 If it is not profitable to split a double word move then do not
235 even consider the shifts or the zero extension. */
236 if (choices
[speed_p
].move_modes_to_split
[(int) twice_word_mode
])
240 /* The only case here to check to see if moving the upper part with a
241 zero is cheaper than doing the zext itself. */
242 PUT_MODE (rtxes
->source
, word_mode
);
243 zext_cost
= set_src_cost (rtxes
->zext
, speed_p
);
246 fprintf (stderr
, "%s %s: original cost %d, split cost %d + %d\n",
247 GET_MODE_NAME (twice_word_mode
), GET_RTX_NAME (ZERO_EXTEND
),
248 zext_cost
, word_move_cost
, word_move_zero_cost
);
250 if (FORCE_LOWERING
|| zext_cost
>= word_move_cost
+ word_move_zero_cost
)
251 choices
[speed_p
].splitting_zext
= true;
253 compute_splitting_shift (speed_p
, rtxes
,
254 choices
[speed_p
].splitting_ashift
, ASHIFT
,
255 word_move_zero_cost
, word_move_cost
);
256 compute_splitting_shift (speed_p
, rtxes
,
257 choices
[speed_p
].splitting_lshiftrt
, LSHIFTRT
,
258 word_move_zero_cost
, word_move_cost
);
259 compute_splitting_shift (speed_p
, rtxes
,
260 choices
[speed_p
].splitting_ashiftrt
, ASHIFTRT
,
261 word_move_zero_cost
, word_move_cost
);
265 /* Do one-per-target initialisation. This involves determining
266 which operations on the machine are profitable. If none are found,
267 then the pass just returns when called. */
270 init_lower_subreg (void)
272 struct cost_rtxes rtxes
;
274 memset (this_target_lower_subreg
, 0, sizeof (*this_target_lower_subreg
));
276 twice_word_mode
= GET_MODE_2XWIDER_MODE (word_mode
);
278 rtxes
.target
= gen_rtx_REG (word_mode
, FIRST_PSEUDO_REGISTER
);
279 rtxes
.source
= gen_rtx_REG (word_mode
, FIRST_PSEUDO_REGISTER
+ 1);
280 rtxes
.set
= gen_rtx_SET (VOIDmode
, rtxes
.target
, rtxes
.source
);
281 rtxes
.zext
= gen_rtx_ZERO_EXTEND (twice_word_mode
, rtxes
.source
);
282 rtxes
.shift
= gen_rtx_ASHIFT (twice_word_mode
, rtxes
.source
, const0_rtx
);
285 fprintf (stderr
, "\nSize costs\n==========\n\n");
286 compute_costs (false, &rtxes
);
289 fprintf (stderr
, "\nSpeed costs\n===========\n\n");
290 compute_costs (true, &rtxes
);
294 simple_move_operand (rtx x
)
296 if (GET_CODE (x
) == SUBREG
)
302 if (GET_CODE (x
) == LABEL_REF
303 || GET_CODE (x
) == SYMBOL_REF
304 || GET_CODE (x
) == HIGH
305 || GET_CODE (x
) == CONST
)
309 && (MEM_VOLATILE_P (x
)
310 || mode_dependent_address_p (XEXP (x
, 0), MEM_ADDR_SPACE (x
))))
316 /* If INSN is a single set between two objects that we want to split,
317 return the single set. SPEED_P says whether we are optimizing
318 INSN for speed or size.
320 INSN should have been passed to recog and extract_insn before this
324 simple_move (rtx insn
, bool speed_p
)
328 enum machine_mode mode
;
330 if (recog_data
.n_operands
!= 2)
333 set
= single_set (insn
);
338 if (x
!= recog_data
.operand
[0] && x
!= recog_data
.operand
[1])
340 if (!simple_move_operand (x
))
344 if (x
!= recog_data
.operand
[0] && x
!= recog_data
.operand
[1])
346 /* For the src we can handle ASM_OPERANDS, and it is beneficial for
347 things like x86 rdtsc which returns a DImode value. */
348 if (GET_CODE (x
) != ASM_OPERANDS
349 && !simple_move_operand (x
))
352 /* We try to decompose in integer modes, to avoid generating
353 inefficient code copying between integer and floating point
354 registers. That means that we can't decompose if this is a
355 non-integer mode for which there is no integer mode of the same
357 mode
= GET_MODE (SET_DEST (set
));
358 if (!SCALAR_INT_MODE_P (mode
)
359 && (mode_for_size (GET_MODE_SIZE (mode
) * BITS_PER_UNIT
, MODE_INT
, 0)
363 /* Reject PARTIAL_INT modes. They are used for processor specific
364 purposes and it's probably best not to tamper with them. */
365 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
368 if (!choices
[speed_p
].move_modes_to_split
[(int) mode
])
374 /* If SET is a copy from one multi-word pseudo-register to another,
375 record that in reg_copy_graph. Return whether it is such a
379 find_pseudo_copy (rtx set
)
381 rtx dest
= SET_DEST (set
);
382 rtx src
= SET_SRC (set
);
386 if (!REG_P (dest
) || !REG_P (src
))
391 if (HARD_REGISTER_NUM_P (rd
) || HARD_REGISTER_NUM_P (rs
))
394 b
= reg_copy_graph
[rs
];
397 b
= BITMAP_ALLOC (NULL
);
398 reg_copy_graph
[rs
] = b
;
401 bitmap_set_bit (b
, rd
);
406 /* Look through the registers in DECOMPOSABLE_CONTEXT. For each case
407 where they are copied to another register, add the register to
408 which they are copied to DECOMPOSABLE_CONTEXT. Use
409 NON_DECOMPOSABLE_CONTEXT to limit this--we don't bother to track
410 copies of registers which are in NON_DECOMPOSABLE_CONTEXT. */
413 propagate_pseudo_copies (void)
415 bitmap queue
, propagate
;
417 queue
= BITMAP_ALLOC (NULL
);
418 propagate
= BITMAP_ALLOC (NULL
);
420 bitmap_copy (queue
, decomposable_context
);
423 bitmap_iterator iter
;
426 bitmap_clear (propagate
);
428 EXECUTE_IF_SET_IN_BITMAP (queue
, 0, i
, iter
)
430 bitmap b
= reg_copy_graph
[i
];
432 bitmap_ior_and_compl_into (propagate
, b
, non_decomposable_context
);
435 bitmap_and_compl (queue
, propagate
, decomposable_context
);
436 bitmap_ior_into (decomposable_context
, propagate
);
438 while (!bitmap_empty_p (queue
));
441 BITMAP_FREE (propagate
);
/* A pointer to one of these values is passed to
   find_decomposable_subregs via for_each_rtx.  */

enum classify_move_insn
{
  /* Not a simple move from one location to another.  */
  NOT_SIMPLE_MOVE,
  /* A simple move we want to decompose.  */
  DECOMPOSABLE_SIMPLE_MOVE,
  /* Any other simple move.  */
  SIMPLE_MOVE
};
457 /* This is called via for_each_rtx. If we find a SUBREG which we
458 could use to decompose a pseudo-register, set a bit in
459 DECOMPOSABLE_CONTEXT. If we find an unadorned register which is
460 not a simple pseudo-register copy, DATA will point at the type of
461 move, and we set a bit in DECOMPOSABLE_CONTEXT or
462 NON_DECOMPOSABLE_CONTEXT as appropriate. */
465 find_decomposable_subregs (rtx
*px
, void *data
)
467 enum classify_move_insn
*pcmi
= (enum classify_move_insn
*) data
;
473 if (GET_CODE (x
) == SUBREG
)
475 rtx inner
= SUBREG_REG (x
);
476 unsigned int regno
, outer_size
, inner_size
, outer_words
, inner_words
;
481 regno
= REGNO (inner
);
482 if (HARD_REGISTER_NUM_P (regno
))
485 outer_size
= GET_MODE_SIZE (GET_MODE (x
));
486 inner_size
= GET_MODE_SIZE (GET_MODE (inner
));
487 outer_words
= (outer_size
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
488 inner_words
= (inner_size
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
490 /* We only try to decompose single word subregs of multi-word
491 registers. When we find one, we return -1 to avoid iterating
492 over the inner register.
494 ??? This doesn't allow, e.g., DImode subregs of TImode values
495 on 32-bit targets. We would need to record the way the
496 pseudo-register was used, and only decompose if all the uses
497 were the same number and size of pieces. Hopefully this
498 doesn't happen much. */
500 if (outer_words
== 1 && inner_words
> 1)
502 bitmap_set_bit (decomposable_context
, regno
);
506 /* If this is a cast from one mode to another, where the modes
507 have the same size, and they are not tieable, then mark this
508 register as non-decomposable. If we decompose it we are
509 likely to mess up whatever the backend is trying to do. */
511 && outer_size
== inner_size
512 && !MODES_TIEABLE_P (GET_MODE (x
), GET_MODE (inner
)))
514 bitmap_set_bit (non_decomposable_context
, regno
);
515 bitmap_set_bit (subreg_context
, regno
);
523 /* We will see an outer SUBREG before we see the inner REG, so
524 when we see a plain REG here it means a direct reference to
527 If this is not a simple copy from one location to another,
528 then we can not decompose this register. If this is a simple
529 copy we want to decompose, and the mode is right,
530 then we mark the register as decomposable.
531 Otherwise we don't say anything about this register --
532 it could be decomposed, but whether that would be
533 profitable depends upon how it is used elsewhere.
535 We only set bits in the bitmap for multi-word
536 pseudo-registers, since those are the only ones we care about
537 and it keeps the size of the bitmaps down. */
540 if (!HARD_REGISTER_NUM_P (regno
)
541 && GET_MODE_SIZE (GET_MODE (x
)) > UNITS_PER_WORD
)
545 case NOT_SIMPLE_MOVE
:
546 bitmap_set_bit (non_decomposable_context
, regno
);
548 case DECOMPOSABLE_SIMPLE_MOVE
:
549 if (MODES_TIEABLE_P (GET_MODE (x
), word_mode
))
550 bitmap_set_bit (decomposable_context
, regno
);
561 enum classify_move_insn cmi_mem
= NOT_SIMPLE_MOVE
;
563 /* Any registers used in a MEM do not participate in a
564 SIMPLE_MOVE or DECOMPOSABLE_SIMPLE_MOVE. Do our own recursion
565 here, and return -1 to block the parent's recursion. */
566 for_each_rtx (&XEXP (x
, 0), find_decomposable_subregs
, &cmi_mem
);
573 /* Decompose REGNO into word-sized components. We smash the REG node
574 in place. This ensures that (1) something goes wrong quickly if we
575 fail to make some replacement, and (2) the debug information inside
576 the symbol table is automatically kept up to date. */
579 decompose_register (unsigned int regno
)
582 unsigned int words
, i
;
585 reg
= regno_reg_rtx
[regno
];
587 regno_reg_rtx
[regno
] = NULL_RTX
;
589 words
= GET_MODE_SIZE (GET_MODE (reg
));
590 words
= (words
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
592 v
= rtvec_alloc (words
);
593 for (i
= 0; i
< words
; ++i
)
594 RTVEC_ELT (v
, i
) = gen_reg_rtx_offset (reg
, word_mode
, i
* UNITS_PER_WORD
);
596 PUT_CODE (reg
, CONCATN
);
601 fprintf (dump_file
, "; Splitting reg %u ->", regno
);
602 for (i
= 0; i
< words
; ++i
)
603 fprintf (dump_file
, " %u", REGNO (XVECEXP (reg
, 0, i
)));
604 fputc ('\n', dump_file
);
608 /* Get a SUBREG of a CONCATN. */
611 simplify_subreg_concatn (enum machine_mode outermode
, rtx op
,
614 unsigned int inner_size
;
615 enum machine_mode innermode
, partmode
;
617 unsigned int final_offset
;
619 gcc_assert (GET_CODE (op
) == CONCATN
);
620 gcc_assert (byte
% GET_MODE_SIZE (outermode
) == 0);
622 innermode
= GET_MODE (op
);
623 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
624 gcc_assert (GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (innermode
));
626 inner_size
= GET_MODE_SIZE (innermode
) / XVECLEN (op
, 0);
627 part
= XVECEXP (op
, 0, byte
/ inner_size
);
628 partmode
= GET_MODE (part
);
630 /* VECTOR_CSTs in debug expressions are expanded into CONCATN instead of
631 regular CONST_VECTORs. They have vector or integer modes, depending
632 on the capabilities of the target. Cope with them. */
633 if (partmode
== VOIDmode
&& VECTOR_MODE_P (innermode
))
634 partmode
= GET_MODE_INNER (innermode
);
635 else if (partmode
== VOIDmode
)
637 enum mode_class mclass
= GET_MODE_CLASS (innermode
);
638 partmode
= mode_for_size (inner_size
* BITS_PER_UNIT
, mclass
, 0);
641 final_offset
= byte
% inner_size
;
642 if (final_offset
+ GET_MODE_SIZE (outermode
) > inner_size
)
645 return simplify_gen_subreg (outermode
, part
, partmode
, final_offset
);
648 /* Wrapper around simplify_gen_subreg which handles CONCATN. */
651 simplify_gen_subreg_concatn (enum machine_mode outermode
, rtx op
,
652 enum machine_mode innermode
, unsigned int byte
)
656 /* We have to handle generating a SUBREG of a SUBREG of a CONCATN.
657 If OP is a SUBREG of a CONCATN, then it must be a simple mode
658 change with the same size and offset 0, or it must extract a
659 part. We shouldn't see anything else here. */
660 if (GET_CODE (op
) == SUBREG
&& GET_CODE (SUBREG_REG (op
)) == CONCATN
)
664 if ((GET_MODE_SIZE (GET_MODE (op
))
665 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
666 && SUBREG_BYTE (op
) == 0)
667 return simplify_gen_subreg_concatn (outermode
, SUBREG_REG (op
),
668 GET_MODE (SUBREG_REG (op
)), byte
);
670 op2
= simplify_subreg_concatn (GET_MODE (op
), SUBREG_REG (op
),
674 /* We don't handle paradoxical subregs here. */
675 gcc_assert (GET_MODE_SIZE (outermode
)
676 <= GET_MODE_SIZE (GET_MODE (op
)));
677 gcc_assert (GET_MODE_SIZE (GET_MODE (op
))
678 <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))));
679 op2
= simplify_subreg_concatn (outermode
, SUBREG_REG (op
),
680 byte
+ SUBREG_BYTE (op
));
681 gcc_assert (op2
!= NULL_RTX
);
686 gcc_assert (op
!= NULL_RTX
);
687 gcc_assert (innermode
== GET_MODE (op
));
690 if (GET_CODE (op
) == CONCATN
)
691 return simplify_subreg_concatn (outermode
, op
, byte
);
693 ret
= simplify_gen_subreg (outermode
, op
, innermode
, byte
);
695 /* If we see an insn like (set (reg:DI) (subreg:DI (reg:SI) 0)) then
696 resolve_simple_move will ask for the high part of the paradoxical
697 subreg, which does not have a value. Just return a zero. */
699 && GET_CODE (op
) == SUBREG
700 && SUBREG_BYTE (op
) == 0
701 && (GET_MODE_SIZE (innermode
)
702 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
)))))
703 return CONST0_RTX (outermode
);
705 gcc_assert (ret
!= NULL_RTX
);
709 /* Return whether we should resolve X into the registers into which it
713 resolve_reg_p (rtx x
)
715 return GET_CODE (x
) == CONCATN
;
718 /* Return whether X is a SUBREG of a register which we need to
722 resolve_subreg_p (rtx x
)
724 if (GET_CODE (x
) != SUBREG
)
726 return resolve_reg_p (SUBREG_REG (x
));
729 /* This is called via for_each_rtx. Look for SUBREGs which need to be
733 resolve_subreg_use (rtx
*px
, void *data
)
735 rtx insn
= (rtx
) data
;
741 if (resolve_subreg_p (x
))
743 x
= simplify_subreg_concatn (GET_MODE (x
), SUBREG_REG (x
),
746 /* It is possible for a note to contain a reference which we can
747 decompose. In this case, return 1 to the caller to indicate
748 that the note must be removed. */
755 validate_change (insn
, px
, x
, 1);
759 if (resolve_reg_p (x
))
761 /* Return 1 to the caller to indicate that we found a direct
762 reference to a register which is being decomposed. This can
763 happen inside notes, multiword shift or zero-extend
771 /* This is called via for_each_rtx. Look for SUBREGs which can be
772 decomposed and decomposed REGs that need copying. */
775 adjust_decomposed_uses (rtx
*px
, void *data ATTRIBUTE_UNUSED
)
782 if (resolve_subreg_p (x
))
784 x
= simplify_subreg_concatn (GET_MODE (x
), SUBREG_REG (x
),
793 if (resolve_reg_p (x
))
799 /* Resolve any decomposed registers which appear in register notes on
803 resolve_reg_notes (rtx insn
)
807 note
= find_reg_equal_equiv_note (insn
);
810 int old_count
= num_validated_changes ();
811 if (for_each_rtx (&XEXP (note
, 0), resolve_subreg_use
, NULL
))
812 remove_note (insn
, note
);
814 if (old_count
!= num_validated_changes ())
815 df_notes_rescan (insn
);
818 pnote
= ®_NOTES (insn
);
819 while (*pnote
!= NULL_RTX
)
824 switch (REG_NOTE_KIND (note
))
828 if (resolve_reg_p (XEXP (note
, 0)))
837 *pnote
= XEXP (note
, 1);
839 pnote
= &XEXP (note
, 1);
843 /* Return whether X can be decomposed into subwords. */
846 can_decompose_p (rtx x
)
850 unsigned int regno
= REGNO (x
);
852 if (HARD_REGISTER_NUM_P (regno
))
854 unsigned int byte
, num_bytes
;
856 num_bytes
= GET_MODE_SIZE (GET_MODE (x
));
857 for (byte
= 0; byte
< num_bytes
; byte
+= UNITS_PER_WORD
)
858 if (simplify_subreg_regno (regno
, GET_MODE (x
), byte
, word_mode
) < 0)
863 return !bitmap_bit_p (subreg_context
, regno
);
869 /* Decompose the registers used in a simple move SET within INSN. If
870 we don't change anything, return INSN, otherwise return the start
871 of the sequence of moves. */
874 resolve_simple_move (rtx set
, rtx insn
)
876 rtx src
, dest
, real_dest
, insns
;
877 enum machine_mode orig_mode
, dest_mode
;
882 dest
= SET_DEST (set
);
883 orig_mode
= GET_MODE (dest
);
885 words
= (GET_MODE_SIZE (orig_mode
) + UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
886 gcc_assert (words
> 1);
890 /* We have to handle copying from a SUBREG of a decomposed reg where
891 the SUBREG is larger than word size. Rather than assume that we
892 can take a word_mode SUBREG of the destination, we copy to a new
893 register and then copy that to the destination. */
895 real_dest
= NULL_RTX
;
897 if (GET_CODE (src
) == SUBREG
898 && resolve_reg_p (SUBREG_REG (src
))
899 && (SUBREG_BYTE (src
) != 0
900 || (GET_MODE_SIZE (orig_mode
)
901 != GET_MODE_SIZE (GET_MODE (SUBREG_REG (src
))))))
904 dest
= gen_reg_rtx (orig_mode
);
905 if (REG_P (real_dest
))
906 REG_ATTRS (dest
) = REG_ATTRS (real_dest
);
909 /* Similarly if we are copying to a SUBREG of a decomposed reg where
910 the SUBREG is larger than word size. */
912 if (GET_CODE (dest
) == SUBREG
913 && resolve_reg_p (SUBREG_REG (dest
))
914 && (SUBREG_BYTE (dest
) != 0
915 || (GET_MODE_SIZE (orig_mode
)
916 != GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest
))))))
918 rtx reg
, minsn
, smove
;
920 reg
= gen_reg_rtx (orig_mode
);
921 minsn
= emit_move_insn (reg
, src
);
922 smove
= single_set (minsn
);
923 gcc_assert (smove
!= NULL_RTX
);
924 resolve_simple_move (smove
, minsn
);
928 /* If we didn't have any big SUBREGS of decomposed registers, and
929 neither side of the move is a register we are decomposing, then
930 we don't have to do anything here. */
932 if (src
== SET_SRC (set
)
933 && dest
== SET_DEST (set
)
934 && !resolve_reg_p (src
)
935 && !resolve_subreg_p (src
)
936 && !resolve_reg_p (dest
)
937 && !resolve_subreg_p (dest
))
943 /* It's possible for the code to use a subreg of a decomposed
944 register while forming an address. We need to handle that before
945 passing the address to emit_move_insn. We pass NULL_RTX as the
946 insn parameter to resolve_subreg_use because we can not validate
948 if (MEM_P (src
) || MEM_P (dest
))
953 for_each_rtx (&XEXP (src
, 0), resolve_subreg_use
, NULL_RTX
);
955 for_each_rtx (&XEXP (dest
, 0), resolve_subreg_use
, NULL_RTX
);
956 acg
= apply_change_group ();
960 /* If SRC is a register which we can't decompose, or has side
961 effects, we need to move via a temporary register. */
963 if (!can_decompose_p (src
)
964 || side_effects_p (src
)
965 || GET_CODE (src
) == ASM_OPERANDS
)
969 reg
= gen_reg_rtx (orig_mode
);
973 rtx move
= emit_move_insn (reg
, src
);
976 rtx note
= find_reg_note (insn
, REG_INC
, NULL_RTX
);
978 add_reg_note (move
, REG_INC
, XEXP (note
, 0));
982 emit_move_insn (reg
, src
);
987 /* If DEST is a register which we can't decompose, or has side
988 effects, we need to first move to a temporary register. We
989 handle the common case of pushing an operand directly. We also
990 go through a temporary register if it holds a floating point
991 value. This gives us better code on systems which can't move
992 data easily between integer and floating point registers. */
994 dest_mode
= orig_mode
;
995 pushing
= push_operand (dest
, dest_mode
);
996 if (!can_decompose_p (dest
)
997 || (side_effects_p (dest
) && !pushing
)
998 || (!SCALAR_INT_MODE_P (dest_mode
)
999 && !resolve_reg_p (dest
)
1000 && !resolve_subreg_p (dest
)))
1002 if (real_dest
== NULL_RTX
)
1004 if (!SCALAR_INT_MODE_P (dest_mode
))
1006 dest_mode
= mode_for_size (GET_MODE_SIZE (dest_mode
) * BITS_PER_UNIT
,
1008 gcc_assert (dest_mode
!= BLKmode
);
1010 dest
= gen_reg_rtx (dest_mode
);
1011 if (REG_P (real_dest
))
1012 REG_ATTRS (dest
) = REG_ATTRS (real_dest
);
1017 unsigned int i
, j
, jinc
;
1019 gcc_assert (GET_MODE_SIZE (orig_mode
) % UNITS_PER_WORD
== 0);
1020 gcc_assert (GET_CODE (XEXP (dest
, 0)) != PRE_MODIFY
);
1021 gcc_assert (GET_CODE (XEXP (dest
, 0)) != POST_MODIFY
);
1023 if (WORDS_BIG_ENDIAN
== STACK_GROWS_DOWNWARD
)
1034 for (i
= 0; i
< words
; ++i
, j
+= jinc
)
1038 temp
= copy_rtx (XEXP (dest
, 0));
1039 temp
= adjust_automodify_address_nv (dest
, word_mode
, temp
,
1040 j
* UNITS_PER_WORD
);
1041 emit_move_insn (temp
,
1042 simplify_gen_subreg_concatn (word_mode
, src
,
1044 j
* UNITS_PER_WORD
));
1051 if (REG_P (dest
) && !HARD_REGISTER_NUM_P (REGNO (dest
)))
1052 emit_clobber (dest
);
1054 for (i
= 0; i
< words
; ++i
)
1055 emit_move_insn (simplify_gen_subreg_concatn (word_mode
, dest
,
1057 i
* UNITS_PER_WORD
),
1058 simplify_gen_subreg_concatn (word_mode
, src
,
1060 i
* UNITS_PER_WORD
));
1063 if (real_dest
!= NULL_RTX
)
1065 rtx mdest
, minsn
, smove
;
1067 if (dest_mode
== orig_mode
)
1070 mdest
= simplify_gen_subreg (orig_mode
, dest
, GET_MODE (dest
), 0);
1071 minsn
= emit_move_insn (real_dest
, mdest
);
1074 if (MEM_P (real_dest
)
1075 && !(resolve_reg_p (real_dest
) || resolve_subreg_p (real_dest
)))
1077 rtx note
= find_reg_note (insn
, REG_INC
, NULL_RTX
);
1079 add_reg_note (minsn
, REG_INC
, XEXP (note
, 0));
1083 smove
= single_set (minsn
);
1084 gcc_assert (smove
!= NULL_RTX
);
1086 resolve_simple_move (smove
, minsn
);
1089 insns
= get_insns ();
1092 copy_reg_eh_region_note_forward (insn
, insns
, NULL_RTX
);
1094 emit_insn_before (insns
, insn
);
1096 /* If we get here via self-recursion, then INSN is not yet in the insns
1097 chain and delete_insn will fail. We only want to remove INSN from the
1098 current sequence. See PR56738. */
1099 if (in_sequence_p ())
1107 /* Change a CLOBBER of a decomposed register into a CLOBBER of the
1108 component registers. Return whether we changed something. */
1111 resolve_clobber (rtx pat
, rtx insn
)
1114 enum machine_mode orig_mode
;
1115 unsigned int words
, i
;
1118 reg
= XEXP (pat
, 0);
1119 if (!resolve_reg_p (reg
) && !resolve_subreg_p (reg
))
1122 orig_mode
= GET_MODE (reg
);
1123 words
= GET_MODE_SIZE (orig_mode
);
1124 words
= (words
+ UNITS_PER_WORD
- 1) / UNITS_PER_WORD
;
1126 ret
= validate_change (NULL_RTX
, &XEXP (pat
, 0),
1127 simplify_gen_subreg_concatn (word_mode
, reg
,
1130 df_insn_rescan (insn
);
1131 gcc_assert (ret
!= 0);
1133 for (i
= words
- 1; i
> 0; --i
)
1137 x
= simplify_gen_subreg_concatn (word_mode
, reg
, orig_mode
,
1138 i
* UNITS_PER_WORD
);
1139 x
= gen_rtx_CLOBBER (VOIDmode
, x
);
1140 emit_insn_after (x
, insn
);
1143 resolve_reg_notes (insn
);
1148 /* A USE of a decomposed register is no longer meaningful. Return
1149 whether we changed something. */
1152 resolve_use (rtx pat
, rtx insn
)
1154 if (resolve_reg_p (XEXP (pat
, 0)) || resolve_subreg_p (XEXP (pat
, 0)))
1160 resolve_reg_notes (insn
);
1165 /* A VAR_LOCATION can be simplified. */
1168 resolve_debug (rtx insn
)
1170 for_each_rtx (&PATTERN (insn
), adjust_decomposed_uses
, NULL_RTX
);
1172 df_insn_rescan (insn
);
1174 resolve_reg_notes (insn
);
1177 /* Check if INSN is a decomposable multiword-shift or zero-extend and
1178 set the decomposable_context bitmap accordingly. SPEED_P is true
1179 if we are optimizing INSN for speed rather than size. Return true
1180 if INSN is decomposable. */
1183 find_decomposable_shift_zext (rtx insn
, bool speed_p
)
1189 set
= single_set (insn
);
1194 if (GET_CODE (op
) != ASHIFT
1195 && GET_CODE (op
) != LSHIFTRT
1196 && GET_CODE (op
) != ASHIFTRT
1197 && GET_CODE (op
) != ZERO_EXTEND
)
1200 op_operand
= XEXP (op
, 0);
1201 if (!REG_P (SET_DEST (set
)) || !REG_P (op_operand
)
1202 || HARD_REGISTER_NUM_P (REGNO (SET_DEST (set
)))
1203 || HARD_REGISTER_NUM_P (REGNO (op_operand
))
1204 || GET_MODE (op
) != twice_word_mode
)
1207 if (GET_CODE (op
) == ZERO_EXTEND
)
1209 if (GET_MODE (op_operand
) != word_mode
1210 || !choices
[speed_p
].splitting_zext
)
1213 else /* left or right shift */
1215 bool *splitting
= (GET_CODE (op
) == ASHIFT
1216 ? choices
[speed_p
].splitting_ashift
1217 : GET_CODE (op
) == ASHIFTRT
1218 ? choices
[speed_p
].splitting_ashiftrt
1219 : choices
[speed_p
].splitting_lshiftrt
);
1220 if (!CONST_INT_P (XEXP (op
, 1))
1221 || !IN_RANGE (INTVAL (XEXP (op
, 1)), BITS_PER_WORD
,
1222 2 * BITS_PER_WORD
- 1)
1223 || !splitting
[INTVAL (XEXP (op
, 1)) - BITS_PER_WORD
])
1226 bitmap_set_bit (decomposable_context
, REGNO (op_operand
));
1229 bitmap_set_bit (decomposable_context
, REGNO (SET_DEST (set
)));
1234 /* Decompose a more than word wide shift (in INSN) of a multiword
1235 pseudo or a multiword zero-extend of a wordmode pseudo into a move
1236 and 'set to zero' insn. Return a pointer to the new insn when a
1237 replacement was done. */
1240 resolve_shift_zext (rtx insn
)
1246 rtx src_reg
, dest_reg
, dest_upper
, upper_src
= NULL_RTX
;
1247 int src_reg_num
, dest_reg_num
, offset1
, offset2
, src_offset
;
1249 set
= single_set (insn
);
1254 if (GET_CODE (op
) != ASHIFT
1255 && GET_CODE (op
) != LSHIFTRT
1256 && GET_CODE (op
) != ASHIFTRT
1257 && GET_CODE (op
) != ZERO_EXTEND
)
1260 op_operand
= XEXP (op
, 0);
1262 /* We can tear this operation apart only if the regs were already
1264 if (!resolve_reg_p (SET_DEST (set
)) && !resolve_reg_p (op_operand
))
1267 /* src_reg_num is the number of the word mode register which we
1268 are operating on. For a left shift and a zero_extend on little
1269 endian machines this is register 0. */
1270 src_reg_num
= (GET_CODE (op
) == LSHIFTRT
|| GET_CODE (op
) == ASHIFTRT
)
1273 if (WORDS_BIG_ENDIAN
1274 && GET_MODE_SIZE (GET_MODE (op_operand
)) > UNITS_PER_WORD
)
1275 src_reg_num
= 1 - src_reg_num
;
1277 if (GET_CODE (op
) == ZERO_EXTEND
)
1278 dest_reg_num
= WORDS_BIG_ENDIAN
? 1 : 0;
1280 dest_reg_num
= 1 - src_reg_num
;
1282 offset1
= UNITS_PER_WORD
* dest_reg_num
;
1283 offset2
= UNITS_PER_WORD
* (1 - dest_reg_num
);
1284 src_offset
= UNITS_PER_WORD
* src_reg_num
;
1288 dest_reg
= simplify_gen_subreg_concatn (word_mode
, SET_DEST (set
),
1289 GET_MODE (SET_DEST (set
)),
1291 dest_upper
= simplify_gen_subreg_concatn (word_mode
, SET_DEST (set
),
1292 GET_MODE (SET_DEST (set
)),
1294 src_reg
= simplify_gen_subreg_concatn (word_mode
, op_operand
,
1295 GET_MODE (op_operand
),
1297 if (GET_CODE (op
) == ASHIFTRT
1298 && INTVAL (XEXP (op
, 1)) != 2 * BITS_PER_WORD
- 1)
1299 upper_src
= expand_shift (RSHIFT_EXPR
, word_mode
, copy_rtx (src_reg
),
1300 BITS_PER_WORD
- 1, NULL_RTX
, 0);
1302 if (GET_CODE (op
) != ZERO_EXTEND
)
1304 int shift_count
= INTVAL (XEXP (op
, 1));
1305 if (shift_count
> BITS_PER_WORD
)
1306 src_reg
= expand_shift (GET_CODE (op
) == ASHIFT
?
1307 LSHIFT_EXPR
: RSHIFT_EXPR
,
1309 shift_count
- BITS_PER_WORD
,
1310 dest_reg
, GET_CODE (op
) != ASHIFTRT
);
1313 if (dest_reg
!= src_reg
)
1314 emit_move_insn (dest_reg
, src_reg
);
1315 if (GET_CODE (op
) != ASHIFTRT
)
1316 emit_move_insn (dest_upper
, CONST0_RTX (word_mode
));
1317 else if (INTVAL (XEXP (op
, 1)) == 2 * BITS_PER_WORD
- 1)
1318 emit_move_insn (dest_upper
, copy_rtx (src_reg
));
1320 emit_move_insn (dest_upper
, upper_src
);
1321 insns
= get_insns ();
1325 emit_insn_before (insns
, insn
);
1330 fprintf (dump_file
, "; Replacing insn: %d with insns: ", INSN_UID (insn
));
1331 for (in
= insns
; in
!= insn
; in
= NEXT_INSN (in
))
1332 fprintf (dump_file
, "%d ", INSN_UID (in
));
1333 fprintf (dump_file
, "\n");
1340 /* Print to dump_file a description of what we're doing with shift code CODE.
1341 SPLITTING[X] is true if we are splitting shifts by X + BITS_PER_WORD. */
1344 dump_shift_choices (enum rtx_code code
, bool *splitting
)
1350 " Splitting mode %s for %s lowering with shift amounts = ",
1351 GET_MODE_NAME (twice_word_mode
), GET_RTX_NAME (code
));
1353 for (i
= 0; i
< BITS_PER_WORD
; i
++)
1356 fprintf (dump_file
, "%s%d", sep
, i
+ BITS_PER_WORD
);
1359 fprintf (dump_file
, "\n");
1362 /* Print to dump_file a description of what we're doing when optimizing
1363 for speed or size; SPEED_P says which. DESCRIPTION is a description
1364 of the SPEED_P choice. */
1367 dump_choices (bool speed_p
, const char *description
)
1371 fprintf (dump_file
, "Choices when optimizing for %s:\n", description
);
1373 for (i
= 0; i
< MAX_MACHINE_MODE
; i
++)
1374 if (GET_MODE_SIZE ((enum machine_mode
) i
) > UNITS_PER_WORD
)
1375 fprintf (dump_file
, " %s mode %s for copy lowering.\n",
1376 choices
[speed_p
].move_modes_to_split
[i
]
1379 GET_MODE_NAME ((enum machine_mode
) i
));
1381 fprintf (dump_file
, " %s mode %s for zero_extend lowering.\n",
1382 choices
[speed_p
].splitting_zext
? "Splitting" : "Skipping",
1383 GET_MODE_NAME (twice_word_mode
));
1385 dump_shift_choices (ASHIFT
, choices
[speed_p
].splitting_ashift
);
1386 dump_shift_choices (LSHIFTRT
, choices
[speed_p
].splitting_lshiftrt
);
1387 dump_shift_choices (ASHIFTRT
, choices
[speed_p
].splitting_ashiftrt
);
1388 fprintf (dump_file
, "\n");
1391 /* Look for registers which are always accessed via word-sized SUBREGs
1392 or -if DECOMPOSE_COPIES is true- via copies. Decompose these
1393 registers into several word-sized pseudo-registers. */
1396 decompose_multiword_subregs (bool decompose_copies
)
1404 dump_choices (false, "size");
1405 dump_choices (true, "speed");
1408 /* Check if this target even has any modes to consider lowering. */
1409 if (!choices
[false].something_to_do
&& !choices
[true].something_to_do
)
1412 fprintf (dump_file
, "Nothing to do!\n");
1416 max
= max_reg_num ();
1418 /* First see if there are any multi-word pseudo-registers. If there
1419 aren't, there is nothing we can do. This should speed up this
1420 pass in the normal case, since it should be faster than scanning
1424 bool useful_modes_seen
= false;
1426 for (i
= FIRST_PSEUDO_REGISTER
; i
< max
; ++i
)
1427 if (regno_reg_rtx
[i
] != NULL
)
1429 enum machine_mode mode
= GET_MODE (regno_reg_rtx
[i
]);
1430 if (choices
[false].move_modes_to_split
[(int) mode
]
1431 || choices
[true].move_modes_to_split
[(int) mode
])
1433 useful_modes_seen
= true;
1438 if (!useful_modes_seen
)
1441 fprintf (dump_file
, "Nothing to lower in this function.\n");
1448 df_set_flags (DF_DEFER_INSN_RESCAN
);
1452 /* FIXME: It may be possible to change this code to look for each
1453 multi-word pseudo-register and to find each insn which sets or
1454 uses that register. That should be faster than scanning all the
1457 decomposable_context
= BITMAP_ALLOC (NULL
);
1458 non_decomposable_context
= BITMAP_ALLOC (NULL
);
1459 subreg_context
= BITMAP_ALLOC (NULL
);
1461 reg_copy_graph
.create (max
);
1462 reg_copy_graph
.safe_grow_cleared (max
);
1463 memset (reg_copy_graph
.address (), 0, sizeof (bitmap
) * max
);
1465 speed_p
= optimize_function_for_speed_p (cfun
);
1466 FOR_EACH_BB_FN (bb
, cfun
)
1470 FOR_BB_INSNS (bb
, insn
)
1473 enum classify_move_insn cmi
;
1477 || GET_CODE (PATTERN (insn
)) == CLOBBER
1478 || GET_CODE (PATTERN (insn
)) == USE
)
1481 recog_memoized (insn
);
1483 if (find_decomposable_shift_zext (insn
, speed_p
))
1486 extract_insn (insn
);
1488 set
= simple_move (insn
, speed_p
);
1491 cmi
= NOT_SIMPLE_MOVE
;
1494 /* We mark pseudo-to-pseudo copies as decomposable during the
1495 second pass only. The first pass is so early that there is
1496 good chance such moves will be optimized away completely by
1497 subsequent optimizations anyway.
1499 However, we call find_pseudo_copy even during the first pass
1500 so as to properly set up the reg_copy_graph. */
1501 if (find_pseudo_copy (set
))
1502 cmi
= decompose_copies
? DECOMPOSABLE_SIMPLE_MOVE
: SIMPLE_MOVE
;
1507 n
= recog_data
.n_operands
;
1508 for (i
= 0; i
< n
; ++i
)
1510 for_each_rtx (&recog_data
.operand
[i
],
1511 find_decomposable_subregs
,
1514 /* We handle ASM_OPERANDS as a special case to support
1515 things like x86 rdtsc which returns a DImode value.
1516 We can decompose the output, which will certainly be
1517 operand 0, but not the inputs. */
1519 if (cmi
== SIMPLE_MOVE
1520 && GET_CODE (SET_SRC (set
)) == ASM_OPERANDS
)
1522 gcc_assert (i
== 0);
1523 cmi
= NOT_SIMPLE_MOVE
;
1529 bitmap_and_compl_into (decomposable_context
, non_decomposable_context
);
1530 if (!bitmap_empty_p (decomposable_context
))
1534 sbitmap_iterator sbi
;
1535 bitmap_iterator iter
;
1538 propagate_pseudo_copies ();
1540 sub_blocks
= sbitmap_alloc (last_basic_block_for_fn (cfun
));
1541 bitmap_clear (sub_blocks
);
1543 EXECUTE_IF_SET_IN_BITMAP (decomposable_context
, 0, regno
, iter
)
1544 decompose_register (regno
);
1546 FOR_EACH_BB_FN (bb
, cfun
)
1550 FOR_BB_INSNS (bb
, insn
)
1557 pat
= PATTERN (insn
);
1558 if (GET_CODE (pat
) == CLOBBER
)
1559 resolve_clobber (pat
, insn
);
1560 else if (GET_CODE (pat
) == USE
)
1561 resolve_use (pat
, insn
);
1562 else if (DEBUG_INSN_P (insn
))
1563 resolve_debug (insn
);
1569 recog_memoized (insn
);
1570 extract_insn (insn
);
1572 set
= simple_move (insn
, speed_p
);
1575 rtx orig_insn
= insn
;
1576 bool cfi
= control_flow_insn_p (insn
);
1578 /* We can end up splitting loads to multi-word pseudos
1579 into separate loads to machine word size pseudos.
1580 When this happens, we first had one load that can
1581 throw, and after resolve_simple_move we'll have a
1582 bunch of loads (at least two). All those loads may
1583 trap if we can have non-call exceptions, so they
1584 all will end the current basic block. We split the
1585 block after the outer loop over all insns, but we
1586 make sure here that we will be able to split the
1587 basic block and still produce the correct control
1588 flow graph for it. */
1590 || (cfun
->can_throw_non_call_exceptions
1591 && can_throw_internal (insn
)));
1593 insn
= resolve_simple_move (set
, insn
);
1594 if (insn
!= orig_insn
)
1596 recog_memoized (insn
);
1597 extract_insn (insn
);
1600 bitmap_set_bit (sub_blocks
, bb
->index
);
1605 rtx decomposed_shift
;
1607 decomposed_shift
= resolve_shift_zext (insn
);
1608 if (decomposed_shift
!= NULL_RTX
)
1610 insn
= decomposed_shift
;
1611 recog_memoized (insn
);
1612 extract_insn (insn
);
1616 for (i
= recog_data
.n_operands
- 1; i
>= 0; --i
)
1617 for_each_rtx (recog_data
.operand_loc
[i
],
1621 resolve_reg_notes (insn
);
1623 if (num_validated_changes () > 0)
1625 for (i
= recog_data
.n_dups
- 1; i
>= 0; --i
)
1627 rtx
*pl
= recog_data
.dup_loc
[i
];
1628 int dup_num
= recog_data
.dup_num
[i
];
1629 rtx
*px
= recog_data
.operand_loc
[dup_num
];
1631 validate_unshare_change (insn
, pl
, *px
, 1);
1634 i
= apply_change_group ();
1641 /* If we had insns to split that caused control flow insns in the middle
1642 of a basic block, split those blocks now. Note that we only handle
1643 the case where splitting a load has caused multiple possibly trapping
1645 EXECUTE_IF_SET_IN_BITMAP (sub_blocks
, 0, i
, sbi
)
1650 bb
= BASIC_BLOCK_FOR_FN (cfun
, i
);
1651 insn
= BB_HEAD (bb
);
1656 if (control_flow_insn_p (insn
))
1658 /* Split the block after insn. There will be a fallthru
1659 edge, which is OK so we keep it. We have to create the
1660 exception edges ourselves. */
1661 fallthru
= split_block (bb
, insn
);
1662 rtl_make_eh_edge (NULL
, bb
, BB_END (bb
));
1663 bb
= fallthru
->dest
;
1664 insn
= BB_HEAD (bb
);
1667 insn
= NEXT_INSN (insn
);
1671 sbitmap_free (sub_blocks
);
1678 FOR_EACH_VEC_ELT (reg_copy_graph
, i
, b
)
1683 reg_copy_graph
.release ();
1685 BITMAP_FREE (decomposable_context
);
1686 BITMAP_FREE (non_decomposable_context
);
1687 BITMAP_FREE (subreg_context
);
1690 /* Implement first lower subreg pass. */
1694 const pass_data pass_data_lower_subreg
=
1696 RTL_PASS
, /* type */
1697 "subreg1", /* name */
1698 OPTGROUP_NONE
, /* optinfo_flags */
1699 TV_LOWER_SUBREG
, /* tv_id */
1700 0, /* properties_required */
1701 0, /* properties_provided */
1702 0, /* properties_destroyed */
1703 0, /* todo_flags_start */
1704 0, /* todo_flags_finish */
1707 class pass_lower_subreg
: public rtl_opt_pass
1710 pass_lower_subreg (gcc::context
*ctxt
)
1711 : rtl_opt_pass (pass_data_lower_subreg
, ctxt
)
1714 /* opt_pass methods: */
1715 virtual bool gate (function
*) { return flag_split_wide_types
!= 0; }
1716 virtual unsigned int execute (function
*)
1718 decompose_multiword_subregs (false);
1722 }; // class pass_lower_subreg
1727 make_pass_lower_subreg (gcc::context
*ctxt
)
1729 return new pass_lower_subreg (ctxt
);
1732 /* Implement second lower subreg pass. */
1736 const pass_data pass_data_lower_subreg2
=
1738 RTL_PASS
, /* type */
1739 "subreg2", /* name */
1740 OPTGROUP_NONE
, /* optinfo_flags */
1741 TV_LOWER_SUBREG
, /* tv_id */
1742 0, /* properties_required */
1743 0, /* properties_provided */
1744 0, /* properties_destroyed */
1745 0, /* todo_flags_start */
1746 TODO_df_finish
, /* todo_flags_finish */
1749 class pass_lower_subreg2
: public rtl_opt_pass
1752 pass_lower_subreg2 (gcc::context
*ctxt
)
1753 : rtl_opt_pass (pass_data_lower_subreg2
, ctxt
)
1756 /* opt_pass methods: */
1757 virtual bool gate (function
*) { return flag_split_wide_types
!= 0; }
1758 virtual unsigned int execute (function
*)
1760 decompose_multiword_subregs (true);
1764 }; // class pass_lower_subreg2
1769 make_pass_lower_subreg2 (gcc::context
*ctxt
)
1771 return new pass_lower_subreg2 (ctxt
);