/* Decompose multiword subregs.
   Copyright (C) 2007-2021 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>
                  Ian Lance Taylor <iant@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "insn-config.h"
#include "tree-pass.h"
#include "lower-subreg.h"

/* Decompose multi-word pseudo-registers into individual
   pseudo-registers when possible and profitable.  This is possible
   when all the uses of a multi-word register are via SUBREG, or are
   copies of the register to another location.  Breaking apart the
   register permits more CSE and permits better register allocation.
   This is profitable if the machine does not have move instructions
   to do this.

   This pass only splits moves with modes that are wider than
   word_mode and ASHIFTs, LSHIFTRTs, ASHIFTRTs and ZERO_EXTENDs with
   integer modes that are twice the width of word_mode.  The latter
   could be generalized if there was a need to do this, but the trend in
   architectures is to not need this.

   There are two useful preprocessor defines for use by maintainers:

   #define LOG_COSTS 1

   if you wish to see the actual cost estimates that are being used
   for each mode wider than word mode and the cost estimates for zero
   extension and the shifts.  This can be useful when port maintainers
   are tuning insn rtx costs.

   #define FORCE_LOWERING 1

   if you wish to test the pass with all the transformation forced on.
   This can be useful for finding bugs in the transformations.  */

#define LOG_COSTS 0
#define FORCE_LOWERING 0

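/* For example, on a 32-bit target a DImode copy

     (set (reg:DI 100) (reg:DI 101))

   whose registers are otherwise accessed only through word-sized
   SUBREGs can be rewritten as two independent SImode moves of the low
   and high words.  (Illustrative example only; the modes and register
   numbers are arbitrary.)  */
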
/* Bit N in this bitmap is set if regno N is used in a context in
   which we can decompose it.  */
static bitmap decomposable_context;

/* Bit N in this bitmap is set if regno N is used in a context in
   which it cannot be decomposed.  */
static bitmap non_decomposable_context;

/* Bit N in this bitmap is set if regno N is used in a subreg
   which changes the mode but not the size.  This typically happens
   when the register is accessed as a floating-point value; we want to
   avoid generating accesses to its subwords in integer modes.  */
static bitmap subreg_context;

/* Bit N in the bitmap in element M of this array is set if there is a
   copy from reg M to reg N.  */
static vec<bitmap> reg_copy_graph;

struct target_lower_subreg default_target_lower_subreg;
struct target_lower_subreg *this_target_lower_subreg
  = &default_target_lower_subreg;

#define twice_word_mode \
  this_target_lower_subreg->x_twice_word_mode
#define choices \
  this_target_lower_subreg->x_choices

/* Return true if MODE is a mode we know how to lower.  When returning true,
   store its byte size in *BYTES and its word size in *WORDS.  */

static inline bool
interesting_mode_p (machine_mode mode, unsigned int *bytes,
                    unsigned int *words)
{
  if (!GET_MODE_SIZE (mode).is_constant (bytes))
    return false;
  *words = CEIL (*bytes, UNITS_PER_WORD);
  return true;
}

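/* For instance, on a 32-bit target with 4-byte words,
   interesting_mode_p (DImode, &bytes, &words) returns true with
   bytes == 8 and words == 2, while a variable-sized mode is rejected
   because its size is not a compile-time constant.  */
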
/* RTXes used while computing costs.  */

struct cost_rtxes {
  /* Source and target registers.  */
  rtx source;
  rtx target;

  /* A twice_word_mode ZERO_EXTEND of SOURCE.  */
  rtx zext;

  /* A shift of SOURCE.  */
  rtx shift;

  /* A SET of TARGET.  */
  rtx set;
};

/* Return the cost of a CODE shift in mode MODE by OP1 bits, using the
   rtxes in RTXES.  SPEED_P selects between the speed and size cost.  */

static int
shift_cost (bool speed_p, struct cost_rtxes *rtxes, enum rtx_code code,
            machine_mode mode, int op1)
{
  PUT_CODE (rtxes->shift, code);
  PUT_MODE (rtxes->shift, mode);
  PUT_MODE (rtxes->source, mode);
  XEXP (rtxes->shift, 1) = gen_int_shift_amount (mode, op1);
  return set_src_cost (rtxes->shift, mode, speed_p);
}

/* For each X in the range [0, BITS_PER_WORD), set SPLITTING[X]
   to true if it is profitable to split a double-word CODE shift
   of X + BITS_PER_WORD bits.  SPEED_P says whether we are testing
   for speed or size profitability.

   Use the rtxes in RTXES to calculate costs.  WORD_MOVE_ZERO_COST is
   the cost of moving zero into a word-mode register.  WORD_MOVE_COST
   is the cost of moving between word registers.  */

static void
compute_splitting_shift (bool speed_p, struct cost_rtxes *rtxes,
                         bool *splitting, enum rtx_code code,
                         int word_move_zero_cost, int word_move_cost)
{
  int wide_cost, narrow_cost, upper_cost, i;

  for (i = 0; i < BITS_PER_WORD; i++)
    {
      wide_cost = shift_cost (speed_p, rtxes, code, twice_word_mode,
                              i + BITS_PER_WORD);
      if (i == 0)
        narrow_cost = word_move_cost;
      else
        narrow_cost = shift_cost (speed_p, rtxes, code, word_mode, i);

      if (code != ASHIFTRT)
        upper_cost = word_move_zero_cost;
      else if (i == BITS_PER_WORD - 1)
        upper_cost = word_move_cost;
      else
        upper_cost = shift_cost (speed_p, rtxes, code, word_mode,
                                 BITS_PER_WORD - 1);

      if (LOG_COSTS)
        fprintf (stderr, "%s %s by %d: original cost %d, split cost %d + %d\n",
                 GET_MODE_NAME (twice_word_mode), GET_RTX_NAME (code),
                 i + BITS_PER_WORD, wide_cost, narrow_cost, upper_cost);

      if (FORCE_LOWERING || wide_cost >= narrow_cost + upper_cost)
        splitting[i] = true;
    }
}

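/* In other words, a twice_word_mode shift by i + BITS_PER_WORD is
   considered splittable when the target's cost of the wide shift is
   at least the cost of a word_mode shift by i into one half (a plain
   word move when i == 0) plus the cost of producing the other half
   (a zero, a word move, or a word shift for ASHIFTRT).  */
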
/* Compute what we should do when optimizing for speed or size; SPEED_P
   selects which.  Use RTXES for computing costs.  */

compute_costs (bool speed_p, struct cost_rtxes *rtxes)
  int word_move_zero_cost, word_move_cost;

  PUT_MODE (rtxes->target, word_mode);
  SET_SRC (rtxes->set) = CONST0_RTX (word_mode);
  word_move_zero_cost = set_rtx_cost (rtxes->set, speed_p);

  SET_SRC (rtxes->set) = rtxes->source;
  word_move_cost = set_rtx_cost (rtxes->set, speed_p);

    fprintf (stderr, "%s move: from zero cost %d, from reg cost %d\n",
             GET_MODE_NAME (word_mode), word_move_zero_cost, word_move_cost);

  for (i = 0; i < MAX_MACHINE_MODE; i++)
      machine_mode mode = (machine_mode) i;
      unsigned int size, factor;
      if (interesting_mode_p (mode, &size, &factor) && factor > 1)
          unsigned int mode_move_cost;

          PUT_MODE (rtxes->target, mode);
          PUT_MODE (rtxes->source, mode);
          mode_move_cost = set_rtx_cost (rtxes->set, speed_p);

            fprintf (stderr, "%s move: original cost %d, split cost %d * %d\n",
                     GET_MODE_NAME (mode), mode_move_cost,
                     word_move_cost, factor);

          if (FORCE_LOWERING || mode_move_cost >= word_move_cost * factor)
              choices[speed_p].move_modes_to_split[i] = true;
              choices[speed_p].something_to_do = true;

  /* For the moves and shifts, the only case that is checked is one
     where the mode of the target is an integer mode twice the width
     of word_mode.

     If it is not profitable to split a double word move then do not
     even consider the shifts or the zero extension.  */
  if (choices[speed_p].move_modes_to_split[(int) twice_word_mode])
      /* The only case to check here is whether moving the upper part with
         a zero is cheaper than doing the zext itself.  */
      PUT_MODE (rtxes->source, word_mode);
      zext_cost = set_src_cost (rtxes->zext, twice_word_mode, speed_p);

        fprintf (stderr, "%s %s: original cost %d, split cost %d + %d\n",
                 GET_MODE_NAME (twice_word_mode), GET_RTX_NAME (ZERO_EXTEND),
                 zext_cost, word_move_cost, word_move_zero_cost);

      if (FORCE_LOWERING || zext_cost >= word_move_cost + word_move_zero_cost)
        choices[speed_p].splitting_zext = true;

      compute_splitting_shift (speed_p, rtxes,
                               choices[speed_p].splitting_ashift, ASHIFT,
                               word_move_zero_cost, word_move_cost);
      compute_splitting_shift (speed_p, rtxes,
                               choices[speed_p].splitting_lshiftrt, LSHIFTRT,
                               word_move_zero_cost, word_move_cost);
      compute_splitting_shift (speed_p, rtxes,
                               choices[speed_p].splitting_ashiftrt, ASHIFTRT,
                               word_move_zero_cost, word_move_cost);

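/* The results recorded above are consulted through the choices macro,
   e.g. choices[speed_p].move_modes_to_split[(int) mode] in simple_move
   below and choices[speed_p].splitting_zext in
   find_decomposable_shift_zext.  */
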
/* Do one-per-target initialisation.  This involves determining
   which operations on the machine are profitable.  If none are found,
   then the pass just returns when called.  */

init_lower_subreg (void)
  struct cost_rtxes rtxes;

  memset (this_target_lower_subreg, 0, sizeof (*this_target_lower_subreg));

  twice_word_mode = GET_MODE_2XWIDER_MODE (word_mode).require ();

  rtxes.target = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
  rtxes.source = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2);
  rtxes.set = gen_rtx_SET (rtxes.target, rtxes.source);
  rtxes.zext = gen_rtx_ZERO_EXTEND (twice_word_mode, rtxes.source);
  rtxes.shift = gen_rtx_ASHIFT (twice_word_mode, rtxes.source, const0_rtx);

    fprintf (stderr, "\nSize costs\n==========\n\n");
  compute_costs (false, &rtxes);

    fprintf (stderr, "\nSpeed costs\n===========\n\n");
  compute_costs (true, &rtxes);

simple_move_operand (rtx x)
  if (GET_CODE (x) == SUBREG)

  if (GET_CODE (x) == LABEL_REF
      || GET_CODE (x) == SYMBOL_REF
      || GET_CODE (x) == HIGH
      || GET_CODE (x) == CONST)

      && (MEM_VOLATILE_P (x)
          || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x))))

/* If X is an operator that can be treated as a simple move that we
   can split, then return the operand that is operated on.  */

operand_for_swap_move_operator (rtx x)
  /* A word sized rotate of a register pair is equivalent to swapping
     the registers in the register pair.  */
  if (GET_CODE (x) == ROTATE
      && GET_MODE (x) == twice_word_mode
      && simple_move_operand (XEXP (x, 0))
      && CONST_INT_P (XEXP (x, 1))
      && INTVAL (XEXP (x, 1)) == BITS_PER_WORD)

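/* For example, on a 32-bit target (rotate:DI (reg:DI 100) (const_int 32))
   moves the low word of reg 100 into the high word of the result and
   vice versa, which is exactly a swap of the two word-sized halves.  */
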
/* If INSN is a single set between two objects that we want to split,
   return the single set.  SPEED_P says whether we are optimizing
   INSN for speed or size.

   INSN should have been passed to recog and extract_insn before this
   is called.  */

simple_move (rtx_insn *insn, bool speed_p)
  if (recog_data.n_operands != 2)

  set = single_set (insn);

  if (x != recog_data.operand[0] && x != recog_data.operand[1])

  if (!simple_move_operand (x))

  if ((op = operand_for_swap_move_operator (x)) != NULL_RTX)

  if (x != recog_data.operand[0] && x != recog_data.operand[1])

  /* For the src we can handle ASM_OPERANDS, and it is beneficial for
     things like x86 rdtsc which returns a DImode value.  */
  if (GET_CODE (x) != ASM_OPERANDS
      && !simple_move_operand (x))

  /* We try to decompose in integer modes, to avoid generating
     inefficient code copying between integer and floating point
     registers.  That means that we can't decompose if this is a
     non-integer mode for which there is no integer mode of the same
     size.  */
  mode = GET_MODE (SET_DEST (set));
  if (!SCALAR_INT_MODE_P (mode)
      && !int_mode_for_size (GET_MODE_BITSIZE (mode), 0).exists ())

  /* Reject PARTIAL_INT modes.  They are used for processor specific
     purposes and it's probably best not to tamper with them.  */
  if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)

  if (!choices[speed_p].move_modes_to_split[(int) mode])

/* If SET is a copy from one multi-word pseudo-register to another,
   record that in reg_copy_graph.  Return whether it is such a
   copy.  */

find_pseudo_copy (rtx set)
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);

  if ((op = operand_for_swap_move_operator (src)) != NULL_RTX)

  if (!REG_P (dest) || !REG_P (src))

  if (HARD_REGISTER_NUM_P (rd) || HARD_REGISTER_NUM_P (rs))

  b = reg_copy_graph[rs];
      b = BITMAP_ALLOC (NULL);
      reg_copy_graph[rs] = b;

  bitmap_set_bit (b, rd);

/* Look through the registers in DECOMPOSABLE_CONTEXT.  For each case
   where they are copied to another register, add the register to
   which they are copied to DECOMPOSABLE_CONTEXT.  Use
   NON_DECOMPOSABLE_CONTEXT to limit this--we don't bother to track
   copies of registers which are in NON_DECOMPOSABLE_CONTEXT.  */

propagate_pseudo_copies (void)
  auto_bitmap queue, propagate;

  bitmap_copy (queue, decomposable_context);
      bitmap_iterator iter;

      bitmap_clear (propagate);

      EXECUTE_IF_SET_IN_BITMAP (queue, 0, i, iter)
          bitmap b = reg_copy_graph[i];
            bitmap_ior_and_compl_into (propagate, b, non_decomposable_context);

      bitmap_and_compl (queue, propagate, decomposable_context);
      bitmap_ior_into (decomposable_context, propagate);
  while (!bitmap_empty_p (queue));

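/* This is a standard worklist iteration: QUEUE holds registers whose
   copy targets have not yet been examined, and the loop reaches a
   fixed point once no new decomposable registers are discovered.  */
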
/* A pointer to one of these values is passed to
   find_decomposable_subregs.  */

enum classify_move_insn
{
  /* Not a simple move from one location to another.  */
  NOT_SIMPLE_MOVE,
  /* A simple move we want to decompose.  */
  DECOMPOSABLE_SIMPLE_MOVE,
  /* Any other simple move.  */
  SIMPLE_MOVE
};

/* If we find a SUBREG in *LOC which we could use to decompose a
   pseudo-register, set a bit in DECOMPOSABLE_CONTEXT.  If we find an
   unadorned register which is not a simple pseudo-register copy,
   DATA will point at the type of move, and we set a bit in
   DECOMPOSABLE_CONTEXT or NON_DECOMPOSABLE_CONTEXT as appropriate.  */

find_decomposable_subregs (rtx *loc, enum classify_move_insn *pcmi)
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
      if (GET_CODE (x) == SUBREG)
          rtx inner = SUBREG_REG (x);
          unsigned int regno, outer_size, inner_size, outer_words, inner_words;

          regno = REGNO (inner);
          if (HARD_REGISTER_NUM_P (regno))
              iter.skip_subrtxes ();

          if (!interesting_mode_p (GET_MODE (x), &outer_size, &outer_words)
              || !interesting_mode_p (GET_MODE (inner), &inner_size,

          /* We only try to decompose single word subregs of multi-word
             registers.  When we find one, we return -1 to avoid iterating
             over the inner register.

             ??? This doesn't allow, e.g., DImode subregs of TImode values
             on 32-bit targets.  We would need to record the way the
             pseudo-register was used, and only decompose if all the uses
             were the same number and size of pieces.  Hopefully this
             doesn't happen much.  */

              /* Don't allow to decompose floating point subregs of
                 multi-word pseudos if the floating point mode does
                 not have word size, because otherwise we'd generate
                 a subreg with that floating mode from a different
                 sized integral pseudo which is not allowed by  */
              && (!FLOAT_MODE_P (GET_MODE (x))
                  || outer_size == UNITS_PER_WORD))
              bitmap_set_bit (decomposable_context, regno);
              iter.skip_subrtxes ();

          /* If this is a cast from one mode to another, where the modes
             have the same size, and they are not tieable, then mark this
             register as non-decomposable.  If we decompose it we are
             likely to mess up whatever the backend is trying to do.  */
              && outer_size == inner_size
              && !targetm.modes_tieable_p (GET_MODE (x), GET_MODE (inner)))
              bitmap_set_bit (non_decomposable_context, regno);
              bitmap_set_bit (subreg_context, regno);
              iter.skip_subrtxes ();

          unsigned int regno, size, words;

          /* We will see an outer SUBREG before we see the inner REG, so
             when we see a plain REG here it means a direct reference to
             the register.

             If this is not a simple copy from one location to another,
             then we cannot decompose this register.  If this is a simple
             copy we want to decompose, and the mode is right,
             then we mark the register as decomposable.
             Otherwise we don't say anything about this register --
             it could be decomposed, but whether that would be
             profitable depends upon how it is used elsewhere.

             We only set bits in the bitmap for multi-word
             pseudo-registers, since those are the only ones we care about
             and it keeps the size of the bitmaps down.  */

          if (!HARD_REGISTER_NUM_P (regno)
              && interesting_mode_p (GET_MODE (x), &size, &words)

              case NOT_SIMPLE_MOVE:
                bitmap_set_bit (non_decomposable_context, regno);
              case DECOMPOSABLE_SIMPLE_MOVE:
                if (targetm.modes_tieable_p (GET_MODE (x), word_mode))
                  bitmap_set_bit (decomposable_context, regno);

          enum classify_move_insn cmi_mem = NOT_SIMPLE_MOVE;

          /* Any registers used in a MEM do not participate in a
             SIMPLE_MOVE or DECOMPOSABLE_SIMPLE_MOVE.  Do our own recursion
             here, and return -1 to block the parent's recursion.  */
          find_decomposable_subregs (&XEXP (x, 0), &cmi_mem);
          iter.skip_subrtxes ();

/* Decompose REGNO into word-sized components.  We smash the REG node
   in place.  This ensures that (1) something goes wrong quickly if we
   fail to make some replacement, and (2) the debug information inside
   the symbol table is automatically kept up to date.  */

decompose_register (unsigned int regno)
  unsigned int size, words, i;

  reg = regno_reg_rtx[regno];

  regno_reg_rtx[regno] = NULL_RTX;

  if (!interesting_mode_p (GET_MODE (reg), &size, &words))

  v = rtvec_alloc (words);
  for (i = 0; i < words; ++i)
    RTVEC_ELT (v, i) = gen_reg_rtx_offset (reg, word_mode, i * UNITS_PER_WORD);

  PUT_CODE (reg, CONCATN);

      fprintf (dump_file, "; Splitting reg %u ->", regno);
      for (i = 0; i < words; ++i)
        fprintf (dump_file, " %u", REGNO (XVECEXP (reg, 0, i)));
      fputc ('\n', dump_file);

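/* After this, a DImode pseudo on a 32-bit target looks like
   (concatn:DI [(reg:SI a) (reg:SI b)]), where a and b are fresh
   word-mode pseudos, and later uses of its word-sized SUBREGs are
   rewritten to refer to those pseudos directly.  */
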
/* Get a SUBREG of a CONCATN.  */

simplify_subreg_concatn (machine_mode outermode, rtx op, poly_uint64 orig_byte)
  unsigned int outer_size, outer_words, inner_size, inner_words;
  machine_mode innermode, partmode;
  unsigned int final_offset;

  innermode = GET_MODE (op);
  if (!interesting_mode_p (outermode, &outer_size, &outer_words)
      || !interesting_mode_p (innermode, &inner_size, &inner_words))

  /* Must be constant if interesting_mode_p passes.  */
  byte = orig_byte.to_constant ();
  gcc_assert (GET_CODE (op) == CONCATN);
  gcc_assert (byte % outer_size == 0);

  gcc_assert (byte < inner_size);
  if (outer_size > inner_size)

  inner_size /= XVECLEN (op, 0);
  part = XVECEXP (op, 0, byte / inner_size);
  partmode = GET_MODE (part);

  final_offset = byte % inner_size;
  if (final_offset + outer_size > inner_size)

  /* VECTOR_CSTs in debug expressions are expanded into CONCATN instead of
     regular CONST_VECTORs.  They have vector or integer modes, depending
     on the capabilities of the target.  Cope with them.  */
  if (partmode == VOIDmode && VECTOR_MODE_P (innermode))
    partmode = GET_MODE_INNER (innermode);
  else if (partmode == VOIDmode)
    partmode = mode_for_size (inner_size * BITS_PER_UNIT,
                              GET_MODE_CLASS (innermode), 0).require ();

  return simplify_gen_subreg (outermode, part, partmode, final_offset);

/* Wrapper around simplify_gen_subreg which handles CONCATN.  */

simplify_gen_subreg_concatn (machine_mode outermode, rtx op,
                             machine_mode innermode, unsigned int byte)

  /* We have to handle generating a SUBREG of a SUBREG of a CONCATN.
     If OP is a SUBREG of a CONCATN, then it must be a simple mode
     change with the same size and offset 0, or it must extract a
     part.  We shouldn't see anything else here.  */
  if (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == CONCATN)
      if (known_eq (GET_MODE_SIZE (GET_MODE (op)),
                    GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && known_eq (SUBREG_BYTE (op), 0))
        return simplify_gen_subreg_concatn (outermode, SUBREG_REG (op),
                                            GET_MODE (SUBREG_REG (op)), byte);

      op2 = simplify_subreg_concatn (GET_MODE (op), SUBREG_REG (op),

          /* We don't handle paradoxical subregs here.  */
          gcc_assert (!paradoxical_subreg_p (outermode, GET_MODE (op)));
          gcc_assert (!paradoxical_subreg_p (op));
          op2 = simplify_subreg_concatn (outermode, SUBREG_REG (op),
                                         byte + SUBREG_BYTE (op));
          gcc_assert (op2 != NULL_RTX);

      gcc_assert (op != NULL_RTX);
      gcc_assert (innermode == GET_MODE (op));

  if (GET_CODE (op) == CONCATN)
    return simplify_subreg_concatn (outermode, op, byte);

  ret = simplify_gen_subreg (outermode, op, innermode, byte);

  /* If we see an insn like (set (reg:DI) (subreg:DI (reg:SI) 0)) then
     resolve_simple_move will ask for the high part of the paradoxical
     subreg, which does not have a value.  Just return a zero.  */
      && paradoxical_subreg_p (op))
    return CONST0_RTX (outermode);

  gcc_assert (ret != NULL_RTX);

/* Return whether we should resolve X into the registers into which it
   was decomposed.  */

resolve_reg_p (rtx x)
  return GET_CODE (x) == CONCATN;

/* Return whether X is a SUBREG of a register which we need to
   resolve.  */

resolve_subreg_p (rtx x)
  if (GET_CODE (x) != SUBREG)
  return resolve_reg_p (SUBREG_REG (x));

/* Look for SUBREGs in *LOC which need to be decomposed.  */

resolve_subreg_use (rtx *loc, rtx insn)
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, NONCONST)
      if (resolve_subreg_p (x))
          x = simplify_subreg_concatn (GET_MODE (x), SUBREG_REG (x),

          /* It is possible for a note to contain a reference which we can
             decompose.  In this case, return 1 to the caller to indicate
             that the note must be removed.  */

          validate_change (insn, loc, x, 1);
          iter.skip_subrtxes ();
      else if (resolve_reg_p (x))
        /* Return 1 to the caller to indicate that we found a direct
           reference to a register which is being decomposed.  This can
           happen inside notes, multiword shift or zero-extend
           instructions.  */

/* Resolve any decomposed registers which appear in register notes on
   INSN.  */

resolve_reg_notes (rtx_insn *insn)
  note = find_reg_equal_equiv_note (insn);
      int old_count = num_validated_changes ();
      if (resolve_subreg_use (&XEXP (note, 0), NULL_RTX))
        remove_note (insn, note);
      if (old_count != num_validated_changes ())
        df_notes_rescan (insn);

  pnote = &REG_NOTES (insn);
  while (*pnote != NULL_RTX)
      switch (REG_NOTE_KIND (note))
          if (resolve_reg_p (XEXP (note, 0)))

        *pnote = XEXP (note, 1);
        pnote = &XEXP (note, 1);

/* Return whether X can be decomposed into subwords.  */

can_decompose_p (rtx x)
      unsigned int regno = REGNO (x);

      if (HARD_REGISTER_NUM_P (regno))
          unsigned int byte, num_bytes, num_words;

          if (!interesting_mode_p (GET_MODE (x), &num_bytes, &num_words))
          for (byte = 0; byte < num_bytes; byte += UNITS_PER_WORD)
            if (simplify_subreg_regno (regno, GET_MODE (x), byte, word_mode) < 0)
        return !bitmap_bit_p (subreg_context, regno);

/* OPND is a concatn operand that is used with a simple move operator.
   Return a new rtx with the concatn's operands swapped.  */

resolve_operand_for_swap_move_operator (rtx opnd)
  gcc_assert (GET_CODE (opnd) == CONCATN);
  rtx concatn = copy_rtx (opnd);
  rtx op0 = XVECEXP (concatn, 0, 0);
  rtx op1 = XVECEXP (concatn, 0, 1);
  XVECEXP (concatn, 0, 0) = op1;
  XVECEXP (concatn, 0, 1) = op0;

/* Decompose the registers used in a simple move SET within INSN.  If
   we don't change anything, return INSN, otherwise return the start
   of the sequence of moves.  */

resolve_simple_move (rtx set, rtx_insn *insn)
  rtx src, dest, real_dest, src_op;
  machine_mode orig_mode, dest_mode;
  unsigned int orig_size, words;

  dest = SET_DEST (set);
  orig_mode = GET_MODE (dest);

  if (!interesting_mode_p (orig_mode, &orig_size, &words))
  gcc_assert (words > 1);

  /* We have to handle copying from a SUBREG of a decomposed reg where
     the SUBREG is larger than word size.  Rather than assume that we
     can take a word_mode SUBREG of the destination, we copy to a new
     register and then copy that to the destination.  */

  real_dest = NULL_RTX;

  if ((src_op = operand_for_swap_move_operator (src)) != NULL_RTX)
      if (resolve_reg_p (dest))
          /* DEST is a CONCATN, so swap its operands and strip
             the operator from SRC.  */
          dest = resolve_operand_for_swap_move_operator (dest);
      else if (resolve_reg_p (src_op))
          /* SRC is an operation on a CONCATN, so strip the operator and
             swap the CONCATN's operands.  */
          src = resolve_operand_for_swap_move_operator (src_op);

  if (GET_CODE (src) == SUBREG
      && resolve_reg_p (SUBREG_REG (src))
      && (maybe_ne (SUBREG_BYTE (src), 0)
          || maybe_ne (orig_size, GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))))
      dest = gen_reg_rtx (orig_mode);
      if (REG_P (real_dest))
        REG_ATTRS (dest) = REG_ATTRS (real_dest);

  /* Similarly if we are copying to a SUBREG of a decomposed reg where
     the SUBREG is larger than word size.  */

  if (GET_CODE (dest) == SUBREG
      && resolve_reg_p (SUBREG_REG (dest))
      && (maybe_ne (SUBREG_BYTE (dest), 0)
          || maybe_ne (orig_size,
                       GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))))

      reg = gen_reg_rtx (orig_mode);
      minsn = emit_move_insn (reg, src);
      smove = single_set (minsn);
      gcc_assert (smove != NULL_RTX);
      resolve_simple_move (smove, minsn);

  /* If we didn't have any big SUBREGS of decomposed registers, and
     neither side of the move is a register we are decomposing, then
     we don't have to do anything here.  */

  if (src == SET_SRC (set)
      && dest == SET_DEST (set)
      && !resolve_reg_p (src)
      && !resolve_subreg_p (src)
      && !resolve_reg_p (dest)
      && !resolve_subreg_p (dest))

  /* It's possible for the code to use a subreg of a decomposed
     register while forming an address.  We need to handle that before
     passing the address to emit_move_insn.  We pass NULL_RTX as the
     insn parameter to resolve_subreg_use because we cannot validate
     the insn yet.  */
  if (MEM_P (src) || MEM_P (dest))
        resolve_subreg_use (&XEXP (src, 0), NULL_RTX);
        resolve_subreg_use (&XEXP (dest, 0), NULL_RTX);
      acg = apply_change_group ();

  /* If SRC is a register which we can't decompose, or has side
     effects, we need to move via a temporary register.  */

  if (!can_decompose_p (src)
      || side_effects_p (src)
      || GET_CODE (src) == ASM_OPERANDS)
      reg = gen_reg_rtx (orig_mode);

          rtx_insn *move = emit_move_insn (reg, src);
              rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
                add_reg_note (move, REG_INC, XEXP (note, 0));
        emit_move_insn (reg, src);

  /* If DEST is a register which we can't decompose, or has side
     effects, we need to first move to a temporary register.  We
     handle the common case of pushing an operand directly.  We also
     go through a temporary register if it holds a floating point
     value.  This gives us better code on systems which can't move
     data easily between integer and floating point registers.  */

  dest_mode = orig_mode;
  pushing = push_operand (dest, dest_mode);
  if (!can_decompose_p (dest)
      || (side_effects_p (dest) && !pushing)
      || (!SCALAR_INT_MODE_P (dest_mode)
          && !resolve_reg_p (dest)
          && !resolve_subreg_p (dest)))
      if (real_dest == NULL_RTX)
      if (!SCALAR_INT_MODE_P (dest_mode))
        dest_mode = int_mode_for_mode (dest_mode).require ();
      dest = gen_reg_rtx (dest_mode);
      if (REG_P (real_dest))
        REG_ATTRS (dest) = REG_ATTRS (real_dest);

      unsigned int i, j, jinc;

      gcc_assert (orig_size % UNITS_PER_WORD == 0);
      gcc_assert (GET_CODE (XEXP (dest, 0)) != PRE_MODIFY);
      gcc_assert (GET_CODE (XEXP (dest, 0)) != POST_MODIFY);

      if (WORDS_BIG_ENDIAN == STACK_GROWS_DOWNWARD)

      for (i = 0; i < words; ++i, j += jinc)
          temp = copy_rtx (XEXP (dest, 0));
          temp = adjust_automodify_address_nv (dest, word_mode, temp,
                                               j * UNITS_PER_WORD);
          emit_move_insn (temp,
                          simplify_gen_subreg_concatn (word_mode, src,
                                                       j * UNITS_PER_WORD));

      if (REG_P (dest) && !HARD_REGISTER_NUM_P (REGNO (dest)))
        emit_clobber (dest);

      for (i = 0; i < words; ++i)
          rtx t = simplify_gen_subreg_concatn (word_mode, dest,
                                               i * UNITS_PER_WORD);
          /* simplify_gen_subreg_concatn can return (const_int 0) for
             some sub-objects of paradoxical subregs.  As a source operand,
             that's fine.  As a destination it must be avoided.  Those are
             supposed to be don't care bits, so we can just drop that store
             on the floor.  */
          if (t != CONST0_RTX (word_mode))
                            simplify_gen_subreg_concatn (word_mode, src,
                                                         i * UNITS_PER_WORD));

  if (real_dest != NULL_RTX)

      if (dest_mode == orig_mode)
        mdest = simplify_gen_subreg (orig_mode, dest, GET_MODE (dest), 0);
      minsn = emit_move_insn (real_dest, mdest);

      if (AUTO_INC_DEC && MEM_P (real_dest)
          && !(resolve_reg_p (real_dest) || resolve_subreg_p (real_dest)))
          rtx note = find_reg_note (insn, REG_INC, NULL_RTX);
            add_reg_note (minsn, REG_INC, XEXP (note, 0));

      smove = single_set (minsn);
      gcc_assert (smove != NULL_RTX);

      resolve_simple_move (smove, minsn);

  insns = get_insns ();

  copy_reg_eh_region_note_forward (insn, insns, NULL_RTX);

  emit_insn_before (insns, insn);

  /* If we get here via self-recursion, then INSN is not yet in the insns
     chain and delete_insn will fail.  We only want to remove INSN from the
     current sequence.  See PR56738.  */
  if (in_sequence_p ())

/* Change a CLOBBER of a decomposed register into a CLOBBER of the
   component registers.  Return whether we changed something.  */

resolve_clobber (rtx pat, rtx_insn *insn)
  machine_mode orig_mode;
  unsigned int orig_size, words, i;

  reg = XEXP (pat, 0);
  /* For clobbers we can look through paradoxical subregs which
     we do not handle in simplify_gen_subreg_concatn.  */
  if (paradoxical_subreg_p (reg))
    reg = SUBREG_REG (reg);
  if (!resolve_reg_p (reg) && !resolve_subreg_p (reg))

  orig_mode = GET_MODE (reg);
  if (!interesting_mode_p (orig_mode, &orig_size, &words))

  ret = validate_change (NULL_RTX, &XEXP (pat, 0),
                         simplify_gen_subreg_concatn (word_mode, reg,
  df_insn_rescan (insn);
  gcc_assert (ret != 0);

  for (i = words - 1; i > 0; --i)
      x = simplify_gen_subreg_concatn (word_mode, reg, orig_mode,
                                       i * UNITS_PER_WORD);
      x = gen_rtx_CLOBBER (VOIDmode, x);
      emit_insn_after (x, insn);

  resolve_reg_notes (insn);

/* A USE of a decomposed register is no longer meaningful.  Return
   whether we changed something.  */

resolve_use (rtx pat, rtx_insn *insn)
  if (resolve_reg_p (XEXP (pat, 0)) || resolve_subreg_p (XEXP (pat, 0)))

  resolve_reg_notes (insn);

/* A VAR_LOCATION can be simplified.  */

resolve_debug (rtx_insn *insn)
  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, &PATTERN (insn), NONCONST)
      if (resolve_subreg_p (x))
          x = simplify_subreg_concatn (GET_MODE (x), SUBREG_REG (x),

            x = copy_rtx (*loc);

      if (resolve_reg_p (x))
        *loc = copy_rtx (x);

  df_insn_rescan (insn);

  resolve_reg_notes (insn);

/* Check if INSN is a decomposable multiword-shift or zero-extend and
   set the decomposable_context bitmap accordingly.  SPEED_P is true
   if we are optimizing INSN for speed rather than size.  Return true
   if INSN is decomposable.  */

find_decomposable_shift_zext (rtx_insn *insn, bool speed_p)
  set = single_set (insn);

  if (GET_CODE (op) != ASHIFT
      && GET_CODE (op) != LSHIFTRT
      && GET_CODE (op) != ASHIFTRT
      && GET_CODE (op) != ZERO_EXTEND)

  op_operand = XEXP (op, 0);
  if (!REG_P (SET_DEST (set)) || !REG_P (op_operand)
      || HARD_REGISTER_NUM_P (REGNO (SET_DEST (set)))
      || HARD_REGISTER_NUM_P (REGNO (op_operand))
      || GET_MODE (op) != twice_word_mode)

  if (GET_CODE (op) == ZERO_EXTEND)
      if (GET_MODE (op_operand) != word_mode
          || !choices[speed_p].splitting_zext)
  else /* left or right shift */
      bool *splitting = (GET_CODE (op) == ASHIFT
                         ? choices[speed_p].splitting_ashift
                         : GET_CODE (op) == ASHIFTRT
                         ? choices[speed_p].splitting_ashiftrt
                         : choices[speed_p].splitting_lshiftrt);
      if (!CONST_INT_P (XEXP (op, 1))
          || !IN_RANGE (INTVAL (XEXP (op, 1)), BITS_PER_WORD,
                        2 * BITS_PER_WORD - 1)
          || !splitting[INTVAL (XEXP (op, 1)) - BITS_PER_WORD])

      bitmap_set_bit (decomposable_context, REGNO (op_operand));

  bitmap_set_bit (decomposable_context, REGNO (SET_DEST (set)));

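/* So on a 32-bit target the shifts that qualify are DImode shifts of
   a pseudo by a constant in [32, 63], e.g.
   (set (reg:DI d) (lshiftrt:DI (reg:DI s) (const_int 40))), plus
   DImode zero-extensions of a word_mode pseudo, provided the cost
   model above said that splitting them is worthwhile.  */
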
/* Decompose a more than word wide shift (in INSN) of a multiword
   pseudo or a multiword zero-extend of a wordmode pseudo into a move
   and 'set to zero' insn.  Return a pointer to the new insn when a
   replacement was done.  */

resolve_shift_zext (rtx_insn *insn)
  rtx src_reg, dest_reg, dest_upper, upper_src = NULL_RTX;
  int src_reg_num, dest_reg_num, offset1, offset2, src_offset;
  scalar_int_mode inner_mode;

  set = single_set (insn);

  if (GET_CODE (op) != ASHIFT
      && GET_CODE (op) != LSHIFTRT
      && GET_CODE (op) != ASHIFTRT
      && GET_CODE (op) != ZERO_EXTEND)

  op_operand = XEXP (op, 0);
  if (!is_a <scalar_int_mode> (GET_MODE (op_operand), &inner_mode))

  /* We can tear this operation apart only if the regs were already
     torn apart.  */
  if (!resolve_reg_p (SET_DEST (set)) && !resolve_reg_p (op_operand))

  /* src_reg_num is the number of the word mode register which we
     are operating on.  For a left shift and a zero_extend on little
     endian machines this is register 0.  */
  src_reg_num = (GET_CODE (op) == LSHIFTRT || GET_CODE (op) == ASHIFTRT)

  if (WORDS_BIG_ENDIAN && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
    src_reg_num = 1 - src_reg_num;

  if (GET_CODE (op) == ZERO_EXTEND)
    dest_reg_num = WORDS_BIG_ENDIAN ? 1 : 0;
    dest_reg_num = 1 - src_reg_num;

  offset1 = UNITS_PER_WORD * dest_reg_num;
  offset2 = UNITS_PER_WORD * (1 - dest_reg_num);
  src_offset = UNITS_PER_WORD * src_reg_num;

  dest_reg = simplify_gen_subreg_concatn (word_mode, SET_DEST (set),
                                          GET_MODE (SET_DEST (set)),
  dest_upper = simplify_gen_subreg_concatn (word_mode, SET_DEST (set),
                                            GET_MODE (SET_DEST (set)),
  src_reg = simplify_gen_subreg_concatn (word_mode, op_operand,
                                         GET_MODE (op_operand),
  if (GET_CODE (op) == ASHIFTRT
      && INTVAL (XEXP (op, 1)) != 2 * BITS_PER_WORD - 1)
    upper_src = expand_shift (RSHIFT_EXPR, word_mode, copy_rtx (src_reg),
                              BITS_PER_WORD - 1, NULL_RTX, 0);

  if (GET_CODE (op) != ZERO_EXTEND)
      int shift_count = INTVAL (XEXP (op, 1));
      if (shift_count > BITS_PER_WORD)
        src_reg = expand_shift (GET_CODE (op) == ASHIFT ?
                                LSHIFT_EXPR : RSHIFT_EXPR,
                                shift_count - BITS_PER_WORD,
                                dest_reg, GET_CODE (op) != ASHIFTRT);

  if (dest_reg != src_reg)
    emit_move_insn (dest_reg, src_reg);
  if (GET_CODE (op) != ASHIFTRT)
    emit_move_insn (dest_upper, CONST0_RTX (word_mode));
  else if (INTVAL (XEXP (op, 1)) == 2 * BITS_PER_WORD - 1)
    emit_move_insn (dest_upper, copy_rtx (src_reg));
    emit_move_insn (dest_upper, upper_src);
  insns = get_insns ();

  emit_insn_before (insns, insn);

      fprintf (dump_file, "; Replacing insn: %d with insns: ", INSN_UID (insn));
      for (in = insns; in != insn; in = NEXT_INSN (in))
        fprintf (dump_file, "%d ", INSN_UID (in));
      fprintf (dump_file, "\n");

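/* For instance, a DImode lshiftrt by exactly BITS_PER_WORD on a
   little-endian 32-bit target becomes a move of the source's high
   word into the destination's low word followed by zeroing the
   destination's high word; no shift instruction remains.  */
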
/* Print to dump_file a description of what we're doing with shift code CODE.
   SPLITTING[X] is true if we are splitting shifts by X + BITS_PER_WORD.  */

dump_shift_choices (enum rtx_code code, bool *splitting)
           "  Splitting mode %s for %s lowering with shift amounts = ",
           GET_MODE_NAME (twice_word_mode), GET_RTX_NAME (code));
  for (i = 0; i < BITS_PER_WORD; i++)
        fprintf (dump_file, "%s%d", sep, i + BITS_PER_WORD);
  fprintf (dump_file, "\n");

/* Print to dump_file a description of what we're doing when optimizing
   for speed or size; SPEED_P says which.  DESCRIPTION is a description
   of the SPEED_P choice.  */

dump_choices (bool speed_p, const char *description)
  unsigned int size, factor, i;

  fprintf (dump_file, "Choices when optimizing for %s:\n", description);

  for (i = 0; i < MAX_MACHINE_MODE; i++)
    if (interesting_mode_p ((machine_mode) i, &size, &factor)
      fprintf (dump_file, "  %s mode %s for copy lowering.\n",
               choices[speed_p].move_modes_to_split[i]
               GET_MODE_NAME ((machine_mode) i));

  fprintf (dump_file, "  %s mode %s for zero_extend lowering.\n",
           choices[speed_p].splitting_zext ? "Splitting" : "Skipping",
           GET_MODE_NAME (twice_word_mode));

  dump_shift_choices (ASHIFT, choices[speed_p].splitting_ashift);
  dump_shift_choices (LSHIFTRT, choices[speed_p].splitting_lshiftrt);
  dump_shift_choices (ASHIFTRT, choices[speed_p].splitting_ashiftrt);
  fprintf (dump_file, "\n");

/* Look for registers which are always accessed via word-sized SUBREGs
   or -if DECOMPOSE_COPIES is true- via copies.  Decompose these
   registers into several word-sized pseudo-registers.  */

decompose_multiword_subregs (bool decompose_copies)
      dump_choices (false, "size");
      dump_choices (true, "speed");

  /* Check if this target even has any modes to consider lowering.   */
  if (!choices[false].something_to_do && !choices[true].something_to_do)
        fprintf (dump_file, "Nothing to do!\n");

  max = max_reg_num ();

  /* First see if there are any multi-word pseudo-registers.  If there
     aren't, there is nothing we can do.  This should speed up this
     pass in the normal case, since it should be faster than scanning
     all the insns.  */
    bool useful_modes_seen = false;

    for (i = FIRST_PSEUDO_REGISTER; i < max; ++i)
      if (regno_reg_rtx[i] != NULL)
          machine_mode mode = GET_MODE (regno_reg_rtx[i]);
          if (choices[false].move_modes_to_split[(int) mode]
              || choices[true].move_modes_to_split[(int) mode])
              useful_modes_seen = true;

    if (!useful_modes_seen)
          fprintf (dump_file, "Nothing to lower in this function.\n");

    df_set_flags (DF_DEFER_INSN_RESCAN);

  /* FIXME: It may be possible to change this code to look for each
     multi-word pseudo-register and to find each insn which sets or
     uses that register.  That should be faster than scanning all the
     insns.  */

  decomposable_context = BITMAP_ALLOC (NULL);
  non_decomposable_context = BITMAP_ALLOC (NULL);
  subreg_context = BITMAP_ALLOC (NULL);

  reg_copy_graph.create (max);
  reg_copy_graph.safe_grow_cleared (max, true);
  memset (reg_copy_graph.address (), 0, sizeof (bitmap) * max);

  speed_p = optimize_function_for_speed_p (cfun);
  FOR_EACH_BB_FN (bb, cfun)
      FOR_BB_INSNS (bb, insn)
          enum classify_move_insn cmi;

              || GET_CODE (PATTERN (insn)) == CLOBBER
              || GET_CODE (PATTERN (insn)) == USE)

          recog_memoized (insn);

          if (find_decomposable_shift_zext (insn, speed_p))

          extract_insn (insn);

          set = simple_move (insn, speed_p);

            cmi = NOT_SIMPLE_MOVE;

              /* We mark pseudo-to-pseudo copies as decomposable during the
                 second pass only.  The first pass is so early that there is
                 good chance such moves will be optimized away completely by
                 subsequent optimizations anyway.

                 However, we call find_pseudo_copy even during the first pass
                 so as to properly set up the reg_copy_graph.  */
              if (find_pseudo_copy (set))
                cmi = decompose_copies ? DECOMPOSABLE_SIMPLE_MOVE : SIMPLE_MOVE;

          n = recog_data.n_operands;
          for (i = 0; i < n; ++i)
              find_decomposable_subregs (&recog_data.operand[i], &cmi);

              /* We handle ASM_OPERANDS as a special case to support
                 things like x86 rdtsc which returns a DImode value.
                 We can decompose the output, which will certainly be
                 operand 0, but not the inputs.  */
              if (cmi == SIMPLE_MOVE
                  && GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
                  gcc_assert (i == 0);
                  cmi = NOT_SIMPLE_MOVE;

  bitmap_and_compl_into (decomposable_context, non_decomposable_context);
  if (!bitmap_empty_p (decomposable_context))
      sbitmap_iterator sbi;
      bitmap_iterator iter;

      propagate_pseudo_copies ();

      auto_sbitmap sub_blocks (last_basic_block_for_fn (cfun));
      bitmap_clear (sub_blocks);

      EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
        decompose_register (regno);

      FOR_EACH_BB_FN (bb, cfun)
          FOR_BB_INSNS (bb, insn)
              pat = PATTERN (insn);
              if (GET_CODE (pat) == CLOBBER)
                resolve_clobber (pat, insn);
              else if (GET_CODE (pat) == USE)
                resolve_use (pat, insn);
              else if (DEBUG_INSN_P (insn))
                resolve_debug (insn);

                  recog_memoized (insn);
                  extract_insn (insn);

                  set = simple_move (insn, speed_p);
                      rtx_insn *orig_insn = insn;
                      bool cfi = control_flow_insn_p (insn);

                      /* We can end up splitting loads to multi-word pseudos
                         into separate loads to machine word size pseudos.
                         When this happens, we first had one load that can
                         throw, and after resolve_simple_move we'll have a
                         bunch of loads (at least two).  All those loads may
                         trap if we can have non-call exceptions, so they
                         all will end the current basic block.  We split the
                         block after the outer loop over all insns, but we
                         make sure here that we will be able to split the
                         basic block and still produce the correct control
                         flow graph for it.  */
                                  || (cfun->can_throw_non_call_exceptions
                                      && can_throw_internal (insn)));

                      insn = resolve_simple_move (set, insn);
                      if (insn != orig_insn)
                          recog_memoized (insn);
                          extract_insn (insn);

                            bitmap_set_bit (sub_blocks, bb->index);

                      rtx_insn *decomposed_shift;

                      decomposed_shift = resolve_shift_zext (insn);
                      if (decomposed_shift != NULL_RTX)
                          insn = decomposed_shift;
                          recog_memoized (insn);
                          extract_insn (insn);

              for (i = recog_data.n_operands - 1; i >= 0; --i)
                resolve_subreg_use (recog_data.operand_loc[i], insn);

              resolve_reg_notes (insn);

              if (num_validated_changes () > 0)
                  for (i = recog_data.n_dups - 1; i >= 0; --i)
                      rtx *pl = recog_data.dup_loc[i];
                      int dup_num = recog_data.dup_num[i];
                      rtx *px = recog_data.operand_loc[dup_num];

                      validate_unshare_change (insn, pl, *px, 1);

                  i = apply_change_group ();

      /* If we had insns to split that caused control flow insns in the middle
         of a basic block, split those blocks now.  Note that we only handle
         the case where splitting a load has caused multiple possibly trapping
         loads to appear.  */
      EXECUTE_IF_SET_IN_BITMAP (sub_blocks, 0, i, sbi)
          rtx_insn *insn, *end;

          bb = BASIC_BLOCK_FOR_FN (cfun, i);
          insn = BB_HEAD (bb);

              if (control_flow_insn_p (insn))
                  /* Split the block after insn.  There will be a fallthru
                     edge, which is OK so we keep it.  We have to create the
                     exception edges ourselves.  */
                  fallthru = split_block (bb, insn);
                  rtl_make_eh_edge (NULL, bb, BB_END (bb));
                  bb = fallthru->dest;
                  insn = BB_HEAD (bb);
                insn = NEXT_INSN (insn);

  FOR_EACH_VEC_ELT (reg_copy_graph, i, b)

  reg_copy_graph.release ();

  BITMAP_FREE (decomposable_context);
  BITMAP_FREE (non_decomposable_context);
  BITMAP_FREE (subreg_context);

/* Implement first lower subreg pass.  */

const pass_data pass_data_lower_subreg =
  RTL_PASS, /* type */
  "subreg1", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_LOWER_SUBREG, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */

class pass_lower_subreg : public rtl_opt_pass
  pass_lower_subreg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_lower_subreg, ctxt)

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_split_wide_types != 0; }
  virtual unsigned int execute (function *)
      decompose_multiword_subregs (false);

}; // class pass_lower_subreg

make_pass_lower_subreg (gcc::context *ctxt)
  return new pass_lower_subreg (ctxt);

/* Implement second lower subreg pass.  */

const pass_data pass_data_lower_subreg2 =
  RTL_PASS, /* type */
  "subreg2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_LOWER_SUBREG, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */

class pass_lower_subreg2 : public rtl_opt_pass
  pass_lower_subreg2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_lower_subreg2, ctxt)

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_split_wide_types
                                          && flag_split_wide_types_early; }
  virtual unsigned int execute (function *)
      decompose_multiword_subregs (true);

}; // class pass_lower_subreg2

make_pass_lower_subreg2 (gcc::context *ctxt)
  return new pass_lower_subreg2 (ctxt);

/* Implement third lower subreg pass.  */

const pass_data pass_data_lower_subreg3 =
  RTL_PASS, /* type */
  "subreg3", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_LOWER_SUBREG, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */

class pass_lower_subreg3 : public rtl_opt_pass
  pass_lower_subreg3 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_lower_subreg3, ctxt)

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_split_wide_types; }
  virtual unsigned int execute (function *)
      decompose_multiword_subregs (true);

}; // class pass_lower_subreg3

make_pass_lower_subreg3 (gcc::context *ctxt)
  return new pass_lower_subreg3 (ctxt);