/* CPU mode switching
   Copyright (C) 1998-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "cfgcleanup.h"
#include "tree-pass.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"

#ifdef OPTIMIZE_MODE_SWITCHING
/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until all modes are exhausted for each entity.

   More details can be found in the code of optimize_mode_switching.  */
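/* For example, a block whose insns need mode M1 and later mode M2 gets a
   seginfo chain of two entries, [M1] -> [M2], on its bb_info list, while a
   block with no mode requirement at all gets a single entry whose mode is
   the entity's "don't know" mode.  (Illustrative sketch only; the exact
   chains depend on the target's mode-switching hooks.)  */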
/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   NEXT is the next insn in the same basic block.  */
struct seginfo
{
  int prev_mode;
  int mode;
  rtx_insn *insn_ptr;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

struct bb_info
{
  struct seginfo *seginfo;
  int computing;
  int mode_out;
  int mode_in;
  int single_succ;
};
/* Clear mode I from entity J in bitmap B.  */
#define clear_mode_bit(b, j, i) \
       bitmap_clear_bit (b, (j * max_num_modes) + i)

/* Test mode I from entity J in bitmap B.  */
#define mode_bit_p(b, j, i) \
       bitmap_bit_p (b, (j * max_num_modes) + i)

/* Set mode I from entity J in bitmap B.  */
#define set_mode_bit(b, j, i) \
       bitmap_set_bit (b, (j * max_num_modes) + i)
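/* For instance, if max_num_modes were 4, mode 2 of the entity at index 1 of
   entity_map would occupy bit 1 * 4 + 2 == 6 of each per-block row, so
   set_mode_bit (comp[bb->index], 1, 2) would set that bit and
   mode_bit_p (comp[bb->index], 1, 2) would test it.  (Example values are
   hypothetical.)  */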
/* Emit modes segments from EDGE_LIST associated with entity E.
   INFO gives mode availability for each mode.  */

commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
      edge eg = INDEX_EDGE (edge_list, ed);

	  int mode = (int) (intptr_t) eg->aux - 1;
	  HARD_REG_SET live_at_edge;
	  basic_block src_bb = eg->src;
	  int cur_mode = info[src_bb->index].mode_out;

	  REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

	  rtl_profile_for_edge (eg);

	  targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

	  mode_set = get_insns ();
	  default_rtl_profile ();

	  /* Do not bother to insert empty sequence.  */
	  if (mode_set == NULL)

	  /* We should not get an abnormal edge here.  */
	  gcc_assert (! (eg->flags & EDGE_ABNORMAL));

	  insert_insn_on_edge (mode_set, eg);
/* Allocate a new SEGINFO structure, initialized with the PREV_MODE, MODE,
   INSN, and REGS_LIVE parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */

static struct seginfo *
new_seginfo (int prev_mode, int mode, rtx_insn *insn,
	     const HARD_REG_SET &regs_live)
  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
	      || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
  ptr = XNEW (struct seginfo);
  ptr->prev_mode = prev_mode;
  ptr->insn_ptr = insn;
  ptr->regs_live = regs_live;
/* Add a seginfo element to the end of a list.
   TAIL_PTR is a pointer to the list's null terminator.
   INFO is the structure to be linked in.  */

add_seginfo (struct seginfo ***tail_ptr, struct seginfo *info)
  *tail_ptr = &info->next;
/* Record in LIVE that register REG died.  */

reg_dies (rtx reg, HARD_REG_SET *live)
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */

create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
	basic_block src_bb = eg->src;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.

	   x86 targets use the mode-switching infrastructure to
	   conditionally insert a vzeroupper instruction at the exit
	   from the function where there is no need to switch the
	   mode before the return value copy.  The vzeroupper insertion
	   pass runs after reload, so use !reload_completed as a stand-in
	   for x86 to skip the search for the return value copy insn.

	   N.b.: the code below assumes that the return copy insn
	   immediately precedes its corresponding use insn.  This
	   assumption does not hold after reload, since the sched1 pass
	   can schedule the return copy insn away from its
	   corresponding use insn.  */
	if (!reload_completed
	    && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	    df_simulate_initialize_backwards (src_bb, live);

	    int ret_start = REGNO (ret_reg);
	    int nregs = REG_NREGS (ret_reg);
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;

	    df_simulate_one_insn_backwards (src_bb, last_insn, live);

		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;

		df_simulate_one_insn_backwards (src_bb, return_copy, live);

		if (NONDEBUG_INSN_P (return_copy))
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))

		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      /* Skip USEs of multiple return registers.
			 __builtin_apply pattern is also handled here.  */
		      if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			  && (targetm.calls.function_value_regno_p
			      (REGNO (XEXP (return_copy_pat, 0)))))
			  multi_reg_return = true;
			  last_insn = return_copy;

		      /* Skip barrier insns.  */
		      if (!MEM_VOLATILE_P (return_copy_pat))

		    case UNSPEC_VOLATILE:
		      last_insn = return_copy;

		      /* If the return register is not (in its entirety)
			 likely spilled, the return copy might be
			 partially or completely optimized away.  */
		      return_copy_pat = single_set (return_copy);
		      if (!return_copy_pat)
			  return_copy_pat = PATTERN (return_copy);
			  if (GET_CODE (return_copy_pat) != CLOBBER)

			  /* This might be (clobber (reg [<result>]))
			     when not optimizing.  Then check if
			     the previous insn is the clobber for
			     the return register.  */
			  copy_reg = SET_DEST (return_copy_pat);
			  if (GET_CODE (copy_reg) == REG
			      && !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      if (INSN_P (PREV_INSN (return_copy)))
				  return_copy = PREV_INSN (return_copy);
				  return_copy_pat = PATTERN (return_copy);
				  if (GET_CODE (return_copy_pat) != CLOBBER)

		      copy_reg = SET_DEST (return_copy_pat);
		      if (GET_CODE (copy_reg) == REG)
			copy_start = REGNO (copy_reg);
		      else if (GET_CODE (copy_reg) == SUBREG
			       && GET_CODE (SUBREG_REG (copy_reg)) == REG)
			copy_start = REGNO (SUBREG_REG (copy_reg));
		      /* When control reaches the end of a non-void function,
			 there are no return copy insns at all.  This
			 avoids an ICE on such an invalid function.  */
		      if (ret_start + nregs == ret_end)

		      if (!targetm.calls.function_value_regno_p (copy_start))

		      copy_num = hard_regno_nregs (copy_start,
						   GET_MODE (copy_reg));

		      /* If the return register is not likely spilled - as is
			 the case for floating point on SH4 - then it might
			 be set by an arithmetic operation that needs a
			 different mode than the exit block.  */
		      HARD_REG_SET hard_regs_live;
		      REG_SET_TO_HARD_REG_SET (hard_regs_live, live);
		      for (j = n_entities - 1; j >= 0; j--)
			  int e = entity_map[j];
			    targetm.mode_switching.needed (e, return_copy,

			  if (mode != num_modes[e]
			      && mode != targetm.mode_switching.exit (e))

			      /* __builtin_return emits a sequence of loads to all
				 return registers.  One of them might require
				 another mode than MODE_EXIT, even if it is
				 unrelated to the return value, so we want to put
				 the final mode switch after it.  */
				  && targetm.calls.function_value_regno_p
				forced_late_switch = true;

			      /* For the SH4, floating point loads depend on fpscr,
				 thus we might need to put the final mode switch
				 after the return value copy.  That is still OK,
				 because a floating point return value does not
				 conflict with address reloads.  */
			      if (copy_start >= ret_start
				  && copy_start + copy_num <= ret_end
				  && GET_CODE (return_copy_pat) == SET
				  && OBJECT_P (SET_SRC (return_copy_pat)))
				forced_late_switch = true;

		      last_insn = return_copy;
		  if (copy_start >= ret_start
		      && copy_start + copy_num <= ret_end)
		  else if (!multi_reg_return
			   || !targetm.calls.function_value_regno_p
		    last_insn = return_copy;

		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))

		last_insn = return_copy;

	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
			|| forced_late_switch
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| nregs != REG_NREGS (ret_reg)
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;

		before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;

	pre_exit = split_edge (eg);
/* Return the confluence of modes MODE1 and MODE2 for entity ENTITY,
   using NO_MODE to represent an unknown mode if nothing more precise
   is available.  */

mode_confluence (int entity, int mode1, int mode2, int no_mode)
      && targetm.mode_switching.confluence)
    return targetm.mode_switching.confluence (entity, mode1, mode2);
/* Information for the dataflow problems below.  */
struct
{
  /* Information about each basic block, indexed by block id.  */
  struct bb_info *bb_info;

  /* A bitmap of blocks for which the current entity is transparent.  */
  sbitmap transp;

  /* The entity that we're processing.  */
  int entity;

  /* The number of modes defined for the entity, and thus the identifier
     of the "don't know" mode.  */
  int no_mode;
} confluence_info;
/* Propagate information about any mode change on edge E to the
   destination block's mode_in.  Return true if something changed.

   The mode_in and mode_out fields use no_mode + 1 to mean "not yet set".  */

forward_confluence_n (edge e)
  /* The entry and exit blocks have no useful mode information.  */
  if (e->src->index == ENTRY_BLOCK || e->dest->index == EXIT_BLOCK)

  /* We don't control mode changes across abnormal edges.  */
  if (e->flags & EDGE_ABNORMAL)

  /* E->aux is nonzero if we have computed the LCM problem and scheduled
     E to change the mode to E->aux - 1.  Otherwise model the change
     from the source to the destination.  */
  struct bb_info *bb_info = confluence_info.bb_info;
  int no_mode = confluence_info.no_mode;
  int src_mode = bb_info[e->src->index].mode_out;
    src_mode = (int) (intptr_t) e->aux - 1;
  if (src_mode == no_mode + 1)

  int dest_mode = bb_info[e->dest->index].mode_in;
  if (dest_mode == no_mode + 1)
      bb_info[e->dest->index].mode_in = src_mode;

  int entity = confluence_info.entity;
  int new_mode = mode_confluence (entity, src_mode, dest_mode, no_mode);
  if (dest_mode == new_mode)

  bb_info[e->dest->index].mode_in = new_mode;
/* Update block BB_INDEX's mode_out based on its mode_in.  Return true if
   something changed.  */

forward_transfer (int bb_index)
  /* The entry and exit blocks have no useful mode information.  */
  if (bb_index == ENTRY_BLOCK || bb_index == EXIT_BLOCK)

  /* Only propagate through a block if the entity is transparent.  */
  struct bb_info *bb_info = confluence_info.bb_info;
  if (bb_info[bb_index].computing != confluence_info.no_mode
      || bb_info[bb_index].mode_out == bb_info[bb_index].mode_in)

  bb_info[bb_index].mode_out = bb_info[bb_index].mode_in;
/* A backwards confluence function.  Update the bb_info single_succ
   field for E's source block, based on changes to E's destination block.
   At the end of the dataflow problem, single_succ is the single mode
   that all successors require (directly or indirectly), or no_mode
   if there are conflicting requirements.

   Initially, a value of no_mode + 1 means "don't know".  */

single_succ_confluence_n (edge e)
  /* The entry block has no associated mode information.  */
  if (e->src->index == ENTRY_BLOCK)

  /* We don't control mode changes across abnormal edges.  */
  if (e->flags & EDGE_ABNORMAL)

  /* Do nothing if we've already found a conflict.  */
  struct bb_info *bb_info = confluence_info.bb_info;
  int no_mode = confluence_info.no_mode;
  int src_mode = bb_info[e->src->index].single_succ;
  if (src_mode == no_mode)

  /* Work out what mode the destination block (or its successors) requires.  */
  if (e->dest->index == EXIT_BLOCK)
  else if (bitmap_bit_p (confluence_info.transp, e->dest->index))
    dest_mode = bb_info[e->dest->index].single_succ;
    dest_mode = bb_info[e->dest->index].seginfo->mode;

  /* Do nothing if the destination block has no new information.  */
  if (dest_mode == no_mode + 1 || dest_mode == src_mode)

  /* Detect conflicting modes.  */
  if (src_mode != no_mode + 1)

  bb_info[e->src->index].single_succ = dest_mode;
/* A backward transfer function for computing the bb_info single_succ
   fields, as described above single_succ_confluence.  */

single_succ_transfer (int bb_index)
  /* We don't have any field to transfer to.  Assume that, after the
     first iteration, we are only called if single_succ has changed.
     We should then process incoming edges if the entity is transparent.  */
  return bitmap_bit_p (confluence_info.transp, bb_index);
/* Check whether the target wants to back-propagate a mode change across
   edge E, and update the source block's computed mode if so.  Return true
   if something changed.  */

backprop_confluence_n (edge e)
  /* The entry and exit blocks have no useful mode information.  */
  if (e->src->index == ENTRY_BLOCK || e->dest->index == EXIT_BLOCK)

  /* We don't control mode changes across abnormal edges.  */
  if (e->flags & EDGE_ABNORMAL)

  /* We can only require a new mode in the source block if the entity
     was originally transparent there.  */
  if (!bitmap_bit_p (confluence_info.transp, e->src->index))

  /* Exit now if there is no required mode, or if all paths into the
     source block leave the entity in the required mode.  */
  struct bb_info *bb_info = confluence_info.bb_info;
  int no_mode = confluence_info.no_mode;
  int src_mode = bb_info[e->src->index].mode_out;
  int dest_mode = bb_info[e->dest->index].mode_in;
  if (dest_mode == no_mode || src_mode == dest_mode)

  /* See what the target thinks about this transition.  */
  int entity = confluence_info.entity;
  int new_mode = targetm.mode_switching.backprop (entity, src_mode,
						  dest_mode);
  if (new_mode == no_mode)

  /* The target doesn't like the current transition, but would be happy
     with a transition from NEW_MODE.

     If we force the source block to use NEW_MODE, we might introduce a
     double transition on at least one path through the function (one to
     NEW_MODE and then one to DEST_MODE).  Therefore, if all destination
     blocks require the same mode, it is usually better to bring that
     mode requirement forward.

     If that isn't possible, merge the preference for this edge with
     the preferences for other edges.  no_mode + 1 indicates that there
     was no previous preference.  */
  int old_mode = bb_info[e->src->index].computing;
  if (bb_info[e->src->index].single_succ != no_mode)
    new_mode = bb_info[e->src->index].single_succ;
  else if (old_mode != no_mode + 1)
    new_mode = mode_confluence (entity, old_mode, new_mode, no_mode);

  if (old_mode == new_mode)

  bb_info[e->src->index].computing = new_mode;
/* If the current entity was originally transparent in block BB_INDEX,
   update the incoming mode to match the outgoing mode.  Register a mode
   change if the entity is no longer transparent.

   Also, as an on-the-fly optimization, check whether the entity was
   originally transparent in BB_INDEX and if all successor blocks require
   the same mode.  If so, anticipate the mode change in BB_INDEX if
   doing it on the incoming edges would require no more mode changes than
   doing it on the outgoing edges.  The aim is to reduce the total number
   of mode changes emitted for the function (and thus reduce code size and
   cfg complexity) without increasing the number of mode changes on any
   given path through the function.  A typical case where it helps is:

	  T
	 / \
	T   M
	 \ /
	  M

   where the entity is transparent in the T blocks and is required to have
   mode M in the M blocks.  If there are no redundancies leading up to this,
   there will be two mutually-exclusive changes to mode M, one on each of
   the T->M edges.  The optimization instead converts it to:

	  T
	 / \
	M   M
	 \ /
	  M

   which creates a single transition to M for both paths through the diamond.

   Return true if something changed.  */
backprop_transfer (int bb_index)
  /* The entry and exit blocks have no useful mode information.  */
  if (bb_index == ENTRY_BLOCK || bb_index == EXIT_BLOCK)

  /* We can only require a new mode if the entity was previously
     transparent.  */
  if (!bitmap_bit_p (confluence_info.transp, bb_index))

  struct bb_info *bb_info = confluence_info.bb_info;
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
  int no_mode = confluence_info.no_mode;
  int mode_in = bb_info[bb_index].mode_in;
  int mode_out = bb_info[bb_index].computing;
  if (mode_out == no_mode + 1)
      /* The entity is still transparent for this block.  See whether
	 all successor blocks need the same mode, either directly or
	 indirectly.  */
      mode_out = bb_info[bb_index].single_succ;
      if (mode_out == no_mode)

      /* Get a minimum bound on the number of transitions that would be
	 removed if BB itself required MODE_OUT.  */
      unsigned int moved = 0;
      for (edge e : bb->succs)
	if (e->dest->index != EXIT_BLOCK
	    && mode_out == bb_info[e->dest->index].seginfo->mode)

      /* See whether making the mode change on all incoming edges would
	 be no worse than making it on MOVED outgoing edges.  */
      if (moved < EDGE_COUNT (bb->preds))

      bb_info[bb_index].mode_out = mode_out;
      bb_info[bb_index].computing = mode_out;
  else if (mode_out == mode_in)

  bb_info[bb_index].mode_in = mode_out;
  bb_info[bb_index].seginfo->mode = mode_out;
/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

optimize_mode_switching (void)
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES] = {};
  struct bb_info *bb_info[N_ENTITIES] = {};
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];

  /* Make sure that if MODE_ENTRY is defined, MODE_EXIT is defined too.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);

  df_note_add_problem ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  auto_sbitmap transp_all (last_basic_block_for_fn (cfun));
  /* Forward-propagate mode information through blocks where the entity
     is transparent, so that mode_in describes the mode on entry to each
     block and mode_out describes the mode on exit from each block.  */
  auto forwprop_mode_info = [&](struct bb_info *info,
				int entity, int no_mode)
      /* Use no_mode + 1 to mean "not yet set".  */
      FOR_EACH_BB_FN (bb, cfun)
	  if (bb_has_abnormal_pred (bb))
	    info[bb->index].mode_in = info[bb->index].seginfo->mode;
	    info[bb->index].mode_in = no_mode + 1;
	  if (info[bb->index].computing != no_mode)
	    info[bb->index].mode_out = info[bb->index].computing;
	    info[bb->index].mode_out = no_mode + 1;

      confluence_info.bb_info = info;
      confluence_info.transp = nullptr;
      confluence_info.entity = entity;
      confluence_info.no_mode = no_mode;

      bitmap_set_range (blocks, 0, last_basic_block_for_fn (cfun));
      df_simple_dataflow (DF_FORWARD, NULL, NULL, forward_confluence_n,
			  forward_transfer, blocks,
			  df_get_postorder (DF_FORWARD),
			  df_get_n_blocks (DF_FORWARD));

  if (targetm.mode_switching.backprop)
    clear_aux_for_edges ();
  for (j = n_entities - 1; j >= 0; j--)
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];

      bitmap_ones (transp_all);

      /* Determine what mode (if any) the first use of entity E in each
	 block needs.  This will be the mode that is anticipatable for
	 that block.  Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	  struct seginfo **tail_ptr = &info[bb->index].seginfo;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)

		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		if (bb_has_eh_pred (bb)
		    && targetm.mode_switching.eh_handler)
		  last_mode = targetm.mode_switching.eh_handler (e);
		ptr = new_seginfo (no_mode, last_mode, ins_pos, live_now);
		add_seginfo (&tail_ptr, ptr);
		bitmap_clear_bit (transp_all, bb->index);

	  FOR_BB_INSNS (bb, insn)
		int mode = targetm.mode_switching.needed (e, insn, live_now);

		if (mode != no_mode && mode != last_mode)
		    ptr = new_seginfo (last_mode, mode, insn, live_now);
		    add_seginfo (&tail_ptr, ptr);
		    bitmap_clear_bit (transp_all, bb->index);
		    any_set_required = true;
		/* Update LIVE_NOW.  */
		for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		  if (REG_NOTE_KIND (link) == REG_DEAD)
		    reg_dies (XEXP (link, 0), &live_now);

		note_stores (insn, reg_becomes_live, &live_now);
		for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		  if (REG_NOTE_KIND (link) == REG_UNUSED)
		    reg_dies (XEXP (link, 0), &live_now);

		if (targetm.mode_switching.after)
		  last_mode = targetm.mode_switching.after (e, last_mode,

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	      ptr = new_seginfo (last_mode, no_mode, BB_END (bb), live_now);
	      add_seginfo (&tail_ptr, ptr);
	      if (last_mode != no_mode)
		bitmap_clear_bit (transp_all, bb->index);
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;

	  int mode = targetm.mode_switching.entry (e);
	  if (mode != no_mode)
	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode.  This represents the mode on
		 function entry.  */
	      info[post_entry->index].computing = mode;
	      bitmap_clear_bit (transp_all, post_entry->index);

	  info[pre_exit->index].mode_out =
	    info[pre_exit->index].mode_in = no_mode;

	  int mode = targetm.mode_switching.exit (e);
	  if (mode != no_mode)
	      info[pre_exit->index].seginfo->mode = mode;
	      bitmap_clear_bit (transp_all, pre_exit->index);
      /* If the target requests it, back-propagate selected mode requirements
	 through transparent blocks.  */
      if (targetm.mode_switching.backprop)
	  /* First work out the mode on entry to and exit from each block.  */
	  forwprop_mode_info (info, e, no_mode);

	  /* Compute the single_succ fields, as described above
	     single_succ_confluence.  */
	  FOR_EACH_BB_FN (bb, cfun)
	    info[bb->index].single_succ = no_mode + 1;

	  confluence_info.transp = transp_all;
	  bitmap_set_range (blocks, 0, last_basic_block_for_fn (cfun));
	  df_simple_dataflow (DF_BACKWARD, NULL, NULL,
			      single_succ_confluence_n,
			      single_succ_transfer, blocks,
			      df_get_postorder (DF_BACKWARD),
			      df_get_n_blocks (DF_BACKWARD));

	  FOR_EACH_BB_FN (bb, cfun)
	      /* Repurpose mode_in as the first mode required by the block,
		 or the output mode if none.  */
	      if (info[bb->index].seginfo->mode != no_mode)
		info[bb->index].mode_in = info[bb->index].seginfo->mode;

	      /* In transparent blocks, use computing == no_mode + 1
		 to indicate that no propagation has taken place.  */
	      if (info[bb->index].computing == no_mode)
		info[bb->index].computing = no_mode + 1;

	  bitmap_set_range (blocks, 0, last_basic_block_for_fn (cfun));
	  df_simple_dataflow (DF_BACKWARD, NULL, NULL, backprop_confluence_n,
			      backprop_transfer, blocks,
			      df_get_postorder (DF_BACKWARD),
			      df_get_n_blocks (DF_BACKWARD));

	  /* Any block that now computes a mode is no longer transparent.  */
	  FOR_EACH_BB_FN (bb, cfun)
	    if (info[bb->index].computing == no_mode + 1)
	      info[bb->index].computing = no_mode;
	    else if (info[bb->index].computing != no_mode)
	      bitmap_clear_bit (transp_all, bb->index);
      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	      if (!bitmap_bit_p (transp_all, bb->index))
		clear_mode_bit (transp[bb->index], j, m);

	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);

  /* Calculate the optimal locations for placing mode switches to modes
     with priority I.  */
  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);
  for (j = n_entities - 1; j >= 0; j--)
      int no_mode = num_modes[entity_map[j]];
      struct bb_info *info = bb_info[j];

      /* Insert all mode sets that have been inserted by lcm.  */
      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *) (intptr_t) 0;

	  for (i = 0; i < no_mode; i++)
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		  eg->aux = (void *) (intptr_t) (m + 1);

      /* mode_in and mode_out can be calculated directly from avin and
	 avout if all the modes are mutually exclusive.  Use the target-
	 provided confluence function otherwise.  */
      if (targetm.mode_switching.confluence)
	forwprop_mode_info (info, entity_map[j], no_mode);

      FOR_EACH_BB_FN (bb, cfun)
	  auto modes_confluence = [&](sbitmap *av)
	      for (int i = 0; i < no_mode; ++i)
		if (mode_bit_p (av[bb->index], j, i))
		    for (int i2 = i + 1; i2 < no_mode; ++i2)
		      if (mode_bit_p (av[bb->index], j, i2))

	  /* Initialize mode in/out availability for bb.  */
	  if (!targetm.mode_switching.confluence)
	      info[bb->index].mode_out = modes_confluence (avout);
	      info[bb->index].mode_in = modes_confluence (avin);

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	  /* See whether the target can perform the first transition.
	     If not, push it onto the incoming edges.  The earlier backprop
	     pass should ensure that the resulting transitions are valid.  */
	  if (targetm.mode_switching.backprop)
	      int from_mode = info[bb->index].mode_in;
	      int to_mode = info[bb->index].seginfo->mode;
	      if (targetm.mode_switching.backprop (entity_map[j], from_mode,
						   to_mode) != no_mode)
		  for (edge e : bb->preds)
		    e->aux = (void *) (intptr_t) (to_mode + 1);
		  info[bb->index].mode_in = to_mode;

      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edge
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();
      FOR_EACH_BB_FN (bb, cfun)
	  struct seginfo *ptr, *next;
	  struct seginfo *first = bb_info[j][bb->index].seginfo;

	  for (ptr = first; ptr; ptr = next)
	      if (ptr->mode != no_mode)
		  rtl_profile_for_bb (bb);

		  int cur_mode = (ptr == first && ptr->prev_mode == no_mode
				  ? bb_info[j][bb->index].mode_in
				  : ptr->prev_mode);

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
			emit_insn_before (mode_set, ptr->insn_ptr);

		  default_rtl_profile ();
  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
      free_dominance_info (CDI_DOMINATORS);
      cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)

#endif /* OPTIMIZE_MODE_SWITCHING */
const pass_data pass_data_mode_switching =
  RTL_PASS, /* type */
  "mode_sw", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MODE_SWITCH, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
class pass_mode_switching : public rtl_opt_pass
  pass_mode_switching (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_mode_switching, ctxt)

  /* opt_pass methods: */
  /* The epiphany backend creates a second instance of this pass, so we need
     a clone method.  */
  opt_pass *clone () final override { return new pass_mode_switching (m_ctxt); }
  bool gate (function *) final override
#ifdef OPTIMIZE_MODE_SWITCHING

  unsigned int execute (function *) final override
#ifdef OPTIMIZE_MODE_SWITCHING
      optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */

}; // class pass_mode_switching
make_pass_mode_switching (gcc::context *ctxt)
  return new pass_mode_switching (ctxt);