/* CPU mode switching
   Copyright (C) 1998-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "coretypes.h"
27 #include "hard-reg-set.h"
29 #include "insn-config.h"
38 #include "dominance.h"
43 #include "cfgcleanup.h"
44 #include "basic-block.h"
46 #include "tree-pass.h"
50 /* We want target macros for the mode switching code to be able to refer
51 to instruction attribute values. */
52 #include "insn-attr.h"
#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until for each entity all modes are exhausted.

   More details can be found in the code of optimize_mode_switching.  */
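
/* A target enables this pass by defining OPTIMIZE_MODE_SWITCHING and
   NUM_MODES_FOR_MODE_SWITCHING and by providing the targetm.mode_switching
   hooks (needed, after, entry, exit, priority and emit) used below.  An
   "entity" is one piece of switchable machine state, e.g. the single/double
   precision setting in the SH4 FPSCR.  */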
/* This structure contains the information for each insn which requires
   a specific mode to be set (e.g. single or double FP precision).
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.
   REGS_LIVE is the set of hard registers live at the point where the
   mode set would be inserted.  */
struct seginfo
{
  int mode;
  rtx_insn *insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};

/* Pairs the list of mode segments of a basic block with the mode the
   block computes and the LCM mode-in/mode-out information for it.  */
struct bb_info
{
  struct seginfo *seginfo;
  int computing;
  int mode_out;
  int mode_in;
};

static struct seginfo * new_seginfo (int, rtx_insn *, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);
/* Clear mode I from entity J in bitmap B.  */
#define clear_mode_bit(b, j, i) \
       bitmap_clear_bit (b, (j * max_num_modes) + i)

/* Test mode I from entity J in bitmap B.  */
#define mode_bit_p(b, j, i) \
       bitmap_bit_p (b, (j * max_num_modes) + i)

/* Set mode I from entity J in bitmap B.  */
#define set_mode_bit(b, j, i) \
       bitmap_set_bit (b, (j * max_num_modes) + i)
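
/* Each per-basic-block LCM bitmap row therefore holds
   n_entities * max_num_modes bits, laid out entity-major: with
   max_num_modes == 4, for example, mode 2 of entity 1 maps to
   bit 1 * 4 + 2 == 6.  */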
/* Emit mode-setting sequences on the edges in EDGE_LIST that are associated
   with entity E.  INFO gives the mode availability for each basic block.  */

static bool
commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
{
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
    {
      edge eg = INDEX_EDGE (edge_list, ed);
      int mode;

      if ((mode = (int)(intptr_t)(eg->aux)) != -1)
        {
          HARD_REG_SET live_at_edge;
          basic_block src_bb = eg->src;
          int cur_mode = info[src_bb->index].mode_out;
          rtx_insn *mode_set;

          REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

          rtl_profile_for_edge (eg);
          start_sequence ();

          targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

          mode_set = get_insns ();
          end_sequence ();
          default_rtl_profile ();

          /* Do not bother to insert an empty sequence.  */
          if (mode_set == NULL_RTX)
            continue;

          /* We should not get an abnormal edge here.  */
          gcc_assert (! (eg->flags & EDGE_ABNORMAL));

          need_commit = true;
          insert_insn_on_edge (mode_set, eg);
        }
    }

  return need_commit;
}
/* Allocate a new SEGINFO structure, initialized with the MODE, INSN,
   and basic block BB parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */

static struct seginfo *
new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;

  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
	      || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}
/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
	ptr = ptr->next;
      ptr->next = info;
    }
}
/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}
/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
	basic_block src_bb = eg->src;
	rtx_insn *last_insn;
	rtx ret_reg;

	gcc_assert (!pre_exit);
	/* If this function returns a value at the end, we have to
	   insert the final mode switch before the return value copy
	   to its hard register.  */
	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
	    && GET_CODE (PATTERN (last_insn)) == USE
	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
	  {
	    int ret_start = REGNO (ret_reg);
	    int nregs = hard_regno_nregs[ret_start][GET_MODE (ret_reg)];
	    int ret_end = ret_start + nregs;
	    bool short_block = false;
	    bool multi_reg_return = false;
	    bool forced_late_switch = false;
	    rtx_insn *before_return_copy;
	    do
	      {
		rtx_insn *return_copy = PREV_INSN (last_insn);
		rtx return_copy_pat, copy_reg;
		int copy_start, copy_num;
		int j;

		if (NONDEBUG_INSN_P (return_copy))
		  {
		    /* When using SJLJ exceptions, the call to the
		       unregister function is inserted between the
		       clobber of the return value and the copy.
		       We do not want to split the block before this
		       or any other call; if we have not found the
		       copy yet, the copy must have been deleted.  */
		    if (CALL_P (return_copy))
		      {
			short_block = true;
			break;
		      }
		    return_copy_pat = PATTERN (return_copy);
		    switch (GET_CODE (return_copy_pat))
		      {
		      case USE:
			/* Skip USEs of multiple return registers.
			   __builtin_apply pattern is also handled here.  */
			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
			    && (targetm.calls.function_value_regno_p
				(REGNO (XEXP (return_copy_pat, 0)))))
			  {
			    multi_reg_return = true;
			    last_insn = return_copy;
			    continue;
			  }
			break;

		      case ASM_OPERANDS:
			/* Skip barrier insns.  */
			if (!MEM_VOLATILE_P (return_copy_pat))
			  break;
			/* Fall through.  */

		      case ASM_INPUT:
		      case UNSPEC_VOLATILE:
			last_insn = return_copy;
			continue;

		      default:
			break;
		      }

		    /* If the return register is not (in its entirety)
		       likely spilled, the return copy might be
		       partially or completely optimized away.  */
		    return_copy_pat = single_set (return_copy);
		    if (!return_copy_pat)
		      {
			return_copy_pat = PATTERN (return_copy);
			if (GET_CODE (return_copy_pat) != CLOBBER)
			  break;
			else if (!optimize)
			  {
			    /* This might be (clobber (reg [<result>]))
			       when not optimizing.  Then check if
			       the previous insn is the clobber for
			       the return register.  */
			    copy_reg = SET_DEST (return_copy_pat);
			    if (GET_CODE (copy_reg) == REG
				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
			      {
				if (INSN_P (PREV_INSN (return_copy)))
				  {
				    return_copy = PREV_INSN (return_copy);
				    return_copy_pat = PATTERN (return_copy);
				    if (GET_CODE (return_copy_pat) != CLOBBER)
				      break;
				  }
			      }
			  }
		      }
		    copy_reg = SET_DEST (return_copy_pat);
		    if (GET_CODE (copy_reg) == REG)
		      copy_start = REGNO (copy_reg);
		    else if (GET_CODE (copy_reg) == SUBREG
			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
		      copy_start = REGNO (SUBREG_REG (copy_reg));
		    else
		      {
			/* When control reaches the end of a non-void function,
			   there are no return copy insns at all.  This
			   avoids an ICE on that invalid function.  */
			if (ret_start + nregs == ret_end)
			  short_block = true;
			break;
		      }
		    if (!targetm.calls.function_value_regno_p (copy_start))
		      copy_num = 0;
		    else
		      copy_num
			= hard_regno_nregs[copy_start][GET_MODE (copy_reg)];

		    /* If the return register is not likely spilled - as is
		       the case for floating point on SH4 - then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
		    for (j = n_entities - 1; j >= 0; j--)
		      {
			int e = entity_map[j];
			int mode
			  = targetm.mode_switching.needed (e, return_copy);

			if (mode != num_modes[e]
			    && mode != targetm.mode_switching.exit (e))
			  break;
		      }
		    if (j >= 0)
		      {
			/* __builtin_return emits a sequence of loads to all
			   return registers.  One of them might require
			   another mode than MODE_EXIT, even if it is
			   unrelated to the return value, so we want to put
			   the final mode switch after it.  */
			if (multi_reg_return
			    && targetm.calls.function_value_regno_p
				(copy_start))
			  forced_late_switch = true;

			/* For the SH4, floating point loads depend on fpscr,
			   thus we might need to put the final mode switch
			   after the return value copy.  That is still OK,
			   because a floating point return value does not
			   conflict with address reloads.  */
			if (copy_start >= ret_start
			    && copy_start + copy_num <= ret_end
			    && OBJECT_P (SET_SRC (return_copy_pat)))
			  forced_late_switch = true;
			break;
		      }
		    if (copy_num == 0)
		      {
			last_insn = return_copy;
			continue;
		      }

		    if (copy_start >= ret_start
			&& copy_start + copy_num <= ret_end)
		      nregs -= copy_num;
		    else if (!multi_reg_return
			     || !targetm.calls.function_value_regno_p
				 (copy_start))
		      break;
		    last_insn = return_copy;
		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c .
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
		if (return_copy == BB_HEAD (src_bb))
		  {
		    short_block = true;
		    break;
		  }
		last_insn = return_copy;
	      }
	    while (nregs);
	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
	    gcc_assert (!nregs
			|| forced_late_switch
			|| short_block
			|| !(targetm.class_likely_spilled_p
			     (REGNO_REG_CLASS (ret_start)))
			|| (nregs
			    != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
			    && nregs != 1));

	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
	      {
		before_return_copy
		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
		/* Instructions preceding LAST_INSN in the same block might
		   require a different mode than MODE_EXIT, so if we might
		   have such instructions, keep them in a separate block
		   from pre_exit.  */
		src_bb = split_block (src_bb,
				      PREV_INSN (before_return_copy))->dest;
	      }
	    else
	      before_return_copy = last_insn;
	    pre_exit = split_block (src_bb, before_return_copy)->src;
	  }
	else
	  {
	    pre_exit = split_edge (eg);
	  }
      }

  return pre_exit;
}
/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return nonzero if we did work.  */

static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;
  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
	int entry_exit_extra = 0;

	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for two extra
	   blocks split from the entry and exit block.  */
	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	  entry_exit_extra = 3;

	bb_info[n_entities]
	  = XCNEWVEC (struct bb_info,
		      last_basic_block_for_fn (cfun) + entry_exit_extra);
	entity_map[n_entities++] = e;
	if (num_modes[e] > max_num_modes)
	  max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure if MODE_ENTRY is defined MODE_EXIT is defined.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
	      || (!targetm.mode_switching.entry
		  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();
  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
				n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
			       n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;
      /* Determine what mode (if any) the first use of entity E in this block
	 needs.  This will be the mode that is anticipatable for this block.
	 Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr;
	  int last_mode = no_mode;
	  bool any_set_required = false;
	  HARD_REG_SET live_now;

	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

	  /* Pretend the mode is clobbered across abnormal edges.  */
	  {
	    edge_iterator ei;
	    edge eg;
	    FOR_EACH_EDGE (eg, ei, bb->preds)
	      if (eg->flags & EDGE_COMPLEX)
		break;
	    if (eg)
	      {
		rtx_insn *ins_pos = BB_HEAD (bb);
		if (LABEL_P (ins_pos))
		  ins_pos = NEXT_INSN (ins_pos);
		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
		if (ins_pos != BB_END (bb))
		  ins_pos = NEXT_INSN (ins_pos);
		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
		add_seginfo (info + bb->index, ptr);
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	      }
	  }

	  FOR_BB_INSNS (bb, insn)
	    {
	      if (INSN_P (insn))
		{
		  int mode = targetm.mode_switching.needed (e, insn);
		  rtx link;

		  if (mode != no_mode && mode != last_mode)
		    {
		      any_set_required = true;
		      last_mode = mode;
		      ptr = new_seginfo (mode, insn, bb->index, live_now);
		      add_seginfo (info + bb->index, ptr);
		      for (i = 0; i < no_mode; i++)
			clear_mode_bit (transp[bb->index], j, i);
		    }

		  if (targetm.mode_switching.after)
		    last_mode = targetm.mode_switching.after (e, last_mode,
							      insn);

		  /* Update LIVE_NOW.  */
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_DEAD)
		      reg_dies (XEXP (link, 0), &live_now);

		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
		    if (REG_NOTE_KIND (link) == REG_UNUSED)
		      reg_dies (XEXP (link, 0), &live_now);
		}
	    }

	  info[bb->index].computing = last_mode;
	  /* Check for blocks without ANY mode requirements.
	     N.B. because of MODE_AFTER, last_mode might still
	     be different from no_mode, in which case we need to
	     mark the block as nontransparent.  */
	  if (!any_set_required)
	    {
	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
	      add_seginfo (info + bb->index, ptr);
	      if (last_mode != no_mode)
		for (i = 0; i < no_mode; i++)
		  clear_mode_bit (transp[bb->index], j, i);
	    }
	}
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
	{
	  int mode = targetm.mode_switching.entry (e);

	  info[post_entry->index].mode_out =
	    info[post_entry->index].mode_in = no_mode;
	  if (pre_exit)
	    {
	      info[pre_exit->index].mode_out =
		info[pre_exit->index].mode_in = no_mode;
	    }

	  if (mode != no_mode)
	    {
	      bb = post_entry;

	      /* By always making this nontransparent, we save
		 an extra check in make_preds_opaque.  We also
		 need this to avoid confusing pre_edge_lcm when
		 antic is cleared but transp and comp are set.  */
	      for (i = 0; i < no_mode; i++)
		clear_mode_bit (transp[bb->index], j, i);

	      /* Insert a fake computing definition of MODE into entry
		 blocks which compute no mode.  This represents the mode on
		 entry.  */
	      info[bb->index].computing = mode;

	      if (pre_exit)
		info[pre_exit->index].seginfo->mode =
		  targetm.mode_switching.exit (e);
	    }
	}
      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
	{
	  int m = targetm.mode_switching.priority (entity_map[j], i);

	  FOR_EACH_BB_FN (bb, cfun)
	    {
	      if (info[bb->index].seginfo->mode == m)
		set_mode_bit (antic[bb->index], j, m);

	      if (info[bb->index].computing == m)
		set_mode_bit (comp[bb->index], j, m);
	    }
	}
    }
  /* Calculate the optimal locations for placing the mode switches.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
				kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
	{
	  edge eg = INDEX_EDGE (edge_list, ed);

	  eg->aux = (void *)(intptr_t)-1;

	  for (i = 0; i < no_mode; i++)
	    {
	      int m = targetm.mode_switching.priority (entity_map[j], i);
	      if (mode_bit_p (insert[ed], j, m))
		{
		  eg->aux = (void *)(intptr_t)m;
		  break;
		}
	    }
	}

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct bb_info *info = bb_info[j];
	  int last_mode = no_mode;

	  /* Initialize mode-out availability for bb.  */
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avout[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_out = last_mode;

	  /* Initialize mode-in availability for bb.  */
	  last_mode = no_mode;
	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (avin[bb->index], j, i))
	      {
		if (last_mode == no_mode)
		  last_mode = i;
		if (last_mode != i)
		  {
		    last_mode = no_mode;
		    break;
		  }
	      }
	  info[bb->index].mode_in = last_mode;

	  for (i = 0; i < no_mode; i++)
	    if (mode_bit_p (del[bb->index], j, i))
	      info[bb->index].seginfo->mode = no_mode;
	}
      /* Now output the remaining mode sets in all the segments.  */

      /* In case no mode was inserted, the mode information on the edge
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for the next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
	{
	  struct seginfo *ptr, *next;
	  int cur_mode = bb_info[j][bb->index].mode_in;

	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
	    {
	      next = ptr->next;
	      if (ptr->mode != no_mode)
		{
		  rtx_insn *mode_set;

		  rtl_profile_for_bb (bb);
		  start_sequence ();

		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
					       cur_mode, ptr->regs_live);
		  mode_set = get_insns ();
		  end_sequence ();

		  /* Modes kill each other inside a basic block.  */
		  cur_mode = ptr->mode;

		  /* Insert MODE_SET only if it is nonempty.  */
		  if (mode_set != NULL_RTX)
		    {
		      emitted = true;
		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
			/* We need to emit the insns in a FIFO-like manner,
			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
			   only used for initially empty basic blocks, we
			   can achieve this by appending at the end of
			   the block.  */
			emit_insn_after
			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
		      else
			emit_insn_before (mode_set, ptr->insn_ptr);
		    }

		  default_rtl_profile ();
		}

	      free (ptr);
	    }
	}

      free (bb_info[j]);
    }
  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */
namespace {

const pass_data pass_data_mode_switching =
{
  RTL_PASS, /* type */
  "mode_sw", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MODE_SWITCH, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_mode_switching : public rtl_opt_pass
{
public:
  pass_mode_switching (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_mode_switching, ctxt)
  {}

  /* opt_pass methods: */
  /* The epiphany backend creates a second instance of this pass, so we need
     a clone method.  */
  opt_pass * clone () { return new pass_mode_switching (m_ctxt); }
  virtual bool gate (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      return true;
#else
      return false;
#endif
    }

  virtual unsigned int execute (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
      return 0;
    }

}; // class pass_mode_switching

} // anon namespace

rtl_opt_pass *
make_pass_mode_switching (gcc::context *ctxt)
{
  return new pass_mode_switching (ctxt);
}