/* CPU mode switching
   Copyright (C) 1998-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "target.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "predict.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "basic-block.h"
#include "tm_p.h"
#include "tree-pass.h"
#include "df.h"
#include "emit-rtl.h"

/* We want target macros for the mode switching code to be able to refer
   to instruction attribute values.  */
#include "insn-attr.h"
#ifdef OPTIMIZE_MODE_SWITCHING

/* The algorithm for setting the modes consists of scanning the insn list
   and finding all the insns which require a specific mode.  Each insn gets
   a unique struct seginfo element.  These structures are inserted into a list
   for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of all insns within that basic block, in the order they are encountered.

   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).

   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until for each entity all modes are exhausted.

   More details can be found in the code of optimize_mode_switching.  */
/* This structure contains the information for each insn which requires
   either single or double mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.  */
struct seginfo
{
  int mode;
  rtx_insn *insn_ptr;
  int bbnum;
  struct seginfo *next;
  HARD_REG_SET regs_live;
};
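
/* Per-basic-block, per-entity bookkeeping.  SEGINFO is the list of segments
   recorded for the block; COMPUTING is the mode the block ends up computing
   (its last required mode, or the number of modes if none); MODE_IN and
   MODE_OUT are the single mode available on entry to and exit from the
   block, as derived below from the LCM availability bitmaps, again with
   the number of modes meaning "none known".  */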
struct bb_info
{
  struct seginfo *seginfo;
  int computing;
  int mode_out;
  int mode_in;
};
static struct seginfo * new_seginfo (int, rtx_insn *, int, HARD_REG_SET);
static void add_seginfo (struct bb_info *, struct seginfo *);
static void reg_dies (rtx, HARD_REG_SET *);
static void reg_becomes_live (rtx, const_rtx, void *);
/* Clear mode I of entity J in bitmap B.  */
#define clear_mode_bit(b, j, i) \
       bitmap_clear_bit (b, (j * max_num_modes) + i)

/* Test mode I of entity J in bitmap B.  */
#define mode_bit_p(b, j, i) \
       bitmap_bit_p (b, (j * max_num_modes) + i)

/* Set mode I of entity J in bitmap B.  */
#define set_mode_bit(b, j, i) \
       bitmap_set_bit (b, (j * max_num_modes) + i)
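
/* Each per-block bitmap row holds n_entities * max_num_modes bits, grouped
   by entity: the bit for mode I of entity J is bit J * max_num_modes + I.
   For example, with max_num_modes == 4, mode 2 of entity 1 is bit 6.  */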
/* Emit mode segments from EDGE_LIST associated with entity E.
   INFO gives mode availability for each mode.  */

static bool
commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
{
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
    {
      edge eg = INDEX_EDGE (edge_list, ed);
      int mode;

      if ((mode = (int)(intptr_t)(eg->aux)) != -1)
        {
          HARD_REG_SET live_at_edge;
          basic_block src_bb = eg->src;
          int cur_mode = info[src_bb->index].mode_out;
          rtx_insn *mode_set;

          REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

          rtl_profile_for_edge (eg);
          start_sequence ();

          targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

          mode_set = get_insns ();
          end_sequence ();
          default_rtl_profile ();

          /* Do not bother to insert an empty sequence.  */
          if (mode_set == NULL)
            continue;

          /* We should not get an abnormal edge here.  */
          gcc_assert (! (eg->flags & EDGE_ABNORMAL));

          need_commit = true;
          insert_insn_on_edge (mode_set, eg);
        }
    }

  return need_commit;
}
/* Allocate a new SEGINFO structure, initialized with the MODE, INSN,
   and basic block BB parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */

static struct seginfo *
new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
{
  struct seginfo *ptr;

  gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
              || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
  ptr = XNEW (struct seginfo);
  ptr->mode = mode;
  ptr->insn_ptr = insn;
  ptr->bbnum = bb;
  ptr->next = NULL;
  COPY_HARD_REG_SET (ptr->regs_live, regs_live);
  return ptr;
}
/* Add a seginfo element to the end of a list.
   HEAD is a pointer to the list beginning.
   INFO is the structure to be linked in.  */

static void
add_seginfo (struct bb_info *head, struct seginfo *info)
{
  struct seginfo *ptr;

  if (head->seginfo == NULL)
    head->seginfo = info;
  else
    {
      ptr = head->seginfo;
      while (ptr->next != NULL)
        ptr = ptr->next;
      ptr->next = info;
    }
}
/* Record in LIVE that register REG died.  */

static void
reg_dies (rtx reg, HARD_REG_SET *live)
{
  int regno;

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    remove_from_hard_reg_set (live, GET_MODE (reg), regno);
}
/* Record in LIVE that register REG became live.
   This is called via note_stores.  */

static void
reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
}
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it is
   inserted before the exit block.  Otherwise return null.  */

static basic_block
create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
{
  edge eg;
  edge_iterator ei;
  basic_block pre_exit;

  /* The only non-call predecessor at this stage is a block with a
     fallthrough edge; there can be at most one, but there could be
     none at all, e.g. when exit is called.  */
  pre_exit = 0;
  FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (eg->flags & EDGE_FALLTHRU)
      {
        basic_block src_bb = eg->src;
        rtx_insn *last_insn;
        rtx ret_reg;

        gcc_assert (!pre_exit);
        /* If this function returns a value at the end, we have to
           insert the final mode switch before the return value copy
           to its hard register.  */
        if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
            && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
            && GET_CODE (PATTERN (last_insn)) == USE
            && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
          {
            int ret_start = REGNO (ret_reg);
            int nregs = REG_NREGS (ret_reg);
            int ret_end = ret_start + nregs;
            bool short_block = false;
            bool multi_reg_return = false;
            bool forced_late_switch = false;
            rtx_insn *before_return_copy;

            do
              {
                rtx_insn *return_copy = PREV_INSN (last_insn);
                rtx return_copy_pat, copy_reg;
                int copy_start, copy_num;
                int j;

                if (NONDEBUG_INSN_P (return_copy))
                  {
                    /* When using SJLJ exceptions, the call to the
                       unregister function is inserted between the
                       clobber of the return value and the copy.
                       We do not want to split the block before this
                       or any other call; if we have not found the
                       copy yet, the copy must have been deleted.  */
                    if (CALL_P (return_copy))
                      {
                        short_block = true;
                        break;
                      }
                    return_copy_pat = PATTERN (return_copy);
                    switch (GET_CODE (return_copy_pat))
                      {
                      case USE:
                        /* Skip USEs of multiple return registers.
                           __builtin_apply pattern is also handled here.  */
                        if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
                            && (targetm.calls.function_value_regno_p
                                (REGNO (XEXP (return_copy_pat, 0)))))
                          {
                            multi_reg_return = true;
                            last_insn = return_copy;
                            continue;
                          }
                        break;

                      case ASM_OPERANDS:
                        /* Skip barrier insns.  */
                        if (!MEM_VOLATILE_P (return_copy_pat))
                          break;

                        /* Fall through.  */

                      case ASM_INPUT:
                      case UNSPEC_VOLATILE:
                        last_insn = return_copy;
                        continue;

                      default:
                        break;
                      }
                    /* If the return register is not (in its entirety)
                       likely spilled, the return copy might be
                       partially or completely optimized away.  */
                    return_copy_pat = single_set (return_copy);
                    if (!return_copy_pat)
                      {
                        return_copy_pat = PATTERN (return_copy);
                        if (GET_CODE (return_copy_pat) != CLOBBER)
                          break;
                        else if (!optimize)
                          {
                            /* This might be (clobber (reg [<result>]))
                               when not optimizing.  Then check if
                               the previous insn is the clobber for
                               the return register.  */
                            copy_reg = SET_DEST (return_copy_pat);
                            if (GET_CODE (copy_reg) == REG
                                && !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
                              {
                                if (INSN_P (PREV_INSN (return_copy)))
                                  {
                                    return_copy = PREV_INSN (return_copy);
                                    return_copy_pat = PATTERN (return_copy);
                                    if (GET_CODE (return_copy_pat) != CLOBBER)
                                      break;
                                  }
                              }
                          }
                      }
                    copy_reg = SET_DEST (return_copy_pat);
                    if (GET_CODE (copy_reg) == REG)
                      copy_start = REGNO (copy_reg);
                    else if (GET_CODE (copy_reg) == SUBREG
                             && GET_CODE (SUBREG_REG (copy_reg)) == REG)
                      copy_start = REGNO (SUBREG_REG (copy_reg));
                    else
                      {
                        /* When control reaches the end of a non-void
                           function, there are no return copy insns at
                           all.  This avoids an ICE on that invalid
                           function.  */
                        if (ret_start + nregs == ret_end)
                          short_block = true;
                        break;
                      }
                    if (!targetm.calls.function_value_regno_p (copy_start))
                      copy_num = 0;
                    else
                      copy_num
                        = hard_regno_nregs[copy_start][GET_MODE (copy_reg)];
                    /* If the return register is not likely spilled - as is
                       the case for floating point on SH4 - then it might
                       be set by an arithmetic operation that needs a
                       different mode than the exit block.  */
                    for (j = n_entities - 1; j >= 0; j--)
                      {
                        int e = entity_map[j];
                        int mode =
                          targetm.mode_switching.needed (e, return_copy);

                        if (mode != num_modes[e]
                            && mode != targetm.mode_switching.exit (e))
                          break;
                      }
                    if (j >= 0)
                      {
                        /* __builtin_return emits a sequence of loads to all
                           return registers.  One of them might require
                           another mode than MODE_EXIT, even if it is
                           unrelated to the return value, so we want to put
                           the final mode switch after it.  */
                        if (multi_reg_return
                            && targetm.calls.function_value_regno_p
                               (copy_start))
                          forced_late_switch = true;

                        /* For the SH4, floating point loads depend on fpscr,
                           thus we might need to put the final mode switch
                           after the return value copy.  That is still OK,
                           because a floating point return value does not
                           conflict with address reloads.  */
                        if (copy_start >= ret_start
                            && copy_start + copy_num <= ret_end
                            && OBJECT_P (SET_SRC (return_copy_pat)))
                          forced_late_switch = true;
                        break;
                      }
                    if (copy_num == 0)
                      {
                        last_insn = return_copy;
                        continue;
                      }

                    if (copy_start >= ret_start
                        && copy_start + copy_num <= ret_end)
                      nregs -= copy_num;
                    else if (!multi_reg_return
                             || !targetm.calls.function_value_regno_p
                                (copy_start))
                      break;
                    last_insn = return_copy;
                  }
                /* ??? Exception handling can lead to the return value
                   copy being already separated from the return value use,
                   as in unwind-dw2.c .
                   Similarly, conditionally returning without a value,
                   and conditionally using builtin_return can lead to an
                   isolated use.  */
                if (return_copy == BB_HEAD (src_bb))
                  {
                    short_block = true;
                    break;
                  }
                last_insn = return_copy;
              }
            while (nregs);
            /* If we didn't see a full return value copy, verify that there
               is a plausible reason for this.  If some, but not all of the
               return register is likely spilled, we can expect that there
               is a copy for the likely spilled part.  */
            gcc_assert (!nregs
                        || forced_late_switch
                        || short_block
                        || !(targetm.class_likely_spilled_p
                             (REGNO_REG_CLASS (ret_start)))
                        || (nregs
                            != hard_regno_nregs[ret_start][GET_MODE (ret_reg)])
                        /* For multi-hard-register floating point
                           values, sometimes the likely-spilled part
                           is ordinarily copied first, then the other
                           part is set with an arithmetic operation.
                           This doesn't actually cause reload
                           failures, so let it pass.  */
                        || (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
                            && nregs != 1));
            if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
              {
                before_return_copy
                  = emit_note_before (NOTE_INSN_DELETED, last_insn);
                /* Instructions preceding LAST_INSN in the same block might
                   require a different mode than MODE_EXIT, so if we might
                   have such instructions, keep them in a separate block
                   from pre_exit.  */
                src_bb = split_block (src_bb,
                                      PREV_INSN (before_return_copy))->dest;
              }
            else
              before_return_copy = last_insn;
            pre_exit = split_block (src_bb, before_return_copy)->src;
          }
        else
          {
            pre_exit = split_edge (eg);
          }
      }

  return pre_exit;
}
/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return true if we did work.  */

static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;
  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
        int entry_exit_extra = 0;

        /* Create the list of segments within each basic block.
           If NORMAL_MODE is defined, allow for two extra
           blocks split from the entry and exit block.  */
        if (targetm.mode_switching.entry && targetm.mode_switching.exit)
          entry_exit_extra = 3;

        bb_info[n_entities]
          = XCNEWVEC (struct bb_info,
                      last_basic_block_for_fn (cfun) + entry_exit_extra);
        entity_map[n_entities++] = e;
        if (num_modes[e] > max_num_modes)
          max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure that, if MODE_ENTRY is defined, MODE_EXIT is defined too.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
              || (!targetm.mode_switching.entry
                  && !targetm.mode_switching.exit));
  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
         NORMAL_MODE is supplied there.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

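  /* The CFG may have been changed by the edge splitting above; recompute
     dataflow so that the liveness queries (df_get_live_in/out) below are
     up to date.  */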
  df_analyze ();
  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine what mode (if any) the first use of entity E in each
         basic block requires.  This will be the mode that is anticipatable
         for this block.  Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
        {
          struct seginfo *ptr;
          int last_mode = no_mode;
          bool any_set_required = false;
          HARD_REG_SET live_now;

          info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

          REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));
          /* Pretend the mode is clobbered across abnormal edges.  */
          {
            edge_iterator ei;
            edge eg;
            FOR_EACH_EDGE (eg, ei, bb->preds)
              if (eg->flags & EDGE_COMPLEX)
                break;
            if (eg)
              {
                rtx_insn *ins_pos = BB_HEAD (bb);
                if (LABEL_P (ins_pos))
                  ins_pos = NEXT_INSN (ins_pos);
                gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
                if (ins_pos != BB_END (bb))
                  ins_pos = NEXT_INSN (ins_pos);
                ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
                add_seginfo (info + bb->index, ptr);
                for (i = 0; i < no_mode; i++)
                  clear_mode_bit (transp[bb->index], j, i);
              }
          }
          FOR_BB_INSNS (bb, insn)
            {
              if (INSN_P (insn))
                {
                  int mode = targetm.mode_switching.needed (e, insn);
                  rtx link;

                  if (mode != no_mode && mode != last_mode)
                    {
                      any_set_required = true;
                      last_mode = mode;
                      ptr = new_seginfo (mode, insn, bb->index, live_now);
                      add_seginfo (info + bb->index, ptr);
                      for (i = 0; i < no_mode; i++)
                        clear_mode_bit (transp[bb->index], j, i);
                    }

                  if (targetm.mode_switching.after)
                    last_mode = targetm.mode_switching.after (e, last_mode,
                                                              insn);

                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
                      reg_dies (XEXP (link, 0), &live_now);

                  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_UNUSED)
                      reg_dies (XEXP (link, 0), &live_now);
                }
            }
          info[bb->index].computing = last_mode;
          /* Check for blocks without ANY mode requirements.
             N.B. because of MODE_AFTER, last_mode might still
             be different from no_mode, in which case we need to
             mark the block as nontransparent.  */
          if (!any_set_required)
            {
              ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
              if (last_mode != no_mode)
                for (i = 0; i < no_mode; i++)
                  clear_mode_bit (transp[bb->index], j, i);
            }
        }
      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
        {
          int mode = targetm.mode_switching.entry (e);

          info[post_entry->index].mode_out =
            info[post_entry->index].mode_in = no_mode;
          if (pre_exit)
            {
              info[pre_exit->index].mode_out =
                info[pre_exit->index].mode_in = no_mode;
            }

          if (mode != no_mode)
            {
              bb = post_entry;

              /* By always making this nontransparent, we save
                 an extra check in make_preds_opaque.  We also
                 need this to avoid confusing pre_edge_lcm when
                 antic is cleared but transp and comp are set.  */
              for (i = 0; i < no_mode; i++)
                clear_mode_bit (transp[bb->index], j, i);

              /* Insert a fake computing definition of MODE into entry
                 blocks which compute no mode.  This represents the mode on
                 entry.  */
              info[bb->index].computing = mode;

              if (pre_exit)
                info[pre_exit->index].seginfo->mode =
                  targetm.mode_switching.exit (e);
            }
        }
      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
        {
          int m = targetm.mode_switching.priority (entity_map[j], i);

          FOR_EACH_BB_FN (bb, cfun)
            {
              if (info[bb->index].seginfo->mode == m)
                set_mode_bit (antic[bb->index], j, m);

              if (info[bb->index].computing == m)
                set_mode_bit (comp[bb->index], j, m);
            }
        }
    }
  /* Calculate the optimal locations for the placement of mode switches to
     modes with priority I.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);
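
  /* Run the LCM solver.  For each (entity, mode) bit, INSERT says on which
     edges a mode set has to be inserted, DEL marks blocks whose own mode
     computation becomes redundant, and AVIN/AVOUT give mode availability
     at block entry and exit; all of these are consumed below.  */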
  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
                                kill, avin, avout, &insert, &del);
  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Mark on each edge the mode set (if any) that LCM decided to
         insert there.  */

      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
        {
          edge eg = INDEX_EDGE (edge_list, ed);

          eg->aux = (void *)(intptr_t)-1;

          for (i = 0; i < no_mode; i++)
            {
              int m = targetm.mode_switching.priority (entity_map[j], i);
              if (mode_bit_p (insert[ed], j, m))
                {
                  eg->aux = (void *)(intptr_t)m;
                  break;
                }
            }
        }
      FOR_EACH_BB_FN (bb, cfun)
        {
          struct bb_info *info = bb_info[j];
          int last_mode = no_mode;

          /* Initialize mode-out availability for bb.  */
          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (avout[bb->index], j, i))
              {
                if (last_mode == no_mode)
                  last_mode = i;
                if (last_mode != i)
                  {
                    last_mode = no_mode;
                    break;
                  }
              }
          info[bb->index].mode_out = last_mode;

          /* Initialize mode-in availability for bb.  */
          last_mode = no_mode;
          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (avin[bb->index], j, i))
              {
                if (last_mode == no_mode)
                  last_mode = i;
                if (last_mode != i)
                  {
                    last_mode = no_mode;
                    break;
                  }
              }
          info[bb->index].mode_in = last_mode;

          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (del[bb->index], j, i))
              info[bb->index].seginfo->mode = no_mode;
        }
      /* Now output the remaining mode sets in all the segments.  */

      /* In case there was no mode inserted, the mode information on the
         edge might not be complete.
         Update mode info on edges and commit pending mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();
      FOR_EACH_BB_FN (bb, cfun)
        {
          struct seginfo *ptr, *next;
          int cur_mode = bb_info[j][bb->index].mode_in;

          for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
            {
              next = ptr->next;
              if (ptr->mode != no_mode)
                {
                  rtx_insn *mode_set;

                  rtl_profile_for_bb (bb);
                  start_sequence ();

                  targetm.mode_switching.emit (entity_map[j], ptr->mode,
                                               cur_mode, ptr->regs_live);
                  mode_set = get_insns ();
                  end_sequence ();

                  /* Modes kill each other inside a basic block.  */
                  cur_mode = ptr->mode;

                  /* Insert MODE_SET only if it is nonempty.  */
                  if (mode_set != NULL_RTX)
                    {
                      emitted = true;
                      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
                        /* We need to emit the insns in a FIFO-like manner,
                           i.e. the first to be emitted at our insertion
                           point ends up first in the instruction stream.
                           Because we made sure that NOTE_INSN_BASIC_BLOCK is
                           only used for initially empty basic blocks, we
                           can achieve this by appending at the end of
                           the block.  */
                        emit_insn_after
                          (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
                      else
                        emit_insn_before (mode_set, ptr->insn_ptr);
                    }

                  default_rtl_profile ();
                }

              free (ptr);
            }
        }

      free (bb_info[j]);
    }
  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}

#endif /* OPTIMIZE_MODE_SWITCHING */
namespace {

const pass_data pass_data_mode_switching =
{
  RTL_PASS, /* type */
  "mode_sw", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_MODE_SWITCH, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_mode_switching : public rtl_opt_pass
{
public:
  pass_mode_switching (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_mode_switching, ctxt)
  {}

  /* opt_pass methods: */
  /* The epiphany backend creates a second instance of this pass, so we need
     a clone method.  */
  opt_pass * clone () { return new pass_mode_switching (m_ctxt); }
  virtual bool gate (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      return true;
#else
      return false;
#endif
    }

  virtual unsigned int execute (function *)
    {
#ifdef OPTIMIZE_MODE_SWITCHING
      optimize_mode_switching ();
#endif /* OPTIMIZE_MODE_SWITCHING */
      return 0;
    }

}; // class pass_mode_switching

} // anon namespace
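
/* Factory used by the pass manager to create a fresh instance of the
   mode-switching pass.  */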
rtl_opt_pass *
make_pass_mode_switching (gcc::context *ctxt)
{
  return new pass_mode_switching (ctxt);
}