gcc/sel-sched-ir.c
1 /* Instruction scheduling pass. Selective scheduler and pipeliner.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "toplev.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "regs.h"
29 #include "function.h"
30 #include "flags.h"
31 #include "insn-config.h"
32 #include "insn-attr.h"
33 #include "except.h"
34 #include "toplev.h"
35 #include "recog.h"
36 #include "params.h"
37 #include "target.h"
38 #include "timevar.h"
39 #include "tree-pass.h"
40 #include "sched-int.h"
41 #include "ggc.h"
42 #include "tree.h"
43 #include "vec.h"
44 #include "langhooks.h"
45 #include "rtlhooks-def.h"
46 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
48 #ifdef INSN_SCHEDULING
49 #include "sel-sched-ir.h"
50 /* We don't have to use it except for sel_print_insn. */
51 #include "sel-sched-dump.h"
53 /* A vector holding bb info for whole scheduling pass. */
54 VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;
56 /* A vector holding bb info. */
57 VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;
59 /* A pool for allocating all lists. */
60 alloc_pool sched_lists_pool;
62 /* This contains information about successors for compute_av_set. */
63 struct succs_info current_succs;
65 /* Data structure to describe interaction with the generic scheduler utils. */
66 static struct common_sched_info_def sel_common_sched_info;
68 /* The loop nest being pipelined. */
69 struct loop *current_loop_nest;
71 /* LOOP_NESTS is a vector containing the corresponding loop nest for
72 each region. */
73 static VEC(loop_p, heap) *loop_nests = NULL;
75 /* Saves blocks already in loop regions, indexed by bb->index. */
76 static sbitmap bbs_in_loop_rgns = NULL;
78 /* CFG hooks that are saved before changing create_basic_block hook. */
79 static struct cfg_hooks orig_cfg_hooks;
82 /* Array containing reverse topological index of function basic blocks,
83 indexed by BB->INDEX. */
84 static int *rev_top_order_index = NULL;
86 /* Length of the above array. */
87 static int rev_top_order_index_len = -1;
89 /* A regset pool structure. */
90 static struct
92 /* The stack to which regsets are returned. */
93 regset *v;
95 /* Its pointer. */
96 int n;
98 /* Its size. */
99 int s;
101 /* In VV we save all generated regsets so that, when destructing the
102 pool, we can compare it with V and check that every regset was returned
103 back to the pool. */
104 regset *vv;
106 /* The pointer of VV stack. */
107 int nn;
109 /* Its size. */
110 int ss;
112 /* The difference between allocated and returned regsets. */
113 int diff;
114 } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
116 /* This represents the nop pool. */
117 static struct
119 /* The vector which holds previously emitted nops. */
120 insn_t *v;
122 /* Its pointer. */
123 int n;
125 /* Its size. */
126 int s;
127 } nop_pool = { NULL, 0, 0 };
129 /* The pool for basic block notes. */
130 static rtx_vec_t bb_note_pool;
132 /* A NOP pattern used to emit placeholder insns. */
133 rtx nop_pattern = NULL_RTX;
134 /* A special instruction that resides in EXIT_BLOCK.
135 EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK. */
136 rtx exit_insn = NULL_RTX;
138 /* TRUE if, while scheduling the current region (which is a loop), its
139 preheader was removed. */
140 bool preheader_removed = false;
143 /* Forward static declarations. */
144 static void fence_clear (fence_t);
146 static void deps_init_id (idata_t, insn_t, bool);
147 static void init_id_from_df (idata_t, insn_t, bool);
148 static expr_t set_insn_init (expr_t, vinsn_t, int);
150 static void cfg_preds (basic_block, insn_t **, int *);
151 static void prepare_insn_expr (insn_t, int);
152 static void free_history_vect (VEC (expr_history_def, heap) **);
154 static void move_bb_info (basic_block, basic_block);
155 static void remove_empty_bb (basic_block, bool);
156 static void sel_remove_loop_preheader (void);
158 static bool insn_is_the_only_one_in_bb_p (insn_t);
159 static void create_initial_data_sets (basic_block);
161 static void free_av_set (basic_block);
162 static void invalidate_av_set (basic_block);
163 static void extend_insn_data (void);
164 static void sel_init_new_insn (insn_t, int);
165 static void finish_insns (void);
167 /* Various list functions. */
169 /* Copy an instruction list L. */
170 ilist_t
171 ilist_copy (ilist_t l)
173 ilist_t head = NULL, *tailp = &head;
175 while (l)
177 ilist_add (tailp, ILIST_INSN (l));
178 tailp = &ILIST_NEXT (*tailp);
179 l = ILIST_NEXT (l);
182 return head;
185 /* Invert an instruction list L. */
186 ilist_t
187 ilist_invert (ilist_t l)
189 ilist_t res = NULL;
191 while (l)
193 ilist_add (&res, ILIST_INSN (l));
194 l = ILIST_NEXT (l);
197 return res;
200 /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
201 void
202 blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
204 bnd_t bnd;
206 _list_add (lp);
207 bnd = BLIST_BND (*lp);
209 BND_TO (bnd) = to;
210 BND_PTR (bnd) = ptr;
211 BND_AV (bnd) = NULL;
212 BND_AV1 (bnd) = NULL;
213 BND_DC (bnd) = dc;
217 /* Remove the list node pointed to by LP. */
217 void
218 blist_remove (blist_t *lp)
220 bnd_t b = BLIST_BND (*lp);
222 av_set_clear (&BND_AV (b));
223 av_set_clear (&BND_AV1 (b));
224 ilist_clear (&BND_PTR (b));
226 _list_remove (lp);
229 /* Init a fence tail L. */
230 void
231 flist_tail_init (flist_tail_t l)
233 FLIST_TAIL_HEAD (l) = NULL;
234 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
237 /* Try to find fence corresponding to INSN in L. */
238 fence_t
239 flist_lookup (flist_t l, insn_t insn)
241 while (l)
243 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
244 return FLIST_FENCE (l);
246 l = FLIST_NEXT (l);
249 return NULL;
252 /* Init the fields of F before running fill_insns. */
253 static void
254 init_fence_for_scheduling (fence_t f)
256 FENCE_BNDS (f) = NULL;
257 FENCE_PROCESSED_P (f) = false;
258 FENCE_SCHEDULED_P (f) = false;
261 /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
262 static void
263 flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
264 insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
265 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
266 int cycle, int cycle_issued_insns, int issue_more,
267 bool starts_cycle_p, bool after_stall_p)
269 fence_t f;
271 _list_add (lp);
272 f = FLIST_FENCE (*lp);
274 FENCE_INSN (f) = insn;
276 gcc_assert (state != NULL);
277 FENCE_STATE (f) = state;
279 FENCE_CYCLE (f) = cycle;
280 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
281 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
282 FENCE_AFTER_STALL_P (f) = after_stall_p;
284 gcc_assert (dc != NULL);
285 FENCE_DC (f) = dc;
287 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
288 FENCE_TC (f) = tc;
290 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
291 FENCE_ISSUE_MORE (f) = issue_more;
292 FENCE_EXECUTING_INSNS (f) = executing_insns;
293 FENCE_READY_TICKS (f) = ready_ticks;
294 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
295 FENCE_SCHED_NEXT (f) = sched_next;
297 init_fence_for_scheduling (f);
300 /* Remove the head node of the list pointed to by LP. */
301 static void
302 flist_remove (flist_t *lp)
304 if (FENCE_INSN (FLIST_FENCE (*lp)))
305 fence_clear (FLIST_FENCE (*lp));
306 _list_remove (lp);
309 /* Clear the fence list pointed to by LP. */
310 void
311 flist_clear (flist_t *lp)
313 while (*lp)
314 flist_remove (lp);
317 /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
318 void
319 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
321 def_t d;
323 _list_add (dl);
324 d = DEF_LIST_DEF (*dl);
326 d->orig_insn = original_insn;
327 d->crosses_call = crosses_call;
331 /* Functions to work with target contexts. */
333 /* Bulk target context. It is convenient for debugging purposes to ensure
334 that there are no uninitialized (null) target contexts. */
335 static tc_t bulk_tc = (tc_t) 1;
337 /* Target hooks wrappers. In the future we can provide some default
338 implementations for them. */
340 /* Allocate a store for the target context. */
341 static tc_t
342 alloc_target_context (void)
344 return (targetm.sched.alloc_sched_context
345 ? targetm.sched.alloc_sched_context () : bulk_tc);
348 /* Init target context TC.
349 If CLEAN_P is true, then make TC as it is at the beginning of the scheduler.
350 Otherwise, copy the current backend context to TC. */
351 static void
352 init_target_context (tc_t tc, bool clean_p)
354 if (targetm.sched.init_sched_context)
355 targetm.sched.init_sched_context (tc, clean_p);
358 /* Allocate and initialize a target context. Meaning of CLEAN_P is the same as
359 in init_target_context (). */
360 tc_t
361 create_target_context (bool clean_p)
363 tc_t tc = alloc_target_context ();
365 init_target_context (tc, clean_p);
366 return tc;
369 /* Copy TC to the current backend context. */
370 void
371 set_target_context (tc_t tc)
373 if (targetm.sched.set_sched_context)
374 targetm.sched.set_sched_context (tc);
377 /* TC is about to be destroyed. Free any internal data. */
378 static void
379 clear_target_context (tc_t tc)
381 if (targetm.sched.clear_sched_context)
382 targetm.sched.clear_sched_context (tc);
385 /* Clear and free it. */
386 static void
387 delete_target_context (tc_t tc)
389 clear_target_context (tc);
391 if (targetm.sched.free_sched_context)
392 targetm.sched.free_sched_context (tc);
395 /* Make a copy of FROM in TO.
396 NB: Maybe this should be a hook. */
397 static void
398 copy_target_context (tc_t to, tc_t from)
400 tc_t tmp = create_target_context (false);
402 set_target_context (from);
403 init_target_context (to, false);
405 set_target_context (tmp);
406 delete_target_context (tmp);
409 /* Create a copy of TC. */
410 static tc_t
411 create_copy_of_target_context (tc_t tc)
413 tc_t copy = alloc_target_context ();
415 copy_target_context (copy, tc);
417 return copy;
420 /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
421 is the same as in init_target_context (). */
422 void
423 reset_target_context (tc_t tc, bool clean_p)
425 clear_target_context (tc);
426 init_target_context (tc, clean_p);
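/* These wrappers are used in matched pairs throughout this file: for
   example, init_fences below creates a fence's target context with
   create_target_context (true), fence_clear releases it again with
   delete_target_context, and add_dirty_fence_to_fences copies it with
   create_copy_of_target_context. */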
429 /* Functions to work with dependence contexts.
430 Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
431 context. It accumulates information about processed insns to decide if
432 current insn is dependent on the processed ones. */
434 /* Make a copy of FROM in TO. */
435 static void
436 copy_deps_context (deps_t to, deps_t from)
438 init_deps (to, false);
439 deps_join (to, from);
442 /* Allocate store for dep context. */
443 static deps_t
444 alloc_deps_context (void)
446 return XNEW (struct deps_desc);
449 /* Allocate and initialize dep context. */
450 static deps_t
451 create_deps_context (void)
453 deps_t dc = alloc_deps_context ();
455 init_deps (dc, false);
456 return dc;
459 /* Create a copy of FROM. */
460 static deps_t
461 create_copy_of_deps_context (deps_t from)
463 deps_t to = alloc_deps_context ();
465 copy_deps_context (to, from);
466 return to;
469 /* Clean up internal data of DC. */
470 static void
471 clear_deps_context (deps_t dc)
473 free_deps (dc);
476 /* Clear and free DC. */
477 static void
478 delete_deps_context (deps_t dc)
480 clear_deps_context (dc);
481 free (dc);
484 /* Clear and init DC. */
485 static void
486 reset_deps_context (deps_t dc)
488 clear_deps_context (dc);
489 init_deps (dc, false);
492 /* This structure describes the dependence analysis hooks for advancing
493 dependence context. */
494 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
496 NULL,
498 NULL, /* start_insn */
499 NULL, /* finish_insn */
500 NULL, /* start_lhs */
501 NULL, /* finish_lhs */
502 NULL, /* start_rhs */
503 NULL, /* finish_rhs */
504 haifa_note_reg_set,
505 haifa_note_reg_clobber,
506 haifa_note_reg_use,
507 NULL, /* note_mem_dep */
508 NULL, /* note_dep */
510 0, 0, 0
513 /* Process INSN and add its impact on DC. */
514 void
515 advance_deps_context (deps_t dc, insn_t insn)
517 sched_deps_info = &advance_deps_context_sched_deps_info;
518 deps_analyze_insn (dc, insn);
522 /* Functions to work with DFA states. */
524 /* Allocate store for a DFA state. */
525 static state_t
526 state_alloc (void)
528 return xmalloc (dfa_state_size);
531 /* Allocate and initialize DFA state. */
532 static state_t
533 state_create (void)
535 state_t state = state_alloc ();
537 state_reset (state);
538 advance_state (state);
539 return state;
542 /* Free DFA state. */
543 static void
544 state_free (state_t state)
546 free (state);
549 /* Make a copy of FROM in TO. */
550 static void
551 state_copy (state_t to, state_t from)
553 memcpy (to, from, dfa_state_size);
556 /* Create a copy of FROM. */
557 static state_t
558 state_create_copy (state_t from)
560 state_t to = state_alloc ();
562 state_copy (to, from);
563 return to;
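/* Note: a DFA state is just a raw buffer of dfa_state_size bytes, so
   state_copy and state_create_copy are plain memcpy's. Any state obtained
   from state_create or state_create_copy must eventually be released with
   state_free, as is done e.g. in merge_fences below. */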
567 /* Functions to work with fences. */
569 /* Clear the fence. */
570 static void
571 fence_clear (fence_t f)
573 state_t s = FENCE_STATE (f);
574 deps_t dc = FENCE_DC (f);
575 void *tc = FENCE_TC (f);
577 ilist_clear (&FENCE_BNDS (f));
579 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
580 || (s == NULL && dc == NULL && tc == NULL));
582 if (s != NULL)
583 free (s);
585 if (dc != NULL)
586 delete_deps_context (dc);
588 if (tc != NULL)
589 delete_target_context (tc);
590 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
591 free (FENCE_READY_TICKS (f));
592 FENCE_READY_TICKS (f) = NULL;
595 /* Init a list of fences with successors of OLD_FENCE. */
596 void
597 init_fences (insn_t old_fence)
599 insn_t succ;
600 succ_iterator si;
601 bool first = true;
602 int ready_ticks_size = get_max_uid () + 1;
604 FOR_EACH_SUCC_1 (succ, si, old_fence,
605 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
608 if (first)
609 first = false;
610 else
611 gcc_assert (flag_sel_sched_pipelining_outer_loops);
613 flist_add (&fences, succ,
614 state_create (),
615 create_deps_context () /* dc */,
616 create_target_context (true) /* tc */,
617 NULL_RTX /* last_scheduled_insn */,
618 NULL, /* executing_insns */
619 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
620 ready_ticks_size,
621 NULL_RTX /* sched_next */,
622 1 /* cycle */, 0 /* cycle_issued_insns */,
623 issue_rate, /* issue_more */
624 1 /* starts_cycle_p */, 0 /* after_stall_p */);
628 /* Merge two fences (filling the fields of fence F with resulting values) by
629 the following rules: 1) state, target context and last scheduled insn are
630 propagated from the fallthrough edge if it is available;
631 2) deps context and cycle are propagated from the more probable edge;
632 3) all other fields are set to the corresponding constant values.
634 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
635 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
636 and AFTER_STALL_P are the corresponding fields of the second fence. */
637 static void
638 merge_fences (fence_t f, insn_t insn,
639 state_t state, deps_t dc, void *tc,
640 rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
641 int *ready_ticks, int ready_ticks_size,
642 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
644 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
646 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
647 && !sched_next && !FENCE_SCHED_NEXT (f));
649 /* Check if we can decide which path the fences came from.
650 If we can't (or don't want to), reset everything. */
651 if (last_scheduled_insn == NULL
652 || last_scheduled_insn_old == NULL
653 /* This is the case when INSN is reachable on several paths from
654 one insn (this can happen when pipelining of outer loops is on and
655 there are two edges: one going around the inner loop and the other
656 right through it; in such a case, just reset everything). */
657 || last_scheduled_insn == last_scheduled_insn_old)
659 state_reset (FENCE_STATE (f));
660 state_free (state);
662 reset_deps_context (FENCE_DC (f));
663 delete_deps_context (dc);
665 reset_target_context (FENCE_TC (f), true);
666 delete_target_context (tc);
668 if (cycle > FENCE_CYCLE (f))
669 FENCE_CYCLE (f) = cycle;
671 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
672 FENCE_ISSUE_MORE (f) = issue_rate;
673 VEC_free (rtx, gc, executing_insns);
674 free (ready_ticks);
675 if (FENCE_EXECUTING_INSNS (f))
676 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
677 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
678 if (FENCE_READY_TICKS (f))
679 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
681 else
683 edge edge_old = NULL, edge_new = NULL;
684 edge candidate;
685 succ_iterator si;
686 insn_t succ;
688 /* Find fallthrough edge. */
689 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
690 candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);
692 if (!candidate
693 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
694 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
696 /* No fallthrough edge leading to basic block of INSN. */
697 state_reset (FENCE_STATE (f));
698 state_free (state);
700 reset_target_context (FENCE_TC (f), true);
701 delete_target_context (tc);
703 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
704 FENCE_ISSUE_MORE (f) = issue_rate;
706 else
707 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
709 /* It would be weird if the same insn were the successor of several
710 fallthrough edges. */
711 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
712 != BLOCK_FOR_INSN (last_scheduled_insn_old));
714 state_free (FENCE_STATE (f));
715 FENCE_STATE (f) = state;
717 delete_target_context (FENCE_TC (f));
718 FENCE_TC (f) = tc;
720 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
721 FENCE_ISSUE_MORE (f) = issue_more;
723 else
725 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
726 state_free (state);
727 delete_target_context (tc);
729 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
730 != BLOCK_FOR_INSN (last_scheduled_insn));
733 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
734 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
735 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
737 if (succ == insn)
739 /* No same successor allowed from several edges. */
740 gcc_assert (!edge_old);
741 edge_old = si.e1;
744 /* Find edge of second predecessor (last_scheduled_insn->insn). */
745 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
746 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
748 if (succ == insn)
750 /* No same successor allowed from several edges. */
751 gcc_assert (!edge_new);
752 edge_new = si.e1;
756 /* Check if we can choose most probable predecessor. */
757 if (edge_old == NULL || edge_new == NULL)
759 reset_deps_context (FENCE_DC (f));
760 delete_deps_context (dc);
761 VEC_free (rtx, gc, executing_insns);
762 free (ready_ticks);
764 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
765 if (FENCE_EXECUTING_INSNS (f))
766 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
767 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
768 if (FENCE_READY_TICKS (f))
769 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
771 else
772 if (edge_new->probability > edge_old->probability)
774 delete_deps_context (FENCE_DC (f));
775 FENCE_DC (f) = dc;
776 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
777 FENCE_EXECUTING_INSNS (f) = executing_insns;
778 free (FENCE_READY_TICKS (f));
779 FENCE_READY_TICKS (f) = ready_ticks;
780 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
781 FENCE_CYCLE (f) = cycle;
783 else
785 /* Leave DC and CYCLE untouched. */
786 delete_deps_context (dc);
787 VEC_free (rtx, gc, executing_insns);
788 free (ready_ticks);
792 /* Fill remaining invariant fields. */
793 if (after_stall_p)
794 FENCE_AFTER_STALL_P (f) = 1;
796 FENCE_ISSUED_INSNS (f) = 0;
797 FENCE_STARTS_CYCLE_P (f) = 1;
798 FENCE_SCHED_NEXT (f) = NULL;
801 /* Add a new fence to NEW_FENCES list, initializing it from all
802 other parameters. */
803 static void
804 add_to_fences (flist_tail_t new_fences, insn_t insn,
805 state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
806 VEC(rtx, gc) *executing_insns, int *ready_ticks,
807 int ready_ticks_size, rtx sched_next, int cycle,
808 int cycle_issued_insns, int issue_rate,
809 bool starts_cycle_p, bool after_stall_p)
811 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
813 if (! f)
815 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
816 last_scheduled_insn, executing_insns, ready_ticks,
817 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
818 issue_rate, starts_cycle_p, after_stall_p);
820 FLIST_TAIL_TAILP (new_fences)
821 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
823 else
825 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
826 executing_insns, ready_ticks, ready_ticks_size,
827 sched_next, cycle, issue_rate, after_stall_p);
831 /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
832 void
833 move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
835 fence_t f, old;
836 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
838 old = FLIST_FENCE (old_fences);
839 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
840 FENCE_INSN (FLIST_FENCE (old_fences)));
841 if (f)
843 merge_fences (f, old->insn, old->state, old->dc, old->tc,
844 old->last_scheduled_insn, old->executing_insns,
845 old->ready_ticks, old->ready_ticks_size,
846 old->sched_next, old->cycle, old->issue_more,
847 old->after_stall_p);
849 else
851 _list_add (tailp);
852 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
853 *FLIST_FENCE (*tailp) = *old;
854 init_fence_for_scheduling (FLIST_FENCE (*tailp));
856 FENCE_INSN (old) = NULL;
859 /* Add a new fence to NEW_FENCES list and initialize most of its data
860 as a clean one. */
861 void
862 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
864 int ready_ticks_size = get_max_uid () + 1;
866 add_to_fences (new_fences,
867 succ, state_create (), create_deps_context (),
868 create_target_context (true),
869 NULL_RTX, NULL,
870 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
871 NULL_RTX, FENCE_CYCLE (fence) + 1,
872 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
875 /* Add a new fence to NEW_FENCES list and initialize all of its data
876 from FENCE and SUCC. */
877 void
878 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
880 int * new_ready_ticks
881 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
883 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
884 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
885 add_to_fences (new_fences,
886 succ, state_create_copy (FENCE_STATE (fence)),
887 create_copy_of_deps_context (FENCE_DC (fence)),
888 create_copy_of_target_context (FENCE_TC (fence)),
889 FENCE_LAST_SCHEDULED_INSN (fence),
890 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
891 new_ready_ticks,
892 FENCE_READY_TICKS_SIZE (fence),
893 FENCE_SCHED_NEXT (fence),
894 FENCE_CYCLE (fence),
895 FENCE_ISSUED_INSNS (fence),
896 FENCE_ISSUE_MORE (fence),
897 FENCE_STARTS_CYCLE_P (fence),
898 FENCE_AFTER_STALL_P (fence));
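/* Taken together, add_clean_fence_to_fences and add_dirty_fence_to_fences
   are the two ways a successor fence is seeded: the "clean" variant starts
   from a fresh state and fresh deps/target contexts, while the "dirty"
   variant copies all of that data from the existing FENCE. */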
902 /* Functions to work with regset and nop pools. */
904 /* Return a new regset from the pool. It might have some of the bits set
905 from previous usage. */
906 regset
907 get_regset_from_pool (void)
909 regset rs;
911 if (regset_pool.n != 0)
912 rs = regset_pool.v[--regset_pool.n];
913 else
914 /* We need to create the regset. */
916 rs = ALLOC_REG_SET (&reg_obstack);
918 if (regset_pool.nn == regset_pool.ss)
919 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
920 (regset_pool.ss = 2 * regset_pool.ss + 1));
921 regset_pool.vv[regset_pool.nn++] = rs;
924 regset_pool.diff++;
926 return rs;
929 /* Same as above, but returns the empty regset. */
930 regset
931 get_clear_regset_from_pool (void)
933 regset rs = get_regset_from_pool ();
935 CLEAR_REG_SET (rs);
936 return rs;
939 /* Return regset RS to the pool for future use. */
940 void
941 return_regset_to_pool (regset rs)
943 regset_pool.diff--;
945 if (regset_pool.n == regset_pool.s)
946 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
947 (regset_pool.s = 2 * regset_pool.s + 1));
948 regset_pool.v[regset_pool.n++] = rs;
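/* A sketch of the intended usage pattern (illustrative only; "some_lv_set"
   is a made-up name, not an existing variable): every regset taken from the
   pool must be handed back, otherwise the regset_pool.diff leak check in
   free_regset_pool below will fail.

     regset tmp = get_clear_regset_from_pool ();
     COPY_REG_SET (tmp, some_lv_set);
     ...
     return_regset_to_pool (tmp); */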
951 #ifdef ENABLE_CHECKING
952 /* This is used as a qsort callback for sorting regset pool stacks.
953 X and XX are addresses of two regsets. They are never equal. */
954 static int
955 cmp_v_in_regset_pool (const void *x, const void *xx)
957 return *((const regset *) x) - *((const regset *) xx);
959 #endif
961 /* Free the regset pool possibly checking for memory leaks. */
962 void
963 free_regset_pool (void)
965 #ifdef ENABLE_CHECKING
967 regset *v = regset_pool.v;
968 int i = 0;
969 int n = regset_pool.n;
971 regset *vv = regset_pool.vv;
972 int ii = 0;
973 int nn = regset_pool.nn;
975 int diff = 0;
977 gcc_assert (n <= nn);
979 /* Sort both vectors so it will be possible to compare them. */
980 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
981 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
983 while (ii < nn)
985 if (v[i] == vv[ii])
986 i++;
987 else
988 /* VV[II] was lost. */
989 diff++;
991 ii++;
994 gcc_assert (diff == regset_pool.diff);
996 #endif
998 /* If this is not true, we have a memory leak. */
999 gcc_assert (regset_pool.diff == 0);
1001 while (regset_pool.n)
1003 --regset_pool.n;
1004 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1007 free (regset_pool.v);
1008 regset_pool.v = NULL;
1009 regset_pool.s = 0;
1011 free (regset_pool.vv);
1012 regset_pool.vv = NULL;
1013 regset_pool.nn = 0;
1014 regset_pool.ss = 0;
1016 regset_pool.diff = 0;
1020 /* Functions to work with nop pools. NOP insns are used as temporary
1021 placeholders for the insns being scheduled, to allow correct updating of
1022 the data sets. When the update is finished, the NOPs are deleted. */
1024 /* A vinsn that is used to represent a nop. This vinsn is shared among all
1025 nops sel-sched generates. */
1026 static vinsn_t nop_vinsn = NULL;
1028 /* Emit a nop before INSN, taking it from pool. */
1029 insn_t
1030 get_nop_from_pool (insn_t insn)
1032 insn_t nop;
1033 bool old_p = nop_pool.n != 0;
1034 int flags;
1036 if (old_p)
1037 nop = nop_pool.v[--nop_pool.n];
1038 else
1039 nop = nop_pattern;
1041 nop = emit_insn_before (nop, insn);
1043 if (old_p)
1044 flags = INSN_INIT_TODO_SSID;
1045 else
1046 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1048 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1049 sel_init_new_insn (nop, flags);
1051 return nop;
1054 /* Remove NOP from the instruction stream and return it to the pool. */
1055 void
1056 return_nop_to_pool (insn_t nop, bool full_tidying)
1058 gcc_assert (INSN_IN_STREAM_P (nop));
1059 sel_remove_insn (nop, false, full_tidying);
1061 if (nop_pool.n == nop_pool.s)
1062 nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
1063 (nop_pool.s = 2 * nop_pool.s + 1));
1064 nop_pool.v[nop_pool.n++] = nop;
1067 /* Free the nop pool. */
1068 void
1069 free_nop_pool (void)
1071 nop_pool.n = 0;
1072 nop_pool.s = 0;
1073 free (nop_pool.v);
1074 nop_pool.v = NULL;
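/* A sketch of how the nop pool is meant to be used (illustrative only,
   not actual scheduler code):

     insn_t nop = get_nop_from_pool (insn);
     ... update the data sets while NOP holds INSN's place ...
     return_nop_to_pool (nop, true);

   free_nop_pool then releases the pool's backing vector once scheduling
   is finished. */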
1078 /* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
1079 The callback is given two rtxes XX and YY and writes the new rtxes
1080 to NX and NY in case some need to be skipped. */
1081 static int
1082 skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1084 const_rtx x = *xx;
1085 const_rtx y = *yy;
1087 if (GET_CODE (x) == UNSPEC
1088 && (targetm.sched.skip_rtx_p == NULL
1089 || targetm.sched.skip_rtx_p (x)))
1091 *nx = XVECEXP (x, 0, 0);
1092 *ny = CONST_CAST_RTX (y);
1093 return 1;
1096 if (GET_CODE (y) == UNSPEC
1097 && (targetm.sched.skip_rtx_p == NULL
1098 || targetm.sched.skip_rtx_p (y)))
1100 *nx = CONST_CAST_RTX (x);
1101 *ny = XVECEXP (y, 0, 0);
1102 return 1;
1105 return 0;
1108 /* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X correctly,
1109 to support ia64 speculation. When changes are needed, the new rtx NX and
1110 new mode NMODE are written, and the callback returns true. */
1111 static int
1112 hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
1113 rtx *nx, enum machine_mode* nmode)
1115 if (GET_CODE (x) == UNSPEC
1116 && targetm.sched.skip_rtx_p
1117 && targetm.sched.skip_rtx_p (x))
1119 *nx = XVECEXP (x, 0 ,0);
1120 *nmode = VOIDmode;
1121 return 1;
1124 return 0;
1127 /* Return true if LHS and RHS are OK to be scheduled separately. */
1128 static bool
1129 lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1131 if (lhs == NULL || rhs == NULL)
1132 return false;
1134 /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc. as the rhs: there is
1135 no point in using a reg if a const can be used. Moreover, scheduling a const
1136 as the rhs may lead to a mode mismatch, because consts don't have modes but
1137 could be merged from branches where the same const is used in different modes. */
1138 if (CONSTANT_P (rhs))
1139 return false;
1141 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1142 if (COMPARISON_P (rhs))
1143 return false;
1145 /* Do not allow single REG to be an rhs. */
1146 if (REG_P (rhs))
1147 return false;
1149 /* See comment at find_used_regs_1 (*1) for explanation of this
1150 restriction. */
1151 /* FIXME: remove this later. */
1152 if (MEM_P (lhs))
1153 return false;
1155 /* This will filter all tricky things like ZERO_EXTRACT etc.
1156 For now we don't handle it. */
1157 if (!REG_P (lhs) && !MEM_P (lhs))
1158 return false;
1160 return true;
1163 /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1164 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
1165 used e.g. for insns from recovery blocks. */
1166 static void
1167 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1169 hash_rtx_callback_function hrcf;
1170 int insn_class;
1172 VINSN_INSN_RTX (vi) = insn;
1173 VINSN_COUNT (vi) = 0;
1174 vi->cost = -1;
1176 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1177 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1178 else
1179 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1181 /* Hash vinsn depending on whether it is separable or not. */
1182 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1183 if (VINSN_SEPARABLE_P (vi))
1185 rtx rhs = VINSN_RHS (vi);
1187 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1188 NULL, NULL, false, hrcf);
1189 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1190 VOIDmode, NULL, NULL,
1191 false, hrcf);
1193 else
1195 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1196 NULL, NULL, false, hrcf);
1197 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1200 insn_class = haifa_classify_insn (insn);
1201 if (insn_class >= 2
1202 && (!targetm.sched.get_insn_spec_ds
1203 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1204 == 0)))
1205 VINSN_MAY_TRAP_P (vi) = true;
1206 else
1207 VINSN_MAY_TRAP_P (vi) = false;
1210 /* Indicate that VI has become the part of an rtx object. */
1211 void
1212 vinsn_attach (vinsn_t vi)
1214 /* Assert that VI is not pending for deletion. */
1215 gcc_assert (VINSN_INSN_RTX (vi));
1217 VINSN_COUNT (vi)++;
1220 /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
1221 correct VINSN_TYPE (VI). */
1222 static vinsn_t
1223 vinsn_create (insn_t insn, bool force_unique_p)
1225 vinsn_t vi = XCNEW (struct vinsn_def);
1227 vinsn_init (vi, insn, force_unique_p);
1228 return vi;
1231 /* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1232 the copy. */
1233 vinsn_t
1234 vinsn_copy (vinsn_t vi, bool reattach_p)
1236 rtx copy;
1237 bool unique = VINSN_UNIQUE_P (vi);
1238 vinsn_t new_vi;
1240 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1241 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1242 if (reattach_p)
1244 vinsn_detach (vi);
1245 vinsn_attach (new_vi);
1248 return new_vi;
1251 /* Delete the VI vinsn and free its data. */
1252 static void
1253 vinsn_delete (vinsn_t vi)
1255 gcc_assert (VINSN_COUNT (vi) == 0);
1257 return_regset_to_pool (VINSN_REG_SETS (vi));
1258 return_regset_to_pool (VINSN_REG_USES (vi));
1259 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1261 free (vi);
1264 /* Indicate that VI is no longer a part of some rtx object.
1265 Remove VI if it is no longer needed. */
1266 void
1267 vinsn_detach (vinsn_t vi)
1269 gcc_assert (VINSN_COUNT (vi) > 0);
1271 if (--VINSN_COUNT (vi) == 0)
1272 vinsn_delete (vi);
1275 /* Returns TRUE if VI is a branch. */
1276 bool
1277 vinsn_cond_branch_p (vinsn_t vi)
1279 insn_t insn;
1281 if (!VINSN_UNIQUE_P (vi))
1282 return false;
1284 insn = VINSN_INSN_RTX (vi);
1285 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1286 return false;
1288 return control_flow_insn_p (insn);
1291 /* Return latency of INSN. */
1292 static int
1293 sel_insn_rtx_cost (rtx insn)
1295 int cost;
1297 /* A USE insn, or something else we don't need to
1298 understand. We can't pass these directly to
1299 result_ready_cost or insn_default_latency because it will
1300 trigger a fatal error for unrecognizable insns. */
1301 if (recog_memoized (insn) < 0)
1302 cost = 0;
1303 else
1305 cost = insn_default_latency (insn);
1307 if (cost < 0)
1308 cost = 0;
1311 return cost;
1314 /* Return the cost of the VI.
1315 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1317 sel_vinsn_cost (vinsn_t vi)
1319 int cost = vi->cost;
1321 if (cost < 0)
1323 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1324 vi->cost = cost;
1327 return cost;
1331 /* Functions for insn emitting. */
1333 /* Emit new insn after AFTER based on PATTERN and initialize its data from
1334 EXPR and SEQNO. */
1335 insn_t
1336 sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1338 insn_t new_insn;
1340 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1342 new_insn = emit_insn_after (pattern, after);
1343 set_insn_init (expr, NULL, seqno);
1344 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1346 return new_insn;
1349 /* Force newly generated vinsns to be unique. */
1350 static bool init_insn_force_unique_p = false;
1352 /* Emit new speculation recovery insn after AFTER based on PATTERN and
1353 initialize its data from EXPR and SEQNO. */
1354 insn_t
1355 sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1356 insn_t after)
1358 insn_t insn;
1360 gcc_assert (!init_insn_force_unique_p);
1362 init_insn_force_unique_p = true;
1363 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1364 CANT_MOVE (insn) = 1;
1365 init_insn_force_unique_p = false;
1367 return insn;
1370 /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
1371 take it as a new vinsn instead of EXPR's vinsn.
1372 We simplify insns later, after scheduling region in
1373 simplify_changed_insns. */
1374 insn_t
1375 sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1376 insn_t after)
1378 expr_t emit_expr;
1379 insn_t insn;
1380 int flags;
1382 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1383 seqno);
1384 insn = EXPR_INSN_RTX (emit_expr);
1385 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1387 flags = INSN_INIT_TODO_SSID;
1388 if (INSN_LUID (insn) == 0)
1389 flags |= INSN_INIT_TODO_LUID;
1390 sel_init_new_insn (insn, flags);
1392 return insn;
1395 /* Move insn from EXPR after AFTER. */
1396 insn_t
1397 sel_move_insn (expr_t expr, int seqno, insn_t after)
1399 insn_t insn = EXPR_INSN_RTX (expr);
1400 basic_block bb = BLOCK_FOR_INSN (after);
1401 insn_t next = NEXT_INSN (after);
1403 /* Assert that in move_op we disconnected this insn properly. */
1404 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1405 PREV_INSN (insn) = after;
1406 NEXT_INSN (insn) = next;
1408 NEXT_INSN (after) = insn;
1409 PREV_INSN (next) = insn;
1411 /* Update links from insn to bb and vice versa. */
1412 df_insn_change_bb (insn, bb);
1413 if (BB_END (bb) == after)
1414 BB_END (bb) = insn;
1416 prepare_insn_expr (insn, seqno);
1417 return insn;
1421 /* Functions to work with right-hand sides. */
1423 /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
1424 VECT and return true when found. Use NEW_VINSN for comparison only when
1425 COMPARE_VINSNS is true. Write to INDP the index on which
1426 the search has stopped, such that inserting the new element at INDP will
1427 retain VECT's sort order. */
1428 static bool
1429 find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
1430 unsigned uid, vinsn_t new_vinsn,
1431 bool compare_vinsns, int *indp)
1433 expr_history_def *arr;
1434 int i, j, len = VEC_length (expr_history_def, vect);
1436 if (len == 0)
1438 *indp = 0;
1439 return false;
1442 arr = VEC_address (expr_history_def, vect);
1443 i = 0, j = len - 1;
1445 while (i <= j)
1447 unsigned auid = arr[i].uid;
1448 vinsn_t avinsn = arr[i].new_expr_vinsn;
1450 if (auid == uid
1451 /* When undoing transformation on a bookkeeping copy, the new vinsn
1452 may not be exactly equal to the one that is saved in the vector.
1453 This is because the insn whose copy we're checking was possibly
1454 substituted itself. */
1455 && (! compare_vinsns
1456 || vinsn_equal_p (avinsn, new_vinsn)))
1458 *indp = i;
1459 return true;
1461 else if (auid > uid)
1462 break;
1463 i++;
1466 *indp = i;
1467 return false;
1470 /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
1471 the position found, or -1 if no such value is in the vector.
1472 Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
1474 find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
1475 vinsn_t new_vinsn, bool originators_p)
1477 int ind;
1479 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1480 false, &ind))
1481 return ind;
1483 if (INSN_ORIGINATORS (insn) && originators_p)
1485 unsigned uid;
1486 bitmap_iterator bi;
1488 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1489 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1490 return ind;
1493 return -1;
1496 /* Insert new element in a sorted history vector pointed to by PVECT,
1497 if it is not there already. The element is searched using
1498 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1499 the history of a transformation. */
1500 void
1501 insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
1502 unsigned uid, enum local_trans_type type,
1503 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1504 ds_t spec_ds)
1506 VEC(expr_history_def, heap) *vect = *pvect;
1507 expr_history_def temp;
1508 bool res;
1509 int ind;
1511 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1513 if (res)
1515 expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
1517 /* It is possible that speculation types of expressions that were
1518 propagated through different paths will be different here. In this
1519 case, merge the status to get the correct check later. */
1520 if (phist->spec_ds != spec_ds)
1521 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1522 return;
1525 temp.uid = uid;
1526 temp.old_expr_vinsn = old_expr_vinsn;
1527 temp.new_expr_vinsn = new_expr_vinsn;
1528 temp.spec_ds = spec_ds;
1529 temp.type = type;
1531 vinsn_attach (old_expr_vinsn);
1532 vinsn_attach (new_expr_vinsn);
1533 VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
1534 *pvect = vect;
1537 /* Free history vector PVECT. */
1538 static void
1539 free_history_vect (VEC (expr_history_def, heap) **pvect)
1541 unsigned i;
1542 expr_history_def *phist;
1544 if (! *pvect)
1545 return;
1547 for (i = 0;
1548 VEC_iterate (expr_history_def, *pvect, i, phist);
1549 i++)
1551 vinsn_detach (phist->old_expr_vinsn);
1552 vinsn_detach (phist->new_expr_vinsn);
1555 VEC_free (expr_history_def, heap, *pvect);
1556 *pvect = NULL;
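/* Note on reference counting: insert_in_history_vect attaches both the old
   and the new vinsn of every recorded transformation, and free_history_vect
   detaches them again, so VINSN_COUNT stays balanced across copy_expr and
   clear_expr. */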
1560 /* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1561 bool
1562 vinsn_equal_p (vinsn_t x, vinsn_t y)
1564 rtx_equal_p_callback_function repcf;
1566 if (x == y)
1567 return true;
1569 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1570 return false;
1572 if (VINSN_HASH (x) != VINSN_HASH (y))
1573 return false;
1575 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1576 if (VINSN_SEPARABLE_P (x))
1578 /* Compare RHSes of VINSNs. */
1579 gcc_assert (VINSN_RHS (x));
1580 gcc_assert (VINSN_RHS (y));
1582 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1585 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1589 /* Functions for working with expressions. */
1591 /* Initialize EXPR. */
1592 static void
1593 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1594 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1595 ds_t spec_to_check_ds, int orig_sched_cycle,
1596 VEC(expr_history_def, heap) *history, bool target_available,
1597 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1598 bool cant_move)
1600 vinsn_attach (vi);
1602 EXPR_VINSN (expr) = vi;
1603 EXPR_SPEC (expr) = spec;
1604 EXPR_USEFULNESS (expr) = use;
1605 EXPR_PRIORITY (expr) = priority;
1606 EXPR_PRIORITY_ADJ (expr) = 0;
1607 EXPR_SCHED_TIMES (expr) = sched_times;
1608 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1609 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1610 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1611 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1613 if (history)
1614 EXPR_HISTORY_OF_CHANGES (expr) = history;
1615 else
1616 EXPR_HISTORY_OF_CHANGES (expr) = NULL;
1618 EXPR_TARGET_AVAILABLE (expr) = target_available;
1619 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1620 EXPR_WAS_RENAMED (expr) = was_renamed;
1621 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1622 EXPR_CANT_MOVE (expr) = cant_move;
1625 /* Make a copy of the expr FROM into the expr TO. */
1626 void
1627 copy_expr (expr_t to, expr_t from)
1629 VEC(expr_history_def, heap) *temp = NULL;
1631 if (EXPR_HISTORY_OF_CHANGES (from))
1633 unsigned i;
1634 expr_history_def *phist;
1636 temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
1637 for (i = 0;
1638 VEC_iterate (expr_history_def, temp, i, phist);
1639 i++)
1641 vinsn_attach (phist->old_expr_vinsn);
1642 vinsn_attach (phist->new_expr_vinsn);
1646 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1647 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1648 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1649 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1650 EXPR_ORIG_SCHED_CYCLE (from), temp,
1651 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1652 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1653 EXPR_CANT_MOVE (from));
1656 /* Same, but the final expr will not ever be in av sets, so don't copy
1657 "uninteresting" data such as bitmap cache. */
1658 void
1659 copy_expr_onside (expr_t to, expr_t from)
1661 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1662 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1663 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
1664 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1665 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1666 EXPR_CANT_MOVE (from));
1669 /* Prepare the expr of INSN for scheduling. Used when moving insn and when
1670 initializing new insns. */
1671 static void
1672 prepare_insn_expr (insn_t insn, int seqno)
1674 expr_t expr = INSN_EXPR (insn);
1675 ds_t ds;
1677 INSN_SEQNO (insn) = seqno;
1678 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1679 EXPR_SPEC (expr) = 0;
1680 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1681 EXPR_WAS_SUBSTITUTED (expr) = 0;
1682 EXPR_WAS_RENAMED (expr) = 0;
1683 EXPR_TARGET_AVAILABLE (expr) = 1;
1684 INSN_LIVE_VALID_P (insn) = false;
1686 /* ??? If this expression is speculative, make its dependence
1687 as weak as possible. We can filter this expression later
1688 in process_spec_exprs, because we do not distinguish
1689 between the status we got during compute_av_set and the
1690 existing status. To be fixed. */
1691 ds = EXPR_SPEC_DONE_DS (expr);
1692 if (ds)
1693 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1695 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1698 /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
1699 is non-null when expressions are merged from different successors at
1700 a split point. */
1701 static void
1702 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1704 if (EXPR_TARGET_AVAILABLE (to) < 0
1705 || EXPR_TARGET_AVAILABLE (from) < 0)
1706 EXPR_TARGET_AVAILABLE (to) = -1;
1707 else
1709 /* We try to detect the case when one of the expressions
1710 can only be reached through another one. In this case,
1711 we can do better. */
1712 if (split_point == NULL)
1714 int toind, fromind;
1716 toind = EXPR_ORIG_BB_INDEX (to);
1717 fromind = EXPR_ORIG_BB_INDEX (from);
1719 if (toind && toind == fromind)
1720 /* Do nothing -- everything is done in
1721 merge_with_other_exprs. */
1723 else
1724 EXPR_TARGET_AVAILABLE (to) = -1;
1726 else
1727 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1731 /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
1732 is non-null when expressions are merged from different successors at
1733 a split point. */
1734 static void
1735 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1737 ds_t old_to_ds, old_from_ds;
1739 old_to_ds = EXPR_SPEC_DONE_DS (to);
1740 old_from_ds = EXPR_SPEC_DONE_DS (from);
1742 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1743 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1744 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1746 /* When merging e.g. control & data speculative exprs, or a control
1747 speculative with a control&data speculative one, we really have
1748 to change vinsn too. Also, when speculative status is changed,
1749 we also need to record this as a transformation in expr's history. */
1750 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1752 old_to_ds = ds_get_speculation_types (old_to_ds);
1753 old_from_ds = ds_get_speculation_types (old_from_ds);
1755 if (old_to_ds != old_from_ds)
1757 ds_t record_ds;
1759 /* When both expressions are speculative, we need to change
1760 the vinsn first. */
1761 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1763 int res;
1765 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1766 gcc_assert (res >= 0);
1769 if (split_point != NULL)
1771 /* Record the change with proper status. */
1772 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1773 record_ds &= ~(old_to_ds & SPECULATIVE);
1774 record_ds &= ~(old_from_ds & SPECULATIVE);
1776 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1777 INSN_UID (split_point), TRANS_SPECULATION,
1778 EXPR_VINSN (from), EXPR_VINSN (to),
1779 record_ds);
1786 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1787 this is done along different paths. */
1788 void
1789 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1791 int i;
1792 expr_history_def *phist;
1794 /* For now, we just set the spec of resulting expr to be minimum of the specs
1795 of merged exprs. */
1796 if (EXPR_SPEC (to) > EXPR_SPEC (from))
1797 EXPR_SPEC (to) = EXPR_SPEC (from);
1799 if (split_point)
1800 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1801 else
1802 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1803 EXPR_USEFULNESS (from));
1805 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1806 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1808 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1809 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1811 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1812 EXPR_ORIG_BB_INDEX (to) = 0;
1814 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1815 EXPR_ORIG_SCHED_CYCLE (from));
1817 /* We keep this vector sorted. */
1818 for (i = 0;
1819 VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
1820 i, phist);
1821 i++)
1822 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1823 phist->uid, phist->type,
1824 phist->old_expr_vinsn, phist->new_expr_vinsn,
1825 phist->spec_ds);
1827 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1828 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1829 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1831 update_target_availability (to, from, split_point);
1832 update_speculative_bits (to, from, split_point);
1835 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1836 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1837 are merged from different successors at a split point. */
1838 void
1839 merge_expr (expr_t to, expr_t from, insn_t split_point)
1841 vinsn_t to_vi = EXPR_VINSN (to);
1842 vinsn_t from_vi = EXPR_VINSN (from);
1844 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1846 /* Make sure that speculative pattern is propagated into exprs that
1847 have non-speculative one. This will provide us with consistent
1848 speculative bits and speculative patterns inside expr. */
1849 if (EXPR_SPEC_DONE_DS (to) == 0
1850 && EXPR_SPEC_DONE_DS (from) != 0)
1851 change_vinsn_in_expr (to, EXPR_VINSN (from));
1853 merge_expr_data (to, from, split_point);
1854 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1857 /* Clear the information of this EXPR. */
1858 void
1859 clear_expr (expr_t expr)
1862 vinsn_detach (EXPR_VINSN (expr));
1863 EXPR_VINSN (expr) = NULL;
1865 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1868 /* For a given LV_SET, mark EXPR having unavailable target register. */
1869 static void
1870 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1872 if (EXPR_SEPARABLE_P (expr))
1874 if (REG_P (EXPR_LHS (expr))
1875 && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
1877 /* If it's an insn like r1 = use (r1, ...), and it exists in
1878 different forms in each of the av_sets being merged, we can't say
1879 whether original destination register is available or not.
1880 However, this still works if destination register is not used
1881 in the original expression: if the branch whose LV_SET we're
1882 looking at here is not actually the 'other branch' in the sense that the same
1883 expression is available through it (but it can't be determined
1884 at computation stage because of transformations on one of the
1885 branches), it still won't affect the availability.
1886 Liveness of a register somewhere on a code motion path means
1887 it's either read somewhere on a code motion path, live on
1888 'other' branch, live at the point immediately following
1889 the original operation, or is read by the original operation.
1890 The latter case is filtered out in the condition below.
1891 It still doesn't cover the case when register is defined and used
1892 somewhere within the code motion path, and in this case we could
1893 miss a unifying code motion along both branches using a renamed
1894 register, but it won't affect code correctness since upon
1895 an actual code motion a bookkeeping code would be generated. */
1896 if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1897 REGNO (EXPR_LHS (expr))))
1898 EXPR_TARGET_AVAILABLE (expr) = -1;
1899 else
1900 EXPR_TARGET_AVAILABLE (expr) = false;
1903 else
1905 unsigned regno;
1906 reg_set_iterator rsi;
1908 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1909 0, regno, rsi)
1910 if (bitmap_bit_p (lv_set, regno))
1912 EXPR_TARGET_AVAILABLE (expr) = false;
1913 break;
1916 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1917 0, regno, rsi)
1918 if (bitmap_bit_p (lv_set, regno))
1920 EXPR_TARGET_AVAILABLE (expr) = false;
1921 break;
1926 /* Try to make EXPR speculative. Return 1 when EXPR's pattern
1927 or dependence status have changed, 2 when also the target register
1928 became unavailable, 0 if nothing had to be changed. */
1930 speculate_expr (expr_t expr, ds_t ds)
1932 int res;
1933 rtx orig_insn_rtx;
1934 rtx spec_pat;
1935 ds_t target_ds, current_ds;
1937 /* Obtain the status we need to put on EXPR. */
1938 target_ds = (ds & SPECULATIVE);
1939 current_ds = EXPR_SPEC_DONE_DS (expr);
1940 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1942 orig_insn_rtx = EXPR_INSN_RTX (expr);
1944 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1946 switch (res)
1948 case 0:
1949 EXPR_SPEC_DONE_DS (expr) = ds;
1950 return current_ds != ds ? 1 : 0;
1952 case 1:
1954 rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1955 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1957 change_vinsn_in_expr (expr, spec_vinsn);
1958 EXPR_SPEC_DONE_DS (expr) = ds;
1959 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1961 /* Do not allow clobbering the address register of speculative
1962 insns. */
1963 if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1964 expr_dest_regno (expr)))
1966 EXPR_TARGET_AVAILABLE (expr) = false;
1967 return 2;
1970 return 1;
1973 case -1:
1974 return -1;
1976 default:
1977 gcc_unreachable ();
1978 return -1;
1982 /* Return a destination register, if any, of EXPR. */
1984 expr_dest_reg (expr_t expr)
1986 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
1988 if (dest != NULL_RTX && REG_P (dest))
1989 return dest;
1991 return NULL_RTX;
1994 /* Return the REGNO of EXPR's destination. */
1995 unsigned
1996 expr_dest_regno (expr_t expr)
1998 rtx dest = expr_dest_reg (expr);
2000 gcc_assert (dest != NULL_RTX);
2001 return REGNO (dest);
2004 /* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
2005 AV_SET having unavailable target register. */
2006 void
2007 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2009 expr_t expr;
2010 av_set_iterator avi;
2012 FOR_EACH_EXPR (expr, avi, join_set)
2013 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2014 set_unavailable_target_for_expr (expr, lv_set);
2018 /* Av set functions. */
2020 /* Add a new element to av set SETP.
2021 Return the element added. */
2022 static av_set_t
2023 av_set_add_element (av_set_t *setp)
2025 /* Insert at the beginning of the list. */
2026 _list_add (setp);
2027 return *setp;
2030 /* Add EXPR to SETP. */
2031 void
2032 av_set_add (av_set_t *setp, expr_t expr)
2034 av_set_t elem;
2036 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2037 elem = av_set_add_element (setp);
2038 copy_expr (_AV_SET_EXPR (elem), expr);
2041 /* Same, but do not copy EXPR. */
2042 static void
2043 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2045 av_set_t elem;
2047 elem = av_set_add_element (setp);
2048 *_AV_SET_EXPR (elem) = *expr;
2051 /* Remove expr pointed to by IP from the av_set. */
2052 void
2053 av_set_iter_remove (av_set_iterator *ip)
2055 clear_expr (_AV_SET_EXPR (*ip->lp));
2056 _list_iter_remove (ip);
2059 /* Search for an expr in SET such that it is equivalent to SOUGHT_VINSN in the
2060 sense of the vinsn_equal_p function. Return NULL if no such expr
2061 is found in SET. */
2062 expr_t
2063 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2065 expr_t expr;
2066 av_set_iterator i;
2068 FOR_EACH_EXPR (expr, i, set)
2069 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2070 return expr;
2071 return NULL;
2074 /* Same, but also remove the EXPR found. */
2075 static expr_t
2076 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2078 expr_t expr;
2079 av_set_iterator i;
2081 FOR_EACH_EXPR_1 (expr, i, setp)
2082 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2084 _list_iter_remove_nofree (&i);
2085 return expr;
2087 return NULL;
2090 /* Search for an expr in SET such that it is equivalent to EXPR in the
2091 sense of the vinsn_equal_p function on their vinsns, but is not EXPR itself.
2092 Return NULL if no such expr is found in SET. */
2093 static expr_t
2094 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2096 expr_t cur_expr;
2097 av_set_iterator i;
2099 FOR_EACH_EXPR (cur_expr, i, set)
2101 if (cur_expr == expr)
2102 continue;
2103 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2104 return cur_expr;
2107 return NULL;
2110 /* If other expression is already in AVP, remove one of them. */
2111 expr_t
2112 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2114 expr_t expr2;
2116 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2117 if (expr2 != NULL)
2119 /* Reset target availability on merge, since taking it only from one
2120 of the exprs would be wrong when they come from different code. */
2121 EXPR_TARGET_AVAILABLE (expr2) = -1;
2122 EXPR_USEFULNESS (expr2) = 0;
2124 merge_expr (expr2, expr, NULL);
2126 /* Fix usefulness, as it should now be REG_BR_PROB_BASE. */
2127 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2129 av_set_iter_remove (ip);
2130 return expr2;
2133 return expr;
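/* Illustrative sketch (not part of the pass): merge_with_other_exprs is
   meant to be called while walking an av set with an iterator, so that the
   duplicate entry pointed to by the iterator can be removed in place.  AVP
   is assumed to be an av set built elsewhere.

     expr_t expr;
     av_set_iterator i;

     FOR_EACH_EXPR_1 (expr, i, avp)
       expr = merge_with_other_exprs (avp, &i, expr);

   If an equivalent expression is already present, the two are merged into
   it, the iterated entry is removed, and the surviving expression is
   returned; otherwise EXPR itself is returned unchanged.  */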
2136 /* Return true if there is an expr that correlates to VI in SET. */
2137 bool
2138 av_set_is_in_p (av_set_t set, vinsn_t vi)
2140 return av_set_lookup (set, vi) != NULL;
2143 /* Return a copy of SET. */
2144 av_set_t
2145 av_set_copy (av_set_t set)
2147 expr_t expr;
2148 av_set_iterator i;
2149 av_set_t res = NULL;
2151 FOR_EACH_EXPR (expr, i, set)
2152 av_set_add (&res, expr);
2154 return res;
2157 /* Join two av sets that have no common elements by attaching the second set
2158 (pointed to by FROMP) to the end of the first set (TO_TAILP must point to
2159 _AV_SET_NEXT of the first set's last element). */
2160 static void
2161 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2163 gcc_assert (*to_tailp == NULL);
2164 *to_tailp = *fromp;
2165 *fromp = NULL;
2168 /* Make the set pointed to by TOP the union of TOP and FROMP. Clear the
2169 av_set pointed to by FROMP afterwards. */
2170 void
2171 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2173 expr_t expr1;
2174 av_set_iterator i;
2176 /* Delete from TOP all exprs that are also present in FROMP. */
2177 FOR_EACH_EXPR_1 (expr1, i, top)
2179 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2181 if (expr2)
2183 merge_expr (expr2, expr1, insn);
2184 av_set_iter_remove (&i);
2188 join_distinct_sets (i.lp, fromp);
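/* Illustrative sketch (not part of the pass): given av sets SUCC1_AV and
   SUCC2_AV computed beforehand for two successors joined at INSN, their
   union can be formed in place as follows.

     av_set_union_and_clear (&succ1_av, &succ2_av, insn);

   Afterwards SUCC1_AV holds the union (equivalent expressions merged via
   merge_expr) and SUCC2_AV is NULL.  */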
2191 /* Same as above, but also update the availability of the target register
2192 in TOP judging by TO_LV_SET and FROM_LV_SET. */
2193 void
2194 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2195 regset from_lv_set, insn_t insn)
2197 expr_t expr1;
2198 av_set_iterator i;
2199 av_set_t *to_tailp, in_both_set = NULL;
2201 /* Delete from TOP all exprs that are also present in FROMP. */
2202 FOR_EACH_EXPR_1 (expr1, i, top)
2204 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2206 if (expr2)
2208 /* It may be that the expressions have different destination
2209 registers, in which case we need to check liveness here. */
2210 if (EXPR_SEPARABLE_P (expr1))
2212 int regno1 = (REG_P (EXPR_LHS (expr1))
2213 ? (int) expr_dest_regno (expr1) : -1);
2214 int regno2 = (REG_P (EXPR_LHS (expr2))
2215 ? (int) expr_dest_regno (expr2) : -1);
2217 /* ??? We don't have a way to check restrictions for the
2218 *other* register on the current path; we did it only
2219 for the current target register. Give up. */
2220 if (regno1 != regno2)
2221 EXPR_TARGET_AVAILABLE (expr2) = -1;
2223 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2224 EXPR_TARGET_AVAILABLE (expr2) = -1;
2226 merge_expr (expr2, expr1, insn);
2227 av_set_add_nocopy (&in_both_set, expr2);
2228 av_set_iter_remove (&i);
2230 else
2231 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2232 FROM_LV_SET. */
2233 set_unavailable_target_for_expr (expr1, from_lv_set);
2235 to_tailp = i.lp;
2237 /* These expressions are not present in TOP. Check liveness
2238 restrictions on TO_LV_SET. */
2239 FOR_EACH_EXPR (expr1, i, *fromp)
2240 set_unavailable_target_for_expr (expr1, to_lv_set);
2242 join_distinct_sets (i.lp, &in_both_set);
2243 join_distinct_sets (to_tailp, fromp);
2246 /* Clear av_set pointed to by SETP. */
2247 void
2248 av_set_clear (av_set_t *setp)
2250 expr_t expr;
2251 av_set_iterator i;
2253 FOR_EACH_EXPR_1 (expr, i, setp)
2254 av_set_iter_remove (&i);
2256 gcc_assert (*setp == NULL);
2259 /* Leave only one non-speculative element in SETP. */
2260 void
2261 av_set_leave_one_nonspec (av_set_t *setp)
2263 expr_t expr;
2264 av_set_iterator i;
2265 bool has_one_nonspec = false;
2267 /* Keep all speculative exprs, and leave one non-speculative
2268 (the first one). */
2269 FOR_EACH_EXPR_1 (expr, i, setp)
2271 if (!EXPR_SPEC_DONE_DS (expr))
2273 if (has_one_nonspec)
2274 av_set_iter_remove (&i);
2275 else
2276 has_one_nonspec = true;
2281 /* Return the N'th element of the SET. */
2282 expr_t
2283 av_set_element (av_set_t set, int n)
2285 expr_t expr;
2286 av_set_iterator i;
2288 FOR_EACH_EXPR (expr, i, set)
2289 if (n-- == 0)
2290 return expr;
2292 gcc_unreachable ();
2293 return NULL;
2296 /* Delete all expressions that are conditional branches (IFs) from AVP. */
2297 void
2298 av_set_substract_cond_branches (av_set_t *avp)
2300 av_set_iterator i;
2301 expr_t expr;
2303 FOR_EACH_EXPR_1 (expr, i, avp)
2304 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2305 av_set_iter_remove (&i);
2308 /* Multiply the usefulness attribute of each member of the av set AV by
2309 PROB / ALL_PROB. */
2310 void
2311 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2313 av_set_iterator i;
2314 expr_t expr;
2316 FOR_EACH_EXPR (expr, i, av)
2317 EXPR_USEFULNESS (expr) = (all_prob
2318 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2319 : 0);
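/* Worked example (illustrative only): an expression entering a join with
   EXPR_USEFULNESS equal to REG_BR_PROB_BASE (i.e. 100%), coming along an
   edge with probability PROB = ALL_PROB / 4, ends up with usefulness
   REG_BR_PROB_BASE / 4 after av_set_split_usefulness, i.e. 25%.  When
   ALL_PROB is zero, the usefulness is simply reset to zero.  */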
2322 /* Leave in AVP only those expressions that are also present in AV. */
2324 void
2325 av_set_intersect (av_set_t *avp, av_set_t av)
2327 av_set_iterator i;
2328 expr_t expr;
2330 FOR_EACH_EXPR_1 (expr, i, avp)
2331 if (av_set_lookup (av, EXPR_VINSN (expr)) == NULL)
2332 av_set_iter_remove (&i);
2337 /* Dependence hooks to initialize insn data. */
2339 /* This is used in hooks callable from dependence analysis when initializing
2340 instruction's data. */
2341 static struct
2343 /* Where the dependence was found (lhs/rhs). */
2344 deps_where_t where;
2346 /* The actual data object to initialize. */
2347 idata_t id;
2349 /* True when the insn should not be made clonable. */
2350 bool force_unique_p;
2352 /* True when insn should be treated as of type USE, i.e. never renamed. */
2353 bool force_use_p;
2354 } deps_init_id_data;
2357 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2358 clonable. */
2359 static void
2360 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2362 int type;
2364 /* Determine whether INSN could be cloned and return appropriate vinsn type.
2365 Clonable insns that can be separated into lhs and rhs have type SET.
2366 Other clonable insns have type USE. */
2367 type = GET_CODE (insn);
2369 /* Only regular insns can be cloned. */
2370 if (type == INSN && !force_unique_p)
2371 type = SET;
2372 else if (type == JUMP_INSN && simplejump_p (insn))
2373 type = PC;
2374 else if (type == DEBUG_INSN)
2375 type = !force_unique_p ? USE : INSN;
2377 IDATA_TYPE (id) = type;
2378 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2379 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2380 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
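/* To summarize the mapping above (illustrative, derived from the code):

     INSN, not forced unique        -> SET  (may be separated into lhs/rhs)
     INSN, forced unique            -> INSN
     JUMP_INSN that is a simplejump -> PC
     DEBUG_INSN, not forced unique  -> USE
     DEBUG_INSN, forced unique      -> INSN
     anything else                  -> its GET_CODE (e.g. JUMP_INSN, CALL_INSN)  */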
2383 /* Start initializing insn data. */
2384 static void
2385 deps_init_id_start_insn (insn_t insn)
2387 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2389 setup_id_for_insn (deps_init_id_data.id, insn,
2390 deps_init_id_data.force_unique_p);
2391 deps_init_id_data.where = DEPS_IN_INSN;
2394 /* Start initializing lhs data. */
2395 static void
2396 deps_init_id_start_lhs (rtx lhs)
2398 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2399 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2401 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2403 IDATA_LHS (deps_init_id_data.id) = lhs;
2404 deps_init_id_data.where = DEPS_IN_LHS;
2408 /* Finish initializing lhs data. */
2409 static void
2410 deps_init_id_finish_lhs (void)
2412 deps_init_id_data.where = DEPS_IN_INSN;
2415 /* Note a set of REGNO. */
2416 static void
2417 deps_init_id_note_reg_set (int regno)
2419 haifa_note_reg_set (regno);
2421 if (deps_init_id_data.where == DEPS_IN_RHS)
2422 deps_init_id_data.force_use_p = true;
2424 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2425 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2427 #ifdef STACK_REGS
2428 /* Make instructions that set stack registers ineligible for
2429 renaming to avoid issues with find_used_regs. */
2430 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2431 deps_init_id_data.force_use_p = true;
2432 #endif
2435 /* Note a clobber of REGNO. */
2436 static void
2437 deps_init_id_note_reg_clobber (int regno)
2439 haifa_note_reg_clobber (regno);
2441 if (deps_init_id_data.where == DEPS_IN_RHS)
2442 deps_init_id_data.force_use_p = true;
2444 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2445 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2448 /* Note a use of REGNO. */
2449 static void
2450 deps_init_id_note_reg_use (int regno)
2452 haifa_note_reg_use (regno);
2454 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2455 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2458 /* Start initializing rhs data. */
2459 static void
2460 deps_init_id_start_rhs (rtx rhs)
2462 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2464 /* And there was no sel_deps_reset_to_insn (). */
2465 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2467 IDATA_RHS (deps_init_id_data.id) = rhs;
2468 deps_init_id_data.where = DEPS_IN_RHS;
2472 /* Finish initializing rhs data. */
2473 static void
2474 deps_init_id_finish_rhs (void)
2476 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2477 || deps_init_id_data.where == DEPS_IN_INSN);
2478 deps_init_id_data.where = DEPS_IN_INSN;
2481 /* Finish initializing insn data. */
2482 static void
2483 deps_init_id_finish_insn (void)
2485 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2487 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2489 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2490 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2492 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2493 || deps_init_id_data.force_use_p)
2495 /* This should be a USE, as we don't want to schedule its RHS
2496 separately. However, we still want to have them recorded
2497 for the purposes of substitution. That's why we don't
2498 simply call downgrade_to_use () here. */
2499 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2500 gcc_assert (!lhs == !rhs);
2502 IDATA_TYPE (deps_init_id_data.id) = USE;
2506 deps_init_id_data.where = DEPS_IN_NOWHERE;
2509 /* This is dependence info used for initializing insn's data. */
2510 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2512 /* This initializes most of the static part of the above structure. */
2513 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2515 NULL,
2517 deps_init_id_start_insn,
2518 deps_init_id_finish_insn,
2519 deps_init_id_start_lhs,
2520 deps_init_id_finish_lhs,
2521 deps_init_id_start_rhs,
2522 deps_init_id_finish_rhs,
2523 deps_init_id_note_reg_set,
2524 deps_init_id_note_reg_clobber,
2525 deps_init_id_note_reg_use,
2526 NULL, /* note_mem_dep */
2527 NULL, /* note_dep */
2529 0, /* use_cselib */
2530 0, /* use_deps_list */
2531 0 /* generate_spec_deps */
2534 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2535 we don't actually need information about lhs and rhs. */
2536 static void
2537 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2539 rtx pat = PATTERN (insn);
2541 if (NONJUMP_INSN_P (insn)
2542 && GET_CODE (pat) == SET
2543 && !force_unique_p)
2545 IDATA_RHS (id) = SET_SRC (pat);
2546 IDATA_LHS (id) = SET_DEST (pat);
2548 else
2549 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2552 /* Possibly downgrade INSN to USE. */
2553 static void
2554 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2556 bool must_be_use = false;
2557 unsigned uid = INSN_UID (insn);
2558 df_ref *rec;
2559 rtx lhs = IDATA_LHS (id);
2560 rtx rhs = IDATA_RHS (id);
2562 /* We downgrade only SETs. */
2563 if (IDATA_TYPE (id) != SET)
2564 return;
2566 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2568 IDATA_TYPE (id) = USE;
2569 return;
2572 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2574 df_ref def = *rec;
2576 if (DF_REF_INSN (def)
2577 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2578 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2580 must_be_use = true;
2581 break;
2584 #ifdef STACK_REGS
2585 /* Make instructions that set stack registers ineligible for
2586 renaming to avoid issues with find_used_regs. */
2587 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2589 must_be_use = true;
2590 break;
2592 #endif
2595 if (must_be_use)
2596 IDATA_TYPE (id) = USE;
2599 /* Setup register sets describing INSN in ID. */
2600 static void
2601 setup_id_reg_sets (idata_t id, insn_t insn)
2603 unsigned uid = INSN_UID (insn);
2604 df_ref *rec;
2605 regset tmp = get_clear_regset_from_pool ();
2607 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2609 df_ref def = *rec;
2610 unsigned int regno = DF_REF_REGNO (def);
2612 /* Post modifies are treated like clobbers by sched-deps.c. */
2613 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2614 | DF_REF_PRE_POST_MODIFY)))
2615 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2616 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2618 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2620 #ifdef STACK_REGS
2621 /* For stack registers, treat writes to them as writes
2622 to the first one to be consistent with sched-deps.c. */
2623 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2624 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2625 #endif
2627 /* Mark special refs that generate read/write def pair. */
2628 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2629 || regno == STACK_POINTER_REGNUM)
2630 bitmap_set_bit (tmp, regno);
2633 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2635 df_ref use = *rec;
2636 unsigned int regno = DF_REF_REGNO (use);
2638 /* When these refs are met for the first time, skip them, as
2639 these uses are just counterparts of some defs. */
2640 if (bitmap_bit_p (tmp, regno))
2641 bitmap_clear_bit (tmp, regno);
2642 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2644 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2646 #ifdef STACK_REGS
2647 /* For stack registers, treat reads from them as reads from
2648 the first one to be consistent with sched-deps.c. */
2649 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2650 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2651 #endif
2655 return_regset_to_pool (tmp);
2658 /* Initialize instruction data for INSN in ID using DF's data. */
2659 static void
2660 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2662 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2664 setup_id_for_insn (id, insn, force_unique_p);
2665 setup_id_lhs_rhs (id, insn, force_unique_p);
2667 if (INSN_NOP_P (insn))
2668 return;
2670 maybe_downgrade_id_to_use (id, insn);
2671 setup_id_reg_sets (id, insn);
2674 /* Initialize instruction data for INSN in ID. */
2675 static void
2676 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2678 struct deps_desc _dc, *dc = &_dc;
2680 deps_init_id_data.where = DEPS_IN_NOWHERE;
2681 deps_init_id_data.id = id;
2682 deps_init_id_data.force_unique_p = force_unique_p;
2683 deps_init_id_data.force_use_p = false;
2685 init_deps (dc, false);
2687 memcpy (&deps_init_id_sched_deps_info,
2688 &const_deps_init_id_sched_deps_info,
2689 sizeof (deps_init_id_sched_deps_info));
2691 if (spec_info != NULL)
2692 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2694 sched_deps_info = &deps_init_id_sched_deps_info;
2696 deps_analyze_insn (dc, insn);
2698 free_deps (dc);
2700 deps_init_id_data.id = NULL;
2705 /* Implement hooks for collecting fundamental insn properties like if insn is
2706 an ASM or is within a SCHED_GROUP. */
2708 /* True when the "one-time init" data for INSN has not been set up yet,
i.e. we see INSN for the first time. */
2709 static bool
2710 first_time_insn_init (insn_t insn)
2712 return INSN_LIVE (insn) == NULL;
2715 /* Hash an entry in a transformed_insns hashtable. */
2716 static hashval_t
2717 hash_transformed_insns (const void *p)
2719 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2722 /* Compare the entries in a transformed_insns hashtable. */
2723 static int
2724 eq_transformed_insns (const void *p, const void *q)
2726 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2727 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2729 if (INSN_UID (i1) == INSN_UID (i2))
2730 return 1;
2731 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2734 /* Free an entry in a transformed_insns hashtable. */
2735 static void
2736 free_transformed_insns (void *p)
2738 struct transformed_insns *pti = (struct transformed_insns *) p;
2740 vinsn_detach (pti->vinsn_old);
2741 vinsn_detach (pti->vinsn_new);
2742 free (pti);
2745 /* Init the s_i_d data for INSN which should be inited just once, when
2746 we first see the insn. */
2747 static void
2748 init_first_time_insn_data (insn_t insn)
2750 /* This should not be set if this is the first time we init data for
2751 insn. */
2752 gcc_assert (first_time_insn_init (insn));
2754 /* These are needed for nops too. */
2755 INSN_LIVE (insn) = get_regset_from_pool ();
2756 INSN_LIVE_VALID_P (insn) = false;
2758 if (!INSN_NOP_P (insn))
2760 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2761 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2762 INSN_TRANSFORMED_INSNS (insn)
2763 = htab_create (16, hash_transformed_insns,
2764 eq_transformed_insns, free_transformed_insns);
2765 init_deps (&INSN_DEPS_CONTEXT (insn), true);
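/* Illustrative sketch (not part of the pass): the usual guard around the
   routine above, as seen later in init_insn_data.

     if (first_time_insn_init (insn))
       init_first_time_insn_data (insn);  */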
2769 /* Free almost all above data for INSN that is scheduled already.
2770 Used for extra-large basic blocks. */
2771 void
2772 free_data_for_scheduled_insn (insn_t insn)
2774 gcc_assert (! first_time_insn_init (insn));
2776 if (! INSN_ANALYZED_DEPS (insn))
2777 return;
2779 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2780 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2781 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2783 /* This is allocated only for bookkeeping insns. */
2784 if (INSN_ORIGINATORS (insn))
2785 BITMAP_FREE (INSN_ORIGINATORS (insn));
2786 free_deps (&INSN_DEPS_CONTEXT (insn));
2788 INSN_ANALYZED_DEPS (insn) = NULL;
2790 /* Clear the readonly flag so we would ICE when trying to recalculate
2791 the deps context (as we believe that it should not happen). */
2792 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2795 /* Free the same data as above for INSN. */
2796 static void
2797 free_first_time_insn_data (insn_t insn)
2799 gcc_assert (! first_time_insn_init (insn));
2801 free_data_for_scheduled_insn (insn);
2802 return_regset_to_pool (INSN_LIVE (insn));
2803 INSN_LIVE (insn) = NULL;
2804 INSN_LIVE_VALID_P (insn) = false;
2807 /* Initialize region-scope data structures for basic blocks. */
2808 static void
2809 init_global_and_expr_for_bb (basic_block bb)
2811 if (sel_bb_empty_p (bb))
2812 return;
2814 invalidate_av_set (bb);
2817 /* Data for global dependency analysis (to initialize CANT_MOVE and
2818 SCHED_GROUP_P). */
2819 static struct
2821 /* Previous insn. */
2822 insn_t prev_insn;
2823 } init_global_data;
2825 /* Determine if INSN is in a sched_group, is an asm, or should not be
2826 cloned. After that, initialize its expr. */
2827 static void
2828 init_global_and_expr_for_insn (insn_t insn)
2830 if (LABEL_P (insn))
2831 return;
2833 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2835 init_global_data.prev_insn = NULL_RTX;
2836 return;
2839 gcc_assert (INSN_P (insn));
2841 if (SCHED_GROUP_P (insn))
2842 /* Setup a sched_group. */
2844 insn_t prev_insn = init_global_data.prev_insn;
2846 if (prev_insn)
2847 INSN_SCHED_NEXT (prev_insn) = insn;
2849 init_global_data.prev_insn = insn;
2851 else
2852 init_global_data.prev_insn = NULL_RTX;
2854 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2855 || asm_noperands (PATTERN (insn)) >= 0)
2856 /* Mark INSN as an asm. */
2857 INSN_ASM_P (insn) = true;
2860 bool force_unique_p;
2861 ds_t spec_done_ds;
2863 /* Certain instructions cannot be cloned. */
2864 if (CANT_MOVE (insn)
2865 || INSN_ASM_P (insn)
2866 || SCHED_GROUP_P (insn)
2867 || prologue_epilogue_contains (insn)
2868 /* Exception handling insns are always unique. */
2869 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2870 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
2871 || control_flow_insn_p (insn))
2872 force_unique_p = true;
2873 else
2874 force_unique_p = false;
2876 if (targetm.sched.get_insn_spec_ds)
2878 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2879 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2881 else
2882 spec_done_ds = 0;
2884 /* Initialize INSN's expr. */
2885 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
2886 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
2887 spec_done_ds, 0, 0, NULL, true, false, false, false,
2888 CANT_MOVE (insn));
2891 init_first_time_insn_data (insn);
2894 /* Scan the region and initialize instruction data for basic blocks BBS. */
2895 void
2896 sel_init_global_and_expr (bb_vec_t bbs)
2898 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
2899 const struct sched_scan_info_def ssi =
2901 NULL, /* extend_bb */
2902 init_global_and_expr_for_bb, /* init_bb */
2903 extend_insn_data, /* extend_insn */
2904 init_global_and_expr_for_insn /* init_insn */
2907 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2910 /* Finalize region-scope data structures for basic blocks. */
2911 static void
2912 finish_global_and_expr_for_bb (basic_block bb)
2914 av_set_clear (&BB_AV_SET (bb));
2915 BB_AV_LEVEL (bb) = 0;
2918 /* Finalize INSN's data. */
2919 static void
2920 finish_global_and_expr_insn (insn_t insn)
2922 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
2923 return;
2925 gcc_assert (INSN_P (insn));
2927 if (INSN_LUID (insn) > 0)
2929 free_first_time_insn_data (insn);
2930 INSN_WS_LEVEL (insn) = 0;
2931 CANT_MOVE (insn) = 0;
2933 /* We can no longer assert this, as vinsns of this insn could be
2934 easily live in other insn's caches. This should be changed to
2935 a counter-like approach among all vinsns. */
2936 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
2937 clear_expr (INSN_EXPR (insn));
2941 /* Finalize per instruction data for the whole region. */
2942 void
2943 sel_finish_global_and_expr (void)
2946 bb_vec_t bbs;
2947 int i;
2949 bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
2951 for (i = 0; i < current_nr_blocks; i++)
2952 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
2954 /* Clear AV_SETs and INSN_EXPRs. */
2956 const struct sched_scan_info_def ssi =
2958 NULL, /* extend_bb */
2959 finish_global_and_expr_for_bb, /* init_bb */
2960 NULL, /* extend_insn */
2961 finish_global_and_expr_insn /* init_insn */
2964 sched_scan (&ssi, bbs, NULL, NULL, NULL);
2967 VEC_free (basic_block, heap, bbs);
2970 finish_insns ();
2974 /* In the below hooks, we merely calculate whether or not a dependence
2975 exists, and in what part of the insn. However, we will need more data
2976 when we start caching dependence requests. */
2978 /* Container to hold information for dependency analysis. */
2979 static struct
2981 deps_t dc;
2983 /* A variable to track which part of rtx we are scanning in
2984 sched-deps.c: sched_analyze_insn (). */
2985 deps_where_t where;
2987 /* Current producer. */
2988 insn_t pro;
2990 /* Current consumer. */
2991 vinsn_t con;
2993 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
2994 X is from { INSN, LHS, RHS }. */
2995 ds_t has_dep_p[DEPS_IN_NOWHERE];
2996 } has_dependence_data;
2998 /* Start analyzing dependencies of INSN. */
2999 static void
3000 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3002 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3004 has_dependence_data.where = DEPS_IN_INSN;
3007 /* Finish analyzing dependencies of an insn. */
3008 static void
3009 has_dependence_finish_insn (void)
3011 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3013 has_dependence_data.where = DEPS_IN_NOWHERE;
3016 /* Start analyzing dependencies of LHS. */
3017 static void
3018 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3020 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3022 if (VINSN_LHS (has_dependence_data.con) != NULL)
3023 has_dependence_data.where = DEPS_IN_LHS;
3026 /* Finish analyzing dependencies of an lhs. */
3027 static void
3028 has_dependence_finish_lhs (void)
3030 has_dependence_data.where = DEPS_IN_INSN;
3033 /* Start analyzing dependencies of RHS. */
3034 static void
3035 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3037 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3039 if (VINSN_RHS (has_dependence_data.con) != NULL)
3040 has_dependence_data.where = DEPS_IN_RHS;
3043 /* Finish analyzing dependencies of an rhs. */
3044 static void
3045 has_dependence_finish_rhs (void)
3047 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3048 || has_dependence_data.where == DEPS_IN_INSN);
3050 has_dependence_data.where = DEPS_IN_INSN;
3053 /* Note a set of REGNO. */
3054 static void
3055 has_dependence_note_reg_set (int regno)
3057 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3059 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3060 VINSN_INSN_RTX
3061 (has_dependence_data.con)))
3063 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3065 if (reg_last->sets != NULL
3066 || reg_last->clobbers != NULL)
3067 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3069 if (reg_last->uses)
3070 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3074 /* Note a clobber of REGNO. */
3075 static void
3076 has_dependence_note_reg_clobber (int regno)
3078 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3080 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3081 VINSN_INSN_RTX
3082 (has_dependence_data.con)))
3084 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3086 if (reg_last->sets)
3087 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3089 if (reg_last->uses)
3090 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3094 /* Note a use of REGNO. */
3095 static void
3096 has_dependence_note_reg_use (int regno)
3098 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3100 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3101 VINSN_INSN_RTX
3102 (has_dependence_data.con)))
3104 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3106 if (reg_last->sets)
3107 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3109 if (reg_last->clobbers)
3110 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3112 /* Handle BE_IN_SPEC. */
3113 if (reg_last->uses)
3115 ds_t pro_spec_checked_ds;
3117 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3118 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3120 if (pro_spec_checked_ds != 0)
3121 /* Merge BE_IN_SPEC bits into *DSP. */
3122 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3123 NULL_RTX, NULL_RTX);
3128 /* Note a memory dependence. */
3129 static void
3130 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3131 rtx pending_mem ATTRIBUTE_UNUSED,
3132 insn_t pending_insn ATTRIBUTE_UNUSED,
3133 ds_t ds ATTRIBUTE_UNUSED)
3135 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3136 VINSN_INSN_RTX (has_dependence_data.con)))
3138 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3140 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3144 /* Note a dependence. */
3145 static void
3146 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3147 ds_t ds ATTRIBUTE_UNUSED)
3149 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3150 VINSN_INSN_RTX (has_dependence_data.con)))
3152 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3154 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3158 /* Mark the insn as having a hard dependence that prevents speculation. */
3159 void
3160 sel_mark_hard_insn (rtx insn)
3162 int i;
3164 /* Only work when we're in has_dependence_p mode.
3165 ??? This is a hack, this should actually be a hook. */
3166 if (!has_dependence_data.dc || !has_dependence_data.pro)
3167 return;
3169 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3170 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3172 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3173 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3176 /* This structure holds the hooks for the dependency analysis used when
3177 actually processing dependencies in the scheduler. */
3178 static struct sched_deps_info_def has_dependence_sched_deps_info;
3180 /* This initializes most of the fields of the above structure. */
3181 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3183 NULL,
3185 has_dependence_start_insn,
3186 has_dependence_finish_insn,
3187 has_dependence_start_lhs,
3188 has_dependence_finish_lhs,
3189 has_dependence_start_rhs,
3190 has_dependence_finish_rhs,
3191 has_dependence_note_reg_set,
3192 has_dependence_note_reg_clobber,
3193 has_dependence_note_reg_use,
3194 has_dependence_note_mem_dep,
3195 has_dependence_note_dep,
3197 0, /* use_cselib */
3198 0, /* use_deps_list */
3199 0 /* generate_spec_deps */
3202 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3203 static void
3204 setup_has_dependence_sched_deps_info (void)
3206 memcpy (&has_dependence_sched_deps_info,
3207 &const_has_dependence_sched_deps_info,
3208 sizeof (has_dependence_sched_deps_info));
3210 if (spec_info != NULL)
3211 has_dependence_sched_deps_info.generate_spec_deps = 1;
3213 sched_deps_info = &has_dependence_sched_deps_info;
3216 /* Remove all dependences found and recorded in has_dependence_data array. */
3217 void
3218 sel_clear_has_dependence (void)
3220 int i;
3222 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3223 has_dependence_data.has_dep_p[i] = 0;
3226 /* Return nonzero if EXPR is dependent upon PRED. Return a pointer
3227 to the dependence information array in HAS_DEP_PP. */
3228 ds_t
3229 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3231 int i;
3232 ds_t ds;
3233 struct deps_desc *dc;
3235 if (INSN_SIMPLEJUMP_P (pred))
3236 /* Unconditional jump is just a transfer of control flow.
3237 Ignore it. */
3238 return false;
3240 dc = &INSN_DEPS_CONTEXT (pred);
3242 /* We init this field lazily. */
3243 if (dc->reg_last == NULL)
3244 init_deps_reg_last (dc);
3246 if (!dc->readonly)
3248 has_dependence_data.pro = NULL;
3249 /* Initialize empty dep context with information about PRED. */
3250 advance_deps_context (dc, pred);
3251 dc->readonly = 1;
3254 has_dependence_data.where = DEPS_IN_NOWHERE;
3255 has_dependence_data.pro = pred;
3256 has_dependence_data.con = EXPR_VINSN (expr);
3257 has_dependence_data.dc = dc;
3259 sel_clear_has_dependence ();
3261 /* Now catch all dependencies that would be generated between PRED and
3262 INSN. */
3263 setup_has_dependence_sched_deps_info ();
3264 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3265 has_dependence_data.dc = NULL;
3267 /* When a barrier was found, set DEPS_IN_INSN bits. */
3268 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3269 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3270 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3271 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3273 /* Do not allow stores to memory to move through checks. Currently
3274 we don't move this to sched-deps.c as the check doesn't have
3275 obvious places to which this dependence can be attached.
3276 FIXME: this should go to a hook. */
3277 if (EXPR_LHS (expr)
3278 && MEM_P (EXPR_LHS (expr))
3279 && sel_insn_is_speculation_check (pred))
3280 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3282 *has_dep_pp = has_dependence_data.has_dep_p;
3283 ds = 0;
3284 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3285 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3286 NULL_RTX, NULL_RTX);
3288 return ds;
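/* Illustrative sketch (not part of the pass): a typical query against the
   routine above.  PRED is assumed to be an insn already processed on the
   current path and EXPR a candidate expression from an av set.

     ds_t *has_dep_pp;
     ds_t full_ds = has_dependence_p (expr, pred, &has_dep_pp);

     if (full_ds == 0)
       EXPR does not depend on PRED at all
     else if (has_dep_pp[DEPS_IN_INSN] == 0 && has_dep_pp[DEPS_IN_LHS] == 0)
       only the rhs is involved, so a transformation such as substitution
       might still be attempted  */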
3292 /* Dependence hooks implementation that checks dependence latency constraints
3293 on the insns being scheduled. The entry point for these routines is
3294 tick_check_p predicate. */
3296 static struct
3298 /* An expr we are currently checking. */
3299 expr_t expr;
3301 /* A minimal cycle for its scheduling. */
3302 int cycle;
3304 /* Whether we have seen a true dependence while checking. */
3305 bool seen_true_dep_p;
3306 } tick_check_data;
3308 /* Update the minimal scheduling cycle of the insn being checked, given that
3309 it depends on PRO_INSN with status DS and weight DW. */
3310 static void
3311 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3313 expr_t con_expr = tick_check_data.expr;
3314 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3316 if (con_insn != pro_insn)
3318 enum reg_note dt;
3319 int tick;
3321 if (/* PROducer was removed from above due to pipelining. */
3322 !INSN_IN_STREAM_P (pro_insn)
3323 /* Or PROducer was originally on the next iteration regarding the
3324 CONsumer. */
3325 || (INSN_SCHED_TIMES (pro_insn)
3326 - EXPR_SCHED_TIMES (con_expr)) > 1)
3327 /* Don't count this dependence. */
3328 return;
3330 dt = ds_to_dt (ds);
3331 if (dt == REG_DEP_TRUE)
3332 tick_check_data.seen_true_dep_p = true;
3334 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3337 dep_def _dep, *dep = &_dep;
3339 init_dep (dep, pro_insn, con_insn, dt);
3341 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3344 /* When there are several kinds of dependencies between pro and con,
3345 only REG_DEP_TRUE should be taken into account. */
3346 if (tick > tick_check_data.cycle
3347 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3348 tick_check_data.cycle = tick;
3352 /* An implementation of note_dep hook. */
3353 static void
3354 tick_check_note_dep (insn_t pro, ds_t ds)
3356 tick_check_dep_with_dw (pro, ds, 0);
3359 /* An implementation of note_mem_dep hook. */
3360 static void
3361 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3363 dw_t dw;
3365 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3366 ? estimate_dep_weak (mem1, mem2)
3367 : 0);
3369 tick_check_dep_with_dw (pro, ds, dw);
3372 /* This structure contains hooks for dependence analysis used when determining
3373 whether an insn is ready for scheduling. */
3374 static struct sched_deps_info_def tick_check_sched_deps_info =
3376 NULL,
3378 NULL,
3379 NULL,
3380 NULL,
3381 NULL,
3382 NULL,
3383 NULL,
3384 haifa_note_reg_set,
3385 haifa_note_reg_clobber,
3386 haifa_note_reg_use,
3387 tick_check_note_mem_dep,
3388 tick_check_note_dep,
3390 0, 0, 0
3393 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3394 scheduled. Return 0 if all data from producers in DC is ready. */
3396 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3398 int cycles_left;
3399 /* Initialize variables. */
3400 tick_check_data.expr = expr;
3401 tick_check_data.cycle = 0;
3402 tick_check_data.seen_true_dep_p = false;
3403 sched_deps_info = &tick_check_sched_deps_info;
3405 gcc_assert (!dc->readonly);
3406 dc->readonly = 1;
3407 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3408 dc->readonly = 0;
3410 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3412 return cycles_left >= 0 ? cycles_left : 0;
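/* Illustrative sketch (not part of the pass): using tick_check_p to see
   whether EXPR could issue on the current cycle of FENCE.  DC is assumed
   to be a dependence context describing the insns already scheduled on
   that fence.

     int stall = tick_check_p (expr, dc, fence);

     if (stall == 0)
       all producers of EXPR are ready on FENCE_CYCLE (fence)
     else
       EXPR would have to wait STALL more cycles  */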
3416 /* Functions to work with insns. */
3418 /* Returns true if LHS of INSN is the same as DEST of an insn
3419 being moved. */
3420 bool
3421 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3423 rtx lhs = INSN_LHS (insn);
3425 if (lhs == NULL || dest == NULL)
3426 return false;
3428 return rtx_equal_p (lhs, dest);
3431 /* Return s_i_d entry of INSN. Callable from debugger. */
3432 sel_insn_data_def
3433 insn_sid (insn_t insn)
3435 return *SID (insn);
3438 /* True when INSN is a speculative check. We can tell this by looking
3439 at the data structures of the selective scheduler, not by examining
3440 the pattern. */
3441 bool
3442 sel_insn_is_speculation_check (rtx insn)
3444 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3447 /* Extract the machine mode MODE and destination location DST_LOC
3448 for the given INSN. */
3449 void
3450 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3452 rtx pat = PATTERN (insn);
3454 gcc_assert (dst_loc);
3455 gcc_assert (GET_CODE (pat) == SET);
3457 *dst_loc = SET_DEST (pat);
3459 gcc_assert (*dst_loc);
3460 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3462 if (mode)
3463 *mode = GET_MODE (*dst_loc);
3466 /* Returns true when moving through JUMP will result in bookkeeping
3467 creation. */
3468 bool
3469 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3471 insn_t succ;
3472 succ_iterator si;
3474 FOR_EACH_SUCC (succ, si, jump)
3475 if (sel_num_cfg_preds_gt_1 (succ))
3476 return true;
3478 return false;
3481 /* Return 'true' if INSN is the only one in its basic block. */
3482 static bool
3483 insn_is_the_only_one_in_bb_p (insn_t insn)
3485 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3488 #ifdef ENABLE_CHECKING
3489 /* Check that the region we're scheduling still has at most one
3490 backedge. */
3491 static void
3492 verify_backedges (void)
3494 if (pipelining_p)
3496 int i, n = 0;
3497 edge e;
3498 edge_iterator ei;
3500 for (i = 0; i < current_nr_blocks; i++)
3501 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3502 if (in_current_region_p (e->dest)
3503 && BLOCK_TO_BB (e->dest->index) < i)
3504 n++;
3506 gcc_assert (n <= 1);
3509 #endif
3512 /* Functions to work with control flow. */
3514 /* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
3515 are sorted in topological order (it might have been invalidated by
3516 redirecting an edge). */
3517 static void
3518 sel_recompute_toporder (void)
3520 int i, n, rgn;
3521 int *postorder, n_blocks;
3523 postorder = XALLOCAVEC (int, n_basic_blocks);
3524 n_blocks = post_order_compute (postorder, false, false);
3526 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3527 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3528 if (CONTAINING_RGN (postorder[i]) == rgn)
3530 BLOCK_TO_BB (postorder[i]) = n;
3531 BB_TO_BLOCK (n) = postorder[i];
3532 n++;
3535 /* Assert that we updated info for all blocks. We may miss some blocks if
3536 this function is called when redirecting an edge made a block
3537 unreachable, but that block is not deleted yet. */
3538 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3541 /* Tidy the possibly empty block BB. */
3542 static bool
3543 maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
3545 basic_block succ_bb, pred_bb;
3546 edge e;
3547 edge_iterator ei;
3548 bool rescan_p;
3550 /* Keep the empty bb only if this block immediately precedes EXIT and
3551 has an incoming non-fallthrough edge, or it has no predecessors or
3552 successors. Otherwise remove it. */
3553 if (!sel_bb_empty_p (bb)
3554 || (single_succ_p (bb)
3555 && single_succ (bb) == EXIT_BLOCK_PTR
3556 && (!single_pred_p (bb)
3557 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3558 || EDGE_COUNT (bb->preds) == 0
3559 || EDGE_COUNT (bb->succs) == 0)
3560 return false;
3562 /* Do not attempt to redirect complex edges. */
3563 FOR_EACH_EDGE (e, ei, bb->preds)
3564 if (e->flags & EDGE_COMPLEX)
3565 return false;
3567 free_data_sets (bb);
3569 /* Do not delete BB if it has more than one successor.
3570 That can occur when we are moving a jump. */
3571 if (!single_succ_p (bb))
3573 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3574 sel_merge_blocks (bb->prev_bb, bb);
3575 return true;
3578 succ_bb = single_succ (bb);
3579 rescan_p = true;
3580 pred_bb = NULL;
3582 /* Redirect all non-fallthru edges to the next bb. */
3583 while (rescan_p)
3585 rescan_p = false;
3587 FOR_EACH_EDGE (e, ei, bb->preds)
3589 pred_bb = e->src;
3591 if (!(e->flags & EDGE_FALLTHRU))
3593 recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb);
3594 rescan_p = true;
3595 break;
3600 /* If it is possible, merge BB with its predecessor. */
3601 if (can_merge_blocks_p (bb->prev_bb, bb))
3602 sel_merge_blocks (bb->prev_bb, bb);
3603 else
3604 /* Otherwise this is a block without a fallthru predecessor.
3605 Just delete it. */
3607 gcc_assert (pred_bb != NULL);
3609 if (in_current_region_p (pred_bb))
3610 move_bb_info (pred_bb, bb);
3611 remove_empty_bb (bb, true);
3614 if (recompute_toporder_p)
3615 sel_recompute_toporder ();
3617 #ifdef ENABLE_CHECKING
3618 verify_backedges ();
3619 #endif
3621 return true;
3624 /* Tidy the control flow after we have removed the original insn from
3625 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3626 is true, also try to optimize control flow on non-empty blocks. */
3627 bool
3628 tidy_control_flow (basic_block xbb, bool full_tidying)
3630 bool changed = true;
3631 insn_t first, last;
3633 /* First check whether XBB is empty. */
3634 changed = maybe_tidy_empty_bb (xbb, false);
3635 if (changed || !full_tidying)
3636 return changed;
3638 /* Check if there is an unnecessary jump left after the insn. */
3639 if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
3640 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3641 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3643 if (sel_remove_insn (BB_END (xbb), false, false))
3644 return true;
3645 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3648 first = sel_bb_head (xbb);
3649 last = sel_bb_end (xbb);
3650 if (MAY_HAVE_DEBUG_INSNS)
3652 if (first != last && DEBUG_INSN_P (first))
3654 first = NEXT_INSN (first);
3655 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3657 if (first != last && DEBUG_INSN_P (last))
3659 last = PREV_INSN (last);
3660 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3662 /* Check if an unnecessary jump was left in the previous basic block,
3663 leading to the next basic block, after removing INSN from the stream.
3664 If so, remove that jump and redirect its edge to the current basic
3665 block (where INSN was before deletion). This way, when the NOP is
3666 deleted several instructions later together with its basic block, we
3667 will not get a jump to the next instruction, which can be harmful. */
3669 if (first == last
3670 && !sel_bb_empty_p (xbb)
3671 && INSN_NOP_P (last)
3672 /* Flow goes fallthru from current block to the next. */
3673 && EDGE_COUNT (xbb->succs) == 1
3674 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3675 /* When the successor is the EXIT block, it may not be the next block. */
3676 && single_succ (xbb) != EXIT_BLOCK_PTR
3677 /* And unconditional jump in previous basic block leads to
3678 next basic block of XBB and this jump can be safely removed. */
3679 && in_current_region_p (xbb->prev_bb)
3680 && jump_leads_only_to_bb_p (BB_END (xbb->prev_bb), xbb->next_bb)
3681 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3682 /* Also this jump is not at the scheduling boundary. */
3683 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3685 bool recompute_toporder_p;
3686 /* Clear data structures of jump - jump itself will be removed
3687 by sel_redirect_edge_and_branch. */
3688 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3689 recompute_toporder_p
3690 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3692 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3694 /* It can turn out that, after removing the unused jump, the basic block
3695 that contained that jump becomes empty too. In such a case remove it
3696 as well. */
3697 if (sel_bb_empty_p (xbb->prev_bb))
3698 changed = maybe_tidy_empty_bb (xbb->prev_bb, recompute_toporder_p);
3699 else if (recompute_toporder_p)
3700 sel_recompute_toporder ();
3703 return changed;
3706 /* Purge meaningless empty blocks in the middle of a region. */
3707 void
3708 purge_empty_blocks (void)
3710 /* Do not attempt to delete preheader. */
3711 int i = sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0))) ? 1 : 0;
3713 while (i < current_nr_blocks)
3715 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3717 if (maybe_tidy_empty_bb (b, false))
3718 continue;
3720 i++;
3724 /* Rip INSN out of the insn stream. When ONLY_DISCONNECT is true,
3725 do not delete the insn's data, because it will be re-emitted later.
3726 Return true if we have removed some blocks afterwards. */
3727 bool
3728 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3730 basic_block bb = BLOCK_FOR_INSN (insn);
3732 gcc_assert (INSN_IN_STREAM_P (insn));
3734 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3736 expr_t expr;
3737 av_set_iterator i;
3739 /* When we remove a debug insn that is head of a BB, it remains
3740 in the AV_SET of the block, but it shouldn't. */
3741 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3742 if (EXPR_INSN_RTX (expr) == insn)
3744 av_set_iter_remove (&i);
3745 break;
3749 if (only_disconnect)
3751 insn_t prev = PREV_INSN (insn);
3752 insn_t next = NEXT_INSN (insn);
3753 basic_block bb = BLOCK_FOR_INSN (insn);
3755 NEXT_INSN (prev) = next;
3756 PREV_INSN (next) = prev;
3758 if (BB_HEAD (bb) == insn)
3760 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3761 BB_HEAD (bb) = prev;
3763 if (BB_END (bb) == insn)
3764 BB_END (bb) = prev;
3766 else
3768 remove_insn (insn);
3769 clear_expr (INSN_EXPR (insn));
3772 /* It is necessary to NULL these fields before calling add_insn (). */
3773 PREV_INSN (insn) = NULL_RTX;
3774 NEXT_INSN (insn) = NULL_RTX;
3776 return tidy_control_flow (bb, full_tidying);
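/* Illustrative sketch (not part of the pass): the two typical ways the
   routine above is used.

     sel_remove_insn (insn, true, false);     detach INSN but keep its data,
                                              because it will be re-emitted
     sel_remove_insn (insn, false, true);     delete INSN for good and tidy
                                              the control flow afterwards

   The return value tells whether some basic blocks were removed while
   tidying the CFG.  */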
3779 /* Estimate the number of insns in BB. */
3780 static int
3781 sel_estimate_number_of_insns (basic_block bb)
3783 int res = 0;
3784 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3786 for (; insn != next_tail; insn = NEXT_INSN (insn))
3787 if (NONDEBUG_INSN_P (insn))
3788 res++;
3790 return res;
3793 /* We don't need separate luids for notes or labels. */
3794 static int
3795 sel_luid_for_non_insn (rtx x)
3797 gcc_assert (NOTE_P (x) || LABEL_P (x));
3799 return -1;
3802 /* Return seqno of the only predecessor of INSN. */
3803 static int
3804 get_seqno_of_a_pred (insn_t insn)
3806 int seqno;
3808 gcc_assert (INSN_SIMPLEJUMP_P (insn));
3810 if (!sel_bb_head_p (insn))
3811 seqno = INSN_SEQNO (PREV_INSN (insn));
3812 else
3814 basic_block bb = BLOCK_FOR_INSN (insn);
3816 if (single_pred_p (bb)
3817 && !in_current_region_p (single_pred (bb)))
3819 /* We can have preds outside a region when splitting edges
3820 for pipelining of an outer loop. Use succ instead.
3821 There should be only one of them. */
3822 insn_t succ = NULL;
3823 succ_iterator si;
3824 bool first = true;
3826 gcc_assert (flag_sel_sched_pipelining_outer_loops
3827 && current_loop_nest);
3828 FOR_EACH_SUCC_1 (succ, si, insn,
3829 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
3831 gcc_assert (first);
3832 first = false;
3835 gcc_assert (succ != NULL);
3836 seqno = INSN_SEQNO (succ);
3838 else
3840 insn_t *preds;
3841 int n;
3843 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
3844 gcc_assert (n == 1);
3846 seqno = INSN_SEQNO (preds[0]);
3848 free (preds);
3852 return seqno;
3855 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
3856 with positive seqno exist. */
3858 get_seqno_by_preds (rtx insn)
3860 basic_block bb = BLOCK_FOR_INSN (insn);
3861 rtx tmp = insn, head = BB_HEAD (bb);
3862 insn_t *preds;
3863 int n, i, seqno;
3865 while (tmp != head)
3866 if (INSN_P (tmp))
3867 return INSN_SEQNO (tmp);
3868 else
3869 tmp = PREV_INSN (tmp);
3871 cfg_preds (bb, &preds, &n);
3872 for (i = 0, seqno = -1; i < n; i++)
3873 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
3875 return seqno;
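/* Clarifying note (derived from the code above): get_seqno_by_preds scans
   from INSN back to the head of its block and returns the seqno of the
   first real insn it encounters, starting with INSN itself.  Only when no
   such insn is found before the block head does it fall back to the
   maximum seqno among the CFG predecessors, which is -1 when no
   predecessor has a positive seqno.  */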
3880 /* Extend pass-scope data structures for basic blocks. */
3881 void
3882 sel_extend_global_bb_info (void)
3884 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
3885 last_basic_block);
3888 /* Extend region-scope data structures for basic blocks. */
3889 static void
3890 extend_region_bb_info (void)
3892 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
3893 last_basic_block);
3896 /* Extend all data structures to fit for all basic blocks. */
3897 static void
3898 extend_bb_info (void)
3900 sel_extend_global_bb_info ();
3901 extend_region_bb_info ();
3904 /* Finalize pass-scope data structures for basic blocks. */
3905 void
3906 sel_finish_global_bb_info (void)
3908 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
3911 /* Finalize region-scope data structures for basic blocks. */
3912 static void
3913 finish_region_bb_info (void)
3915 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
3919 /* Data for each insn in current region. */
3920 VEC (sel_insn_data_def, heap) *s_i_d = NULL;
3922 /* A vector for the insns we've emitted. */
3923 static insn_vec_t new_insns = NULL;
3925 /* Extend data structures for insns from current region. */
3926 static void
3927 extend_insn_data (void)
3929 int reserve;
3931 sched_extend_target ();
3932 sched_deps_init (false);
3934 /* Extend data structures for insns from current region. */
3935 reserve = (sched_max_luid + 1
3936 - VEC_length (sel_insn_data_def, s_i_d));
3937 if (reserve > 0
3938 && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
3940 int size;
3942 if (sched_max_luid / 2 > 1024)
3943 size = sched_max_luid + 1024;
3944 else
3945 size = 3 * sched_max_luid / 2;
3948 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
3952 /* Finalize data structures for insns from current region. */
3953 static void
3954 finish_insns (void)
3956 unsigned i;
3958 /* Clear here all dependence contexts that may have been left over from insns
3959 that were removed during scheduling. */
3960 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
3962 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
3964 if (sid_entry->live)
3965 return_regset_to_pool (sid_entry->live);
3966 if (sid_entry->analyzed_deps)
3968 BITMAP_FREE (sid_entry->analyzed_deps);
3969 BITMAP_FREE (sid_entry->found_deps);
3970 htab_delete (sid_entry->transformed_insns);
3971 free_deps (&sid_entry->deps_context);
3973 if (EXPR_VINSN (&sid_entry->expr))
3975 clear_expr (&sid_entry->expr);
3977 /* Also, clear CANT_MOVE bit here, because we really don't want it
3978 to be passed to the next region. */
3979 CANT_MOVE_BY_LUID (i) = 0;
3983 VEC_free (sel_insn_data_def, heap, s_i_d);
3986 /* A proxy to pass initialization data to init_insn (). */
3987 static sel_insn_data_def _insn_init_ssid;
3988 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
3990 /* If true create a new vinsn. Otherwise use the one from EXPR. */
3991 static bool insn_init_create_new_vinsn_p;
3993 /* Set all necessary data for initialization of the new insn[s]. */
3994 static expr_t
3995 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
3997 expr_t x = &insn_init_ssid->expr;
3999 copy_expr_onside (x, expr);
4000 if (vi != NULL)
4002 insn_init_create_new_vinsn_p = false;
4003 change_vinsn_in_expr (x, vi);
4005 else
4006 insn_init_create_new_vinsn_p = true;
4008 insn_init_ssid->seqno = seqno;
4009 return x;
4012 /* Init data for INSN. */
4013 static void
4014 init_insn_data (insn_t insn)
4016 expr_t expr;
4017 sel_insn_data_t ssid = insn_init_ssid;
4019 /* The fields mentioned below are special and hence are not being
4020 propagated to the new insns. */
4021 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4022 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4023 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4025 expr = INSN_EXPR (insn);
4026 copy_expr (expr, &ssid->expr);
4027 prepare_insn_expr (insn, ssid->seqno);
4029 if (insn_init_create_new_vinsn_p)
4030 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4032 if (first_time_insn_init (insn))
4033 init_first_time_insn_data (insn);
4036 /* This is used to initialize spurious jumps generated by
4037 sel_redirect_edge (). */
4038 static void
4039 init_simplejump_data (insn_t insn)
4041 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4042 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4043 false, true);
4044 INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
4045 init_first_time_insn_data (insn);
4048 /* Perform deferred initialization of insns. This is used to process
4049 a new jump that may be created by redirect_edge. */
4050 void
4051 sel_init_new_insn (insn_t insn, int flags)
4053 /* We create data structures for bb when the first insn is emitted in it. */
4054 if (INSN_P (insn)
4055 && INSN_IN_STREAM_P (insn)
4056 && insn_is_the_only_one_in_bb_p (insn))
4058 extend_bb_info ();
4059 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4062 if (flags & INSN_INIT_TODO_LUID)
4063 sched_init_luids (NULL, NULL, NULL, insn);
4065 if (flags & INSN_INIT_TODO_SSID)
4067 extend_insn_data ();
4068 init_insn_data (insn);
4069 clear_expr (&insn_init_ssid->expr);
4072 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4074 extend_insn_data ();
4075 init_simplejump_data (insn);
4078 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4079 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4083 /* Functions to init/finish work with lv sets. */
4085 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4086 static void
4087 init_lv_set (basic_block bb)
4089 gcc_assert (!BB_LV_SET_VALID_P (bb));
4091 BB_LV_SET (bb) = get_regset_from_pool ();
4092 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4093 BB_LV_SET_VALID_P (bb) = true;
4096 /* Copy liveness information to BB from FROM_BB. */
4097 static void
4098 copy_lv_set_from (basic_block bb, basic_block from_bb)
4100 gcc_assert (!BB_LV_SET_VALID_P (bb));
4102 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4103 BB_LV_SET_VALID_P (bb) = true;
4106 /* Initialize lv set of all bb headers. */
4107 void
4108 init_lv_sets (void)
4110 basic_block bb;
4112 /* Initialize LV sets. */
4113 FOR_EACH_BB (bb)
4114 init_lv_set (bb);
4116 /* Don't forget EXIT_BLOCK. */
4117 init_lv_set (EXIT_BLOCK_PTR);
4120 /* Release lv set of HEAD. */
4121 static void
4122 free_lv_set (basic_block bb)
4124 gcc_assert (BB_LV_SET (bb) != NULL);
4126 return_regset_to_pool (BB_LV_SET (bb));
4127 BB_LV_SET (bb) = NULL;
4128 BB_LV_SET_VALID_P (bb) = false;
4131 /* Finalize lv sets of all bb headers. */
4132 void
4133 free_lv_sets (void)
4135 basic_block bb;
4137 /* Don't forget EXIT_BLOCK. */
4138 free_lv_set (EXIT_BLOCK_PTR);
4140 /* Free LV sets. */
4141 FOR_EACH_BB (bb)
4142 if (BB_LV_SET (bb))
4143 free_lv_set (bb);
4146 /* Initialize an invalid AV_SET for BB.
4147 This set will be updated the next time compute_av () processes BB. */
4148 static void
4149 invalidate_av_set (basic_block bb)
4151 gcc_assert (BB_AV_LEVEL (bb) <= 0
4152 && BB_AV_SET (bb) == NULL);
4154 BB_AV_LEVEL (bb) = -1;
4157 /* Create initial data sets for BB (they will be invalid). */
4158 static void
4159 create_initial_data_sets (basic_block bb)
4161 if (BB_LV_SET (bb))
4162 BB_LV_SET_VALID_P (bb) = false;
4163 else
4164 BB_LV_SET (bb) = get_regset_from_pool ();
4165 invalidate_av_set (bb);
4168 /* Free av set of BB. */
4169 static void
4170 free_av_set (basic_block bb)
4172 av_set_clear (&BB_AV_SET (bb));
4173 BB_AV_LEVEL (bb) = 0;
4176 /* Free data sets of BB. */
4177 void
4178 free_data_sets (basic_block bb)
4180 free_lv_set (bb);
4181 free_av_set (bb);
4184 /* Exchange lv sets of TO and FROM. */
4185 static void
4186 exchange_lv_sets (basic_block to, basic_block from)
4189 regset to_lv_set = BB_LV_SET (to);
4191 BB_LV_SET (to) = BB_LV_SET (from);
4192 BB_LV_SET (from) = to_lv_set;
4196 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4198 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4199 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4204 /* Exchange av sets of TO and FROM. */
4205 static void
4206 exchange_av_sets (basic_block to, basic_block from)
4209 av_set_t to_av_set = BB_AV_SET (to);
4211 BB_AV_SET (to) = BB_AV_SET (from);
4212 BB_AV_SET (from) = to_av_set;
4216 int to_av_level = BB_AV_LEVEL (to);
4218 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4219 BB_AV_LEVEL (from) = to_av_level;
4223 /* Exchange data sets of TO and FROM. */
4224 void
4225 exchange_data_sets (basic_block to, basic_block from)
4227 exchange_lv_sets (to, from);
4228 exchange_av_sets (to, from);
4231 /* Copy data sets of FROM to TO. */
4232 void
4233 copy_data_sets (basic_block to, basic_block from)
4235 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4236 gcc_assert (BB_AV_SET (to) == NULL);
4238 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4239 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4241 if (BB_AV_SET_VALID_P (from))
4243 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4245 if (BB_LV_SET_VALID_P (from))
4247 gcc_assert (BB_LV_SET (to) != NULL);
4248 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4252 /* Return an av set for INSN, if any. */
4253 av_set_t
4254 get_av_set (insn_t insn)
4256 av_set_t av_set;
4258 gcc_assert (AV_SET_VALID_P (insn));
4260 if (sel_bb_head_p (insn))
4261 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4262 else
4263 av_set = NULL;
4265 return av_set;
4268 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4270 get_av_level (insn_t insn)
4272 int av_level;
4274 gcc_assert (INSN_P (insn));
4276 if (sel_bb_head_p (insn))
4277 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4278 else
4279 av_level = INSN_WS_LEVEL (insn);
4281 return av_level;
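/* For illustration of the two accessors above: for a bb head INSN they read
   the per-bb fields BB_AV_SET / BB_AV_LEVEL of its block, while for any
   other insn the av set is NULL and the level is the insn's own
   INSN_WS_LEVEL.  */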
4286 /* Variables to work with control-flow graph. */
4288 /* Basic blocks that have already been processed by sched_data_update (),
4289 but haven't been passed to sel_add_bb () yet. */
4290 static VEC (basic_block, heap) *last_added_blocks = NULL;
4292 /* A pool for allocating successor infos. */
4293 static struct
4295 /* A stack for saving succs_info structures. */
4296 struct succs_info *stack;
4298 /* Its size. */
4299 int size;
4301 /* Top of the stack. */
4302 int top;
4304 /* Maximal value of the top. */
4305 int max_top;
4306 } succs_info_pool;
4308 /* Functions to work with control-flow graph. */
4310 /* Return the first real insn of BB (the insn following its bb note), or NULL if BB is empty. */
4311 insn_t
4312 sel_bb_head (basic_block bb)
4314 insn_t head;
4316 if (bb == EXIT_BLOCK_PTR)
4318 gcc_assert (exit_insn != NULL_RTX);
4319 head = exit_insn;
4321 else
4323 insn_t note;
4325 note = bb_note (bb);
4326 head = next_nonnote_insn (note);
4328 if (head && BLOCK_FOR_INSN (head) != bb)
4329 head = NULL_RTX;
4332 return head;
4335 /* Return true if INSN is a basic block header. */
4336 bool
4337 sel_bb_head_p (insn_t insn)
4339 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4342 /* Return last insn of BB. */
4343 insn_t
4344 sel_bb_end (basic_block bb)
4346 if (sel_bb_empty_p (bb))
4347 return NULL_RTX;
4349 gcc_assert (bb != EXIT_BLOCK_PTR);
4351 return BB_END (bb);
4354 /* Return true if INSN is the last insn in its basic block. */
4355 bool
4356 sel_bb_end_p (insn_t insn)
4358 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4361 /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4362 bool
4363 sel_bb_empty_p (basic_block bb)
4365 return sel_bb_head (bb) == NULL;
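/* A minimal sketch of how the accessors above combine to walk the real insns
   of a block (illustrative only; BB is assumed to be a non-empty, non-exit
   block of the current region):

     insn_t insn = sel_bb_head (bb);
     insn_t tail = sel_bb_end (bb);

     while (insn != NULL_RTX)
       {
         /* ... process INSN ...  */
         if (insn == tail)
           break;
         insn = NEXT_INSN (insn);
       }

   For an empty block both sel_bb_head and sel_bb_end return null, so such a
   loop body would never be entered.  */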
4368 /* True when BB belongs to the current scheduling region. */
4369 bool
4370 in_current_region_p (basic_block bb)
4372 if (bb->index < NUM_FIXED_BLOCKS)
4373 return false;
4375 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4378 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4379 basic_block
4380 fallthru_bb_of_jump (rtx jump)
4382 if (!JUMP_P (jump))
4383 return NULL;
4385 if (any_uncondjump_p (jump))
4386 return single_succ (BLOCK_FOR_INSN (jump));
4388 if (!any_condjump_p (jump))
4389 return NULL;
4391 /* A basic block that ends with a conditional jump may still have one successor
4392 (and be followed by a barrier); we are not interested in such blocks. */
4393 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4394 return NULL;
4396 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4399 /* Remove all notes from BB. */
4400 static void
4401 init_bb (basic_block bb)
4403 remove_notes (bb_note (bb), BB_END (bb));
4404 BB_NOTE_LIST (bb) = note_list;
4407 void
4408 sel_init_bbs (bb_vec_t bbs, basic_block bb)
4410 const struct sched_scan_info_def ssi =
4412 extend_bb_info, /* extend_bb */
4413 init_bb, /* init_bb */
4414 NULL, /* extend_insn */
4415 NULL /* init_insn */
4418 sched_scan (&ssi, bbs, bb, new_insns, NULL);
4421 /* Restore notes for the whole region. */
4422 static void
4423 sel_restore_notes (void)
4425 int bb;
4426 insn_t insn;
4428 for (bb = 0; bb < current_nr_blocks; bb++)
4430 basic_block first, last;
4432 first = EBB_FIRST_BB (bb);
4433 last = EBB_LAST_BB (bb)->next_bb;
4437 note_list = BB_NOTE_LIST (first);
4438 restore_other_notes (NULL, first);
4439 BB_NOTE_LIST (first) = NULL_RTX;
4441 FOR_BB_INSNS (first, insn)
4442 if (NONDEBUG_INSN_P (insn))
4443 reemit_notes (insn);
4445 first = first->next_bb;
4447 while (first != last);
4451 /* Free per-bb data structures. */
4452 void
4453 sel_finish_bbs (void)
4455 sel_restore_notes ();
4457 /* Remove current loop preheader from this loop. */
4458 if (current_loop_nest)
4459 sel_remove_loop_preheader ();
4461 finish_region_bb_info ();
4464 /* Return true if INSN has a single successor of type FLAGS. */
4465 bool
4466 sel_insn_has_single_succ_p (insn_t insn, int flags)
4468 insn_t succ;
4469 succ_iterator si;
4470 bool first_p = true;
4472 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4474 if (first_p)
4475 first_p = false;
4476 else
4477 return false;
4480 return true;
4483 /* Allocate successor's info. */
4484 static struct succs_info *
4485 alloc_succs_info (void)
4487 if (succs_info_pool.top == succs_info_pool.max_top)
4489 int i;
4491 if (++succs_info_pool.max_top >= succs_info_pool.size)
4492 gcc_unreachable ();
4494 i = ++succs_info_pool.top;
4495 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4496 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4497 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4499 else
4500 succs_info_pool.top++;
4502 return &succs_info_pool.stack[succs_info_pool.top];
4505 /* Free successor's info. */
4506 void
4507 free_succs_info (struct succs_info * sinfo)
4509 gcc_assert (succs_info_pool.top >= 0
4510 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4511 succs_info_pool.top--;
4513 /* Clear stale info. */
4514 VEC_block_remove (rtx, sinfo->succs_ok,
4515 0, VEC_length (rtx, sinfo->succs_ok));
4516 VEC_block_remove (rtx, sinfo->succs_other,
4517 0, VEC_length (rtx, sinfo->succs_other));
4518 VEC_block_remove (int, sinfo->probs_ok,
4519 0, VEC_length (int, sinfo->probs_ok));
4520 sinfo->all_prob = 0;
4521 sinfo->succs_ok_n = 0;
4522 sinfo->all_succs_n = 0;
4525 /* Compute successor info for INSN. FLAGS are the flags passed
4526 to the FOR_EACH_SUCC_1 iterator. */
4527 struct succs_info *
4528 compute_succs_info (insn_t insn, short flags)
4530 succ_iterator si;
4531 insn_t succ;
4532 struct succs_info *sinfo = alloc_succs_info ();
4534 /* Traverse *all* successors and decide what to do with each. */
4535 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4537 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4538 perform code motion through inner loops. */
4539 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4541 if (current_flags & flags)
4543 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4544 VEC_safe_push (int, heap, sinfo->probs_ok,
4545 /* FIXME: Improve calculation when skipping
4546 inner loop to exits. */
4547 (si.bb_end
4548 ? si.e1->probability
4549 : REG_BR_PROB_BASE));
4550 sinfo->succs_ok_n++;
4552 else
4553 VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4555 /* Compute all_prob. */
4556 if (!si.bb_end)
4557 sinfo->all_prob = REG_BR_PROB_BASE;
4558 else
4559 sinfo->all_prob += si.e1->probability;
4561 sinfo->all_succs_n++;
4564 return sinfo;
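/* A minimal usage sketch of the pool above (illustrative only; INSN is a
   hypothetical insn of the current region):

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_ALL);
     /* ... use sinfo->succs_ok, sinfo->probs_ok, sinfo->all_prob ...  */
     free_succs_info (sinfo);

   Note that free_succs_info asserts that SINFO is the most recently
   allocated entry, so allocation and release must nest like a stack.  */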
4567 /* Return the predecessors of BB in PREDS and their number in N.
4568 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4569 static void
4570 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4572 edge e;
4573 edge_iterator ei;
4575 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4577 FOR_EACH_EDGE (e, ei, bb->preds)
4579 basic_block pred_bb = e->src;
4580 insn_t bb_end = BB_END (pred_bb);
4582 /* ??? This code is not supposed to walk out of a region. */
4583 gcc_assert (in_current_region_p (pred_bb));
4585 if (sel_bb_empty_p (pred_bb))
4586 cfg_preds_1 (pred_bb, preds, n, size);
4587 else
4589 if (*n == *size)
4590 *preds = XRESIZEVEC (insn_t, *preds,
4591 (*size = 2 * *size + 1));
4592 (*preds)[(*n)++] = bb_end;
4596 gcc_assert (*n != 0);
4599 /* Find all predecessors of BB and record them in PREDS and their number
4600 in N. Empty blocks are skipped, and only normal (forward in-region)
4601 edges are processed. */
4602 static void
4603 cfg_preds (basic_block bb, insn_t **preds, int *n)
4605 int size = 0;
4607 *preds = NULL;
4608 *n = 0;
4609 cfg_preds_1 (bb, preds, n, &size);
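/* Note that cfg_preds_1 grows *PREDS with XRESIZEVEC, so the array filled in
   by cfg_preds is heap-allocated and the caller is expected to free it,
   e.g. (hypothetical caller; BB is a block of the current region):

     insn_t *preds;
     int n;

     cfg_preds (bb, &preds, &n);
     /* ... inspect the N predecessor end-insns ...  */
     free (preds);
*/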
4612 /* Returns true if we are moving INSN through a join point. */
4613 bool
4614 sel_num_cfg_preds_gt_1 (insn_t insn)
4616 basic_block bb;
4618 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4619 return false;
4621 bb = BLOCK_FOR_INSN (insn);
4623 while (1)
4625 if (EDGE_COUNT (bb->preds) > 1)
4626 return true;
4628 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4629 bb = EDGE_PRED (bb, 0)->src;
4631 if (!sel_bb_empty_p (bb))
4632 break;
4635 return false;
4638 /* Returns true when BB should be the end of an ebb. Adapted from the
4639 code in sched-ebb.c. */
4640 bool
4641 bb_ends_ebb_p (basic_block bb)
4643 basic_block next_bb = bb_next_bb (bb);
4644 edge e;
4645 edge_iterator ei;
4647 if (next_bb == EXIT_BLOCK_PTR
4648 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4649 || (LABEL_P (BB_HEAD (next_bb))
4650 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4651 Work around that. */
4652 && !single_pred_p (next_bb)))
4653 return true;
4655 if (!in_current_region_p (next_bb))
4656 return true;
4658 FOR_EACH_EDGE (e, ei, bb->succs)
4659 if ((e->flags & EDGE_FALLTHRU) != 0)
4661 gcc_assert (e->dest == next_bb);
4663 return false;
4666 return true;
4669 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4670 successor of INSN. */
4671 bool
4672 in_same_ebb_p (insn_t insn, insn_t succ)
4674 basic_block ptr = BLOCK_FOR_INSN (insn);
4676 for(;;)
4678 if (ptr == BLOCK_FOR_INSN (succ))
4679 return true;
4681 if (bb_ends_ebb_p (ptr))
4682 return false;
4684 ptr = bb_next_bb (ptr);
4687 gcc_unreachable ();
4688 return false;
4691 /* Recomputes the reverse topological order for the function and
4692 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4693 modified appropriately. */
4694 static void
4695 recompute_rev_top_order (void)
4697 int *postorder;
4698 int n_blocks, i;
4700 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4702 rev_top_order_index_len = last_basic_block;
4703 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4704 rev_top_order_index_len);
4707 postorder = XNEWVEC (int, n_basic_blocks);
4709 n_blocks = post_order_compute (postorder, true, false);
4710 gcc_assert (n_basic_blocks == n_blocks);
4712 /* Build reverse function: for each basic block with BB->INDEX == K
4713 rev_top_order_index[K] is its reverse topological sort number. */
4714 for (i = 0; i < n_blocks; i++)
4716 gcc_assert (postorder[i] < rev_top_order_index_len);
4717 rev_top_order_index[postorder[i]] = i;
4720 free (postorder);
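/* For illustration: if post_order_compute fills POSTORDER with { 5, 3, 7 },
   the loop above yields rev_top_order_index[5] == 0,
   rev_top_order_index[3] == 1 and rev_top_order_index[7] == 2; blocks that
   appear later in the postorder get larger reverse-topological numbers.  */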
4723 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4724 void
4725 clear_outdated_rtx_info (basic_block bb)
4727 rtx insn;
4729 FOR_BB_INSNS (bb, insn)
4730 if (INSN_P (insn))
4732 SCHED_GROUP_P (insn) = 0;
4733 INSN_AFTER_STALL_P (insn) = 0;
4734 INSN_SCHED_TIMES (insn) = 0;
4735 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4737 /* We cannot use the changed caches, as previously we could ignore
4738 the LHS dependence due to enabled renaming and transform
4739 the expression, and currently we'll be unable to do this. */
4740 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4744 /* Add the note of BB to the pool of available basic block notes. */
4745 static void
4746 return_bb_to_pool (basic_block bb)
4748 rtx note = bb_note (bb);
4750 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4751 && bb->aux == NULL);
4753 /* It turns out that current cfg infrastructure does not support
4754 reuse of basic blocks. Don't bother for now. */
4755 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4758 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4759 static rtx
4760 get_bb_note_from_pool (void)
4762 if (VEC_empty (rtx, bb_note_pool))
4763 return NULL_RTX;
4764 else
4766 rtx note = VEC_pop (rtx, bb_note_pool);
4768 PREV_INSN (note) = NULL_RTX;
4769 NEXT_INSN (note) = NULL_RTX;
4771 return note;
4775 /* Free bb_note_pool. */
4776 void
4777 free_bb_note_pool (void)
4779 VEC_free (rtx, heap, bb_note_pool);
4782 /* Setup scheduler pool and successor structure. */
4783 void
4784 alloc_sched_pools (void)
4786 int succs_size;
4788 succs_size = MAX_WS + 1;
4789 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4790 succs_info_pool.size = succs_size;
4791 succs_info_pool.top = -1;
4792 succs_info_pool.max_top = -1;
4794 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4795 sizeof (struct _list_node), 500);
4798 /* Free the pools. */
4799 void
4800 free_sched_pools (void)
4802 int i;
4804 free_alloc_pool (sched_lists_pool);
4805 gcc_assert (succs_info_pool.top == -1);
4806 for (i = 0; i <= succs_info_pool.max_top; i++)
4808 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4809 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4810 VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
4812 free (succs_info_pool.stack);
4816 /* Returns a position in RGN where BB can be inserted retaining
4817 topological order. */
4818 static int
4819 find_place_to_insert_bb (basic_block bb, int rgn)
4821 bool has_preds_outside_rgn = false;
4822 edge e;
4823 edge_iterator ei;
4825 /* Find whether we have preds outside the region. */
4826 FOR_EACH_EDGE (e, ei, bb->preds)
4827 if (!in_current_region_p (e->src))
4829 has_preds_outside_rgn = true;
4830 break;
4833 /* Recompute the top order -- needed when we have > 1 pred
4834 and in case we don't have preds outside. */
4835 if (flag_sel_sched_pipelining_outer_loops
4836 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
4838 int i, bbi = bb->index, cur_bbi;
4840 recompute_rev_top_order ();
4841 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
4843 cur_bbi = BB_TO_BLOCK (i);
4844 if (rev_top_order_index[bbi]
4845 < rev_top_order_index[cur_bbi])
4846 break;
4849 /* We skipped the right block, so we increase i. The caller will
4850 increment the result by one, so compensate by decreasing i. */
4851 return (i + 1) - 1;
4853 else if (has_preds_outside_rgn)
4855 /* This is the case when we generate an extra empty block
4856 to serve as region head during pipelining. */
4857 e = EDGE_SUCC (bb, 0);
4858 gcc_assert (EDGE_COUNT (bb->succs) == 1
4859 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
4860 && (BLOCK_TO_BB (e->dest->index) == 0));
4861 return -1;
4864 /* We don't have preds outside the region. We should have exactly
4865 one pred, because the multiple preds case comes from
4866 the pipelining of outer loops, and that is handled above.
4867 Just take the bbi of this single pred. */
4868 if (EDGE_COUNT (bb->succs) > 0)
4870 int pred_bbi;
4872 gcc_assert (EDGE_COUNT (bb->preds) == 1);
4874 pred_bbi = EDGE_PRED (bb, 0)->src->index;
4875 return BLOCK_TO_BB (pred_bbi);
4877 else
4878 /* BB has no successors. It is safe to put it at the end. */
4879 return current_nr_blocks - 1;
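/* Note that the value returned above is the index of the block after which
   BB should go; add_block_to_current_region adds 1 to it, so a return value
   of -1 means "insert BB at the very beginning of the region".  */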
4882 /* Deletes an empty basic block freeing its data. */
4883 static void
4884 delete_and_free_basic_block (basic_block bb)
4886 gcc_assert (sel_bb_empty_p (bb));
4888 if (BB_LV_SET (bb))
4889 free_lv_set (bb);
4891 bitmap_clear_bit (blocks_to_reschedule, bb->index);
4893 /* Can't assert av_set properties because we use sel_remove_bb
4894 when removing loop preheader from the region. At the point of
4895 removing the preheader we already have deallocated sel_region_bb_info. */
4896 gcc_assert (BB_LV_SET (bb) == NULL
4897 && !BB_LV_SET_VALID_P (bb)
4898 && BB_AV_LEVEL (bb) == 0
4899 && BB_AV_SET (bb) == NULL);
4901 delete_basic_block (bb);
4904 /* Add BB to the current region and update the region data. */
4905 static void
4906 add_block_to_current_region (basic_block bb)
4908 int i, pos, bbi = -2, rgn;
4910 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4911 bbi = find_place_to_insert_bb (bb, rgn);
4912 bbi += 1;
4913 pos = RGN_BLOCKS (rgn) + bbi;
4915 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4916 && ebb_head[bbi] == pos);
4918 /* Make a place for the new block. */
4919 extend_regions ();
4921 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4922 BLOCK_TO_BB (rgn_bb_table[i])++;
4924 memmove (rgn_bb_table + pos + 1,
4925 rgn_bb_table + pos,
4926 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4928 /* Initialize data for BB. */
4929 rgn_bb_table[pos] = bb->index;
4930 BLOCK_TO_BB (bb->index) = bbi;
4931 CONTAINING_RGN (bb->index) = rgn;
4933 RGN_NR_BLOCKS (rgn)++;
4935 for (i = rgn + 1; i <= nr_regions; i++)
4936 RGN_BLOCKS (i)++;
4939 /* Remove BB from the current region and update the region data. */
4940 static void
4941 remove_bb_from_region (basic_block bb)
4943 int i, pos, bbi = -2, rgn;
4945 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
4946 bbi = BLOCK_TO_BB (bb->index);
4947 pos = RGN_BLOCKS (rgn) + bbi;
4949 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
4950 && ebb_head[bbi] == pos);
4952 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
4953 BLOCK_TO_BB (rgn_bb_table[i])--;
4955 memmove (rgn_bb_table + pos,
4956 rgn_bb_table + pos + 1,
4957 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
4959 RGN_NR_BLOCKS (rgn)--;
4960 for (i = rgn + 1; i <= nr_regions; i++)
4961 RGN_BLOCKS (i)--;
4964 /* Add BB to the current region and update all data. If BB is NULL, add all
4965 blocks from last_added_blocks vector. */
4966 static void
4967 sel_add_bb (basic_block bb)
4969 /* Extend luids so that new notes will receive zero luids. */
4970 sched_init_luids (NULL, NULL, NULL, NULL);
4971 sched_init_bbs ();
4972 sel_init_bbs (last_added_blocks, NULL);
4974 /* When bb is passed explicitly, the vector should contain
4975 a single element equal to bb; otherwise, the vector
4976 should not be NULL. */
4977 gcc_assert (last_added_blocks != NULL);
4979 if (bb != NULL)
4981 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
4982 && VEC_index (basic_block,
4983 last_added_blocks, 0) == bb);
4984 add_block_to_current_region (bb);
4986 /* We associate creating/deleting data sets with the first insn
4987 appearing / disappearing in the bb. */
4988 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
4989 create_initial_data_sets (bb);
4991 VEC_free (basic_block, heap, last_added_blocks);
4993 else
4994 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
4996 int i;
4997 basic_block temp_bb = NULL;
4999 for (i = 0;
5000 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5002 add_block_to_current_region (bb);
5003 temp_bb = bb;
5006 /* We need to fetch at least one bb so we know the region
5007 to update. */
5008 gcc_assert (temp_bb != NULL);
5009 bb = temp_bb;
5011 VEC_free (basic_block, heap, last_added_blocks);
5014 rgn_setup_region (CONTAINING_RGN (bb->index));
5017 /* Remove BB from the current region and update all data.
5018 If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */
5019 static void
5020 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5022 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5024 remove_bb_from_region (bb);
5025 return_bb_to_pool (bb);
5026 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5028 if (remove_from_cfg_p)
5029 delete_and_free_basic_block (bb);
5031 rgn_setup_region (CONTAINING_RGN (bb->index));
5034 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5035 static void
5036 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5038 gcc_assert (in_current_region_p (merge_bb));
5040 concat_note_lists (BB_NOTE_LIST (empty_bb),
5041 &BB_NOTE_LIST (merge_bb));
5042 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5046 /* Remove an empty basic block EMPTY_BB. When MERGE_UP_P is true, we put
5047 EMPTY_BB's note lists into its predecessor instead of putting them
5048 into the successor. When REMOVE_FROM_CFG_P is true, also remove
5049 the empty block. */
5050 void
5051 sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
5052 bool remove_from_cfg_p)
5054 basic_block merge_bb;
5056 gcc_assert (sel_bb_empty_p (empty_bb));
5058 if (merge_up_p)
5060 merge_bb = empty_bb->prev_bb;
5061 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1
5062 && EDGE_PRED (empty_bb, 0)->src == merge_bb);
5064 else
5066 edge e;
5067 edge_iterator ei;
5069 merge_bb = bb_next_bb (empty_bb);
5071 /* Redirect incoming edges (except the fallthrough one) of EMPTY_BB to its
5072 successor block. */
5073 for (ei = ei_start (empty_bb->preds);
5074 (e = ei_safe_edge (ei)); )
5076 if (! (e->flags & EDGE_FALLTHRU))
5077 sel_redirect_edge_and_branch (e, merge_bb);
5078 else
5079 ei_next (&ei);
5082 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1
5083 && EDGE_SUCC (empty_bb, 0)->dest == merge_bb);
5086 move_bb_info (merge_bb, empty_bb);
5087 remove_empty_bb (empty_bb, remove_from_cfg_p);
5090 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5091 region, but keep it in CFG. */
5092 static void
5093 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5095 /* The block should contain just a note or a label.
5096 We try to check whether it is unused below. */
5097 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5098 || LABEL_P (BB_HEAD (empty_bb)));
5100 /* If basic block has predecessors or successors, redirect them. */
5101 if (remove_from_cfg_p
5102 && (EDGE_COUNT (empty_bb->preds) > 0
5103 || EDGE_COUNT (empty_bb->succs) > 0))
5105 basic_block pred;
5106 basic_block succ;
5108 /* We need to init PRED and SUCC before redirecting edges. */
5109 if (EDGE_COUNT (empty_bb->preds) > 0)
5111 edge e;
5113 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5115 e = EDGE_PRED (empty_bb, 0);
5116 gcc_assert (e->src == empty_bb->prev_bb
5117 && (e->flags & EDGE_FALLTHRU));
5119 pred = empty_bb->prev_bb;
5121 else
5122 pred = NULL;
5124 if (EDGE_COUNT (empty_bb->succs) > 0)
5126 /* We do not check fallthruness here as above, because
5127 after removing a jump the edge may no longer be fallthru. */
5128 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5129 succ = EDGE_SUCC (empty_bb, 0)->dest;
5131 else
5132 succ = NULL;
5134 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5136 edge e = EDGE_PRED (empty_bb, 0);
5138 if (e->flags & EDGE_FALLTHRU)
5139 redirect_edge_succ_nodup (e, succ);
5140 else
5141 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5144 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5146 edge e = EDGE_SUCC (empty_bb, 0);
5148 if (find_edge (pred, e->dest) == NULL)
5149 redirect_edge_pred (e, pred);
5153 /* Finish removing. */
5154 sel_remove_bb (empty_bb, remove_from_cfg_p);
5157 /* An implementation of create_basic_block hook, which additionally updates
5158 per-bb data structures. */
5159 static basic_block
5160 sel_create_basic_block (void *headp, void *endp, basic_block after)
5162 basic_block new_bb;
5163 insn_t new_bb_note;
5165 gcc_assert (flag_sel_sched_pipelining_outer_loops
5166 || last_added_blocks == NULL);
5168 new_bb_note = get_bb_note_from_pool ();
5170 if (new_bb_note == NULL_RTX)
5171 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5172 else
5174 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5175 new_bb_note, after);
5176 new_bb->aux = NULL;
5179 VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5181 return new_bb;
5184 /* Implement sched_init_only_bb (). */
5185 static void
5186 sel_init_only_bb (basic_block bb, basic_block after)
5188 gcc_assert (after == NULL);
5190 extend_regions ();
5191 rgn_make_new_region_out_of_new_block (bb);
5194 /* Update the latch when we've split or merged it from block FROM to block TO.
5195 This should be checked for all outer loops, too. */
5196 static void
5197 change_loops_latches (basic_block from, basic_block to)
5199 gcc_assert (from != to);
5201 if (current_loop_nest)
5203 struct loop *loop;
5205 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5206 if (considered_for_pipelining_p (loop) && loop->latch == from)
5208 gcc_assert (loop == current_loop_nest);
5209 loop->latch = to;
5210 gcc_assert (loop_latch_edge (loop));
5215 /* Splits BB into two basic blocks, adding the new one to the region and extending
5216 per-bb data structures. Returns the newly created bb. */
5217 static basic_block
5218 sel_split_block (basic_block bb, rtx after)
5220 basic_block new_bb;
5221 insn_t insn;
5223 new_bb = sched_split_block_1 (bb, after);
5224 sel_add_bb (new_bb);
5226 /* This should be called after sel_add_bb, because this uses
5227 CONTAINING_RGN for the new block, which is not yet initialized.
5228 FIXME: this function may be a no-op now. */
5229 change_loops_latches (bb, new_bb);
5231 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5232 FOR_BB_INSNS (new_bb, insn)
5233 if (INSN_P (insn))
5234 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5236 if (sel_bb_empty_p (bb))
5238 gcc_assert (!sel_bb_empty_p (new_bb));
5240 /* NEW_BB has data sets that need to be updated and BB holds
5241 data sets that should be removed. Exchange these data sets
5242 so that we won't lose BB's valid data sets. */
5243 exchange_data_sets (new_bb, bb);
5244 free_data_sets (bb);
5247 if (!sel_bb_empty_p (new_bb)
5248 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5249 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5251 return new_bb;
5254 /* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return it.
5255 Otherwise return NULL. */
5256 static rtx
5257 check_for_new_jump (basic_block bb, int prev_max_uid)
5259 rtx end;
5261 end = sel_bb_end (bb);
5262 if (end && INSN_UID (end) >= prev_max_uid)
5263 return end;
5264 return NULL;
5267 /* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5268 New means having UID at least equal to PREV_MAX_UID. */
5269 static rtx
5270 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5272 rtx jump;
5274 /* Return immediately if no new insns were emitted. */
5275 if (get_max_uid () == prev_max_uid)
5276 return NULL;
5278 /* Now check both blocks for new jumps. There will only ever be one. */
5279 if ((jump = check_for_new_jump (from, prev_max_uid)))
5280 return jump;
5282 if (jump_bb != NULL
5283 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5284 return jump;
5285 return NULL;
5288 /* Splits E and adds the newly created basic block to the current region.
5289 Returns this basic block. */
5290 basic_block
5291 sel_split_edge (edge e)
5293 basic_block new_bb, src, other_bb = NULL;
5294 int prev_max_uid;
5295 rtx jump;
5297 src = e->src;
5298 prev_max_uid = get_max_uid ();
5299 new_bb = split_edge (e);
5301 if (flag_sel_sched_pipelining_outer_loops
5302 && current_loop_nest)
5304 int i;
5305 basic_block bb;
5307 /* Some of the basic blocks might not have been added to the loop.
5308 Add them here, until this is fixed in force_fallthru. */
5309 for (i = 0;
5310 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5311 if (!bb->loop_father)
5313 add_bb_to_loop (bb, e->dest->loop_father);
5315 gcc_assert (!other_bb && (new_bb->index != bb->index));
5316 other_bb = bb;
5320 /* Add all last_added_blocks to the region. */
5321 sel_add_bb (NULL);
5323 jump = find_new_jump (src, new_bb, prev_max_uid);
5324 if (jump)
5325 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5327 /* Put the correct lv set on this block. */
5328 if (other_bb && !sel_bb_empty_p (other_bb))
5329 compute_live (sel_bb_head (other_bb));
5331 return new_bb;
5334 /* Implement sched_create_empty_bb (). */
5335 static basic_block
5336 sel_create_empty_bb (basic_block after)
5338 basic_block new_bb;
5340 new_bb = sched_create_empty_bb_1 (after);
5342 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5343 later. */
5344 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5345 && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5347 VEC_free (basic_block, heap, last_added_blocks);
5348 return new_bb;
5351 /* Implement sched_create_recovery_block. ORIG_INSN is where the block
5352 will be split to insert a check. */
5353 basic_block
5354 sel_create_recovery_block (insn_t orig_insn)
5356 basic_block first_bb, second_bb, recovery_block;
5357 basic_block before_recovery = NULL;
5358 rtx jump;
5360 first_bb = BLOCK_FOR_INSN (orig_insn);
5361 if (sel_bb_end_p (orig_insn))
5363 /* Avoid introducing an empty block while splitting. */
5364 gcc_assert (single_succ_p (first_bb));
5365 second_bb = single_succ (first_bb);
5367 else
5368 second_bb = sched_split_block (first_bb, orig_insn);
5370 recovery_block = sched_create_recovery_block (&before_recovery);
5371 if (before_recovery)
5372 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5374 gcc_assert (sel_bb_empty_p (recovery_block));
5375 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5376 if (current_loops != NULL)
5377 add_bb_to_loop (recovery_block, first_bb->loop_father);
5379 sel_add_bb (recovery_block);
5381 jump = BB_END (recovery_block);
5382 gcc_assert (sel_bb_head (recovery_block) == jump);
5383 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5385 return recovery_block;
5388 /* Merge basic block B into basic block A. */
5389 void
5390 sel_merge_blocks (basic_block a, basic_block b)
5392 sel_remove_empty_bb (b, true, false);
5393 merge_blocks (a, b);
5395 change_loops_latches (b, a);
5398 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5399 data structures for the possibly created bb and insns. The newly
5400 created bb, if any, is added to the current region. */
5401 void
5402 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5404 basic_block jump_bb, src;
5405 int prev_max_uid;
5406 rtx jump;
5408 gcc_assert (!sel_bb_empty_p (e->src));
5410 src = e->src;
5411 prev_max_uid = get_max_uid ();
5412 jump_bb = redirect_edge_and_branch_force (e, to);
5414 if (jump_bb != NULL)
5415 sel_add_bb (jump_bb);
5417 /* As of now, this function cannot be used to spoil the loop structure,
5418 thus we don't need to update anything. But check it to be sure. */
5419 if (current_loop_nest
5420 && pipelining_p)
5421 gcc_assert (loop_latch_edge (current_loop_nest));
5423 jump = find_new_jump (src, jump_bb, prev_max_uid);
5424 if (jump)
5425 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5428 /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5429 redirected edge are in reverse topological order. */
5430 bool
5431 sel_redirect_edge_and_branch (edge e, basic_block to)
5433 bool latch_edge_p;
5434 basic_block src;
5435 int prev_max_uid;
5436 rtx jump;
5437 edge redirected;
5438 bool recompute_toporder_p = false;
5440 latch_edge_p = (pipelining_p
5441 && current_loop_nest
5442 && e == loop_latch_edge (current_loop_nest));
5444 src = e->src;
5445 prev_max_uid = get_max_uid ();
5447 redirected = redirect_edge_and_branch (e, to);
5449 gcc_assert (redirected && last_added_blocks == NULL);
5451 /* When we've redirected a latch edge, update the header. */
5452 if (latch_edge_p)
5454 current_loop_nest->header = to;
5455 gcc_assert (loop_latch_edge (current_loop_nest));
5458 /* In rare situations, the topological relation between the blocks connected
5459 by the redirected edge can change (see PR42245 for an example). Update
5460 block_to_bb/bb_to_block. */
5461 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5462 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5463 recompute_toporder_p = true;
5465 jump = find_new_jump (src, NULL, prev_max_uid);
5466 if (jump)
5467 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5469 return recompute_toporder_p;
5472 /* This variable holds the cfg hooks used by the selective scheduler. */
5473 static struct cfg_hooks sel_cfg_hooks;
5475 /* Register sel-sched cfg hooks. */
5476 void
5477 sel_register_cfg_hooks (void)
5479 sched_split_block = sel_split_block;
5481 orig_cfg_hooks = get_cfg_hooks ();
5482 sel_cfg_hooks = orig_cfg_hooks;
5484 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5486 set_cfg_hooks (sel_cfg_hooks);
5488 sched_init_only_bb = sel_init_only_bb;
5489 sched_split_block = sel_split_block;
5490 sched_create_empty_bb = sel_create_empty_bb;
5493 /* Unregister sel-sched cfg hooks. */
5494 void
5495 sel_unregister_cfg_hooks (void)
5497 sched_create_empty_bb = NULL;
5498 sched_split_block = NULL;
5499 sched_init_only_bb = NULL;
5501 set_cfg_hooks (orig_cfg_hooks);
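/* The two functions above are meant to bracket CFG-modifying scheduler code:
   within the cfg_hooks structure only create_basic_block is overridden,
   while the sched_* callbacks (sched_split_block, sched_init_only_bb,
   sched_create_empty_bb) are set on registration and cleared again on
   unregistration.  */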
5505 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5506 LABEL is where this jump should be directed. */
5508 rtx create_insn_rtx_from_pattern (rtx pattern, rtx label)
5510 rtx insn_rtx;
5512 gcc_assert (!INSN_P (pattern));
5514 start_sequence ();
5516 if (label == NULL_RTX)
5517 insn_rtx = emit_insn (pattern);
5518 else if (DEBUG_INSN_P (label))
5519 insn_rtx = emit_debug_insn (pattern);
5520 else
5522 insn_rtx = emit_jump_insn (pattern);
5523 JUMP_LABEL (insn_rtx) = label;
5524 ++LABEL_NUSES (label);
5527 end_sequence ();
5529 sched_init_luids (NULL, NULL, NULL, NULL);
5530 sched_extend_target ();
5531 sched_deps_init (false);
5533 /* Initialize INSN_CODE now. */
5534 recog_memoized (insn_rtx);
5535 return insn_rtx;
5538 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5539 must not be clonable. */
5540 vinsn_t
5541 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5543 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5545 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5546 return vinsn_create (insn_rtx, force_unique_p);
5549 /* Create a copy of INSN_RTX. */
5551 rtx create_copy_of_insn_rtx (rtx insn_rtx)
5553 rtx res;
5555 if (DEBUG_INSN_P (insn_rtx))
5556 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5557 insn_rtx);
5559 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5561 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5562 NULL_RTX);
5563 return res;
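/* A minimal sketch of how the insn-rtx helpers above combine (illustrative
   only; PATTERN is a hypothetical non-jump insn pattern):

     rtx insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);
     vinsn_t vi = create_vinsn_from_insn_rtx (insn_rtx, false);

   Passing a label instead of NULL_RTX emits a jump insn directed to that
   label, and passing true for FORCE_UNIQUE_P makes the resulting vinsn
   unique (non-clonable).  */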
5566 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5567 void
5568 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5570 vinsn_detach (EXPR_VINSN (expr));
5572 EXPR_VINSN (expr) = new_vinsn;
5573 vinsn_attach (new_vinsn);
5576 /* Helpers for global init. */
5577 /* This structure is used to be able to call the existing bundling mechanism
5578 and calculate insn priorities. */
5579 static struct haifa_sched_info sched_sel_haifa_sched_info =
5581 NULL, /* init_ready_list */
5582 NULL, /* can_schedule_ready_p */
5583 NULL, /* schedule_more_p */
5584 NULL, /* new_ready */
5585 NULL, /* rgn_rank */
5586 sel_print_insn, /* rgn_print_insn */
5587 contributes_to_priority,
5588 NULL, /* insn_finishes_block_p */
5590 NULL, NULL,
5591 NULL, NULL,
5592 0, 0,
5594 NULL, /* add_remove_insn */
5595 NULL, /* begin_schedule_ready */
5596 NULL, /* advance_target_bb */
5597 SEL_SCHED | NEW_BBS
5600 /* Setup special insns used in the scheduler. */
5601 void
5602 setup_nop_and_exit_insns (void)
5604 gcc_assert (nop_pattern == NULL_RTX
5605 && exit_insn == NULL_RTX);
5607 nop_pattern = gen_nop ();
5609 start_sequence ();
5610 emit_insn (nop_pattern);
5611 exit_insn = get_insns ();
5612 end_sequence ();
5613 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5616 /* Free special insns used in the scheduler. */
5617 void
5618 free_nop_and_exit_insns (void)
5620 exit_insn = NULL_RTX;
5621 nop_pattern = NULL_RTX;
5624 /* Setup a special vinsn used in new insns initialization. */
5625 void
5626 setup_nop_vinsn (void)
5628 nop_vinsn = vinsn_create (exit_insn, false);
5629 vinsn_attach (nop_vinsn);
5632 /* Free a special vinsn used in new insns initialization. */
5633 void
5634 free_nop_vinsn (void)
5636 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5637 vinsn_detach (nop_vinsn);
5638 nop_vinsn = NULL;
5641 /* Call a set_sched_flags hook. */
5642 void
5643 sel_set_sched_flags (void)
5645 /* ??? This means that set_sched_flags was called, and we decided to
5646 support speculation. However, set_sched_flags also modifies flags
5647 on current_sched_info, doing this only at global init. And we
5648 sometimes change c_s_i later. So put the correct flags again. */
5649 if (spec_info && targetm.sched.set_sched_flags)
5650 targetm.sched.set_sched_flags (spec_info);
5653 /* Setup pointers to global sched info structures. */
5654 void
5655 sel_setup_sched_infos (void)
5657 rgn_setup_common_sched_info ();
5659 memcpy (&sel_common_sched_info, common_sched_info,
5660 sizeof (sel_common_sched_info));
5662 sel_common_sched_info.fix_recovery_cfg = NULL;
5663 sel_common_sched_info.add_block = NULL;
5664 sel_common_sched_info.estimate_number_of_insns
5665 = sel_estimate_number_of_insns;
5666 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5667 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5669 common_sched_info = &sel_common_sched_info;
5671 current_sched_info = &sched_sel_haifa_sched_info;
5672 current_sched_info->sched_max_insns_priority =
5673 get_rgn_sched_max_insns_priority ();
5675 sel_set_sched_flags ();
5679 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX;
5680 *BB_ORD_INDEX is then incremented. */
5681 static void
5682 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5684 RGN_NR_BLOCKS (rgn) += 1;
5685 RGN_DONT_CALC_DEPS (rgn) = 0;
5686 RGN_HAS_REAL_EBB (rgn) = 0;
5687 CONTAINING_RGN (bb->index) = rgn;
5688 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5689 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5690 (*bb_ord_index)++;
5692 /* FIXME: it is true only when not scheduling ebbs. */
5693 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5696 /* Functions to support pipelining of outer loops. */
5699 /* Creates a new empty region and returns its number. */
5699 static int
5700 sel_create_new_region (void)
5702 int new_rgn_number = nr_regions;
5704 RGN_NR_BLOCKS (new_rgn_number) = 0;
5706 /* FIXME: This will work only when EBBs are not created. */
5707 if (new_rgn_number != 0)
5708 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5709 RGN_NR_BLOCKS (new_rgn_number - 1);
5710 else
5711 RGN_BLOCKS (new_rgn_number) = 0;
5713 /* Set the blocks of the next region so the other functions may
5714 calculate the number of blocks in the region. */
5715 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5716 RGN_NR_BLOCKS (new_rgn_number);
5718 nr_regions++;
5720 return new_rgn_number;
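/* For illustration of the region bookkeeping above: if the last existing
   region N has RGN_BLOCKS (N) == 10 and RGN_NR_BLOCKS (N) == 3, a newly
   created region N+1 starts at rgn_bb_table[13], and RGN_BLOCKS (N+2) is
   kept equal to RGN_BLOCKS (N+1) + RGN_NR_BLOCKS (N+1) as blocks are added
   by sel_add_block_to_region.  */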
5723 /* If X has a smaller topological sort number than Y, returns -1;
5724 if greater, returns 1. */
5725 static int
5726 bb_top_order_comparator (const void *x, const void *y)
5728 basic_block bb1 = *(const basic_block *) x;
5729 basic_block bb2 = *(const basic_block *) y;
5731 gcc_assert (bb1 == bb2
5732 || rev_top_order_index[bb1->index]
5733 != rev_top_order_index[bb2->index]);
5735 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5736 bbs with greater number should go earlier. */
5737 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5738 return -1;
5739 else
5740 return 1;
5743 /* Create a region for LOOP and return its number. If we don't want
5744 to pipeline LOOP, return -1. */
5745 static int
5746 make_region_from_loop (struct loop *loop)
5748 unsigned int i;
5749 int new_rgn_number = -1;
5750 struct loop *inner;
5752 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5753 int bb_ord_index = 0;
5754 basic_block *loop_blocks;
5755 basic_block preheader_block;
5757 if (loop->num_nodes
5758 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5759 return -1;
5761 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5762 for (inner = loop->inner; inner; inner = inner->inner)
5763 if (flow_bb_inside_loop_p (inner, loop->latch))
5764 return -1;
5766 loop->ninsns = num_loop_insns (loop);
5767 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5768 return -1;
5770 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5772 for (i = 0; i < loop->num_nodes; i++)
5773 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5775 free (loop_blocks);
5776 return -1;
5779 preheader_block = loop_preheader_edge (loop)->src;
5780 gcc_assert (preheader_block);
5781 gcc_assert (loop_blocks[0] == loop->header);
5783 new_rgn_number = sel_create_new_region ();
5785 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5786 SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5788 for (i = 0; i < loop->num_nodes; i++)
5790 /* Add only those blocks that haven't been scheduled in the inner loop.
5791 The exception is the basic blocks with bookkeeping code - they should
5792 be added to the region (and they actually don't belong to the loop
5793 body, but to the region containing that loop body). */
5795 gcc_assert (new_rgn_number >= 0);
5797 if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5799 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5800 new_rgn_number);
5801 SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5805 free (loop_blocks);
5806 MARK_LOOP_FOR_PIPELINING (loop);
5808 return new_rgn_number;
5811 /* Create a new region from preheader blocks LOOP_BLOCKS. */
5812 void
5813 make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
5815 unsigned int i;
5816 int new_rgn_number = -1;
5817 basic_block bb;
5819 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5820 int bb_ord_index = 0;
5822 new_rgn_number = sel_create_new_region ();
5824 for (i = 0; VEC_iterate (basic_block, *loop_blocks, i, bb); i++)
5826 gcc_assert (new_rgn_number >= 0);
5828 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
5831 VEC_free (basic_block, heap, *loop_blocks);
5832 gcc_assert (*loop_blocks == NULL);
5836 /* Create region(s) from loop nest LOOP, such that inner loops will be
5837 pipelined before outer loops. Returns true when a region for LOOP
5838 is created. */
5839 static bool
5840 make_regions_from_loop_nest (struct loop *loop)
5842 struct loop *cur_loop;
5843 int rgn_number;
5845 /* Traverse all inner nodes of the loop. */
5846 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
5847 if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
5848 return false;
5850 /* At this moment all regular inner loops should have been pipelined.
5851 Try to create a region from this loop. */
5852 rgn_number = make_region_from_loop (loop);
5854 if (rgn_number < 0)
5855 return false;
5857 VEC_safe_push (loop_p, heap, loop_nests, loop);
5858 return true;
5861 /* Initialize the needed data structures. */
5862 void
5863 sel_init_pipelining (void)
5865 /* Collect loop information to be used in outer loops pipelining. */
5866 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
5867 | LOOPS_HAVE_FALLTHRU_PREHEADERS
5868 | LOOPS_HAVE_RECORDED_EXITS
5869 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
5870 current_loop_nest = NULL;
5872 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
5873 sbitmap_zero (bbs_in_loop_rgns);
5875 recompute_rev_top_order ();
5878 /* Returns a struct loop for region RGN. */
5879 loop_p
5880 get_loop_nest_for_rgn (unsigned int rgn)
5882 /* Regions created with extend_rgns don't have corresponding loop nests,
5883 because they don't represent loops. */
5884 if (rgn < VEC_length (loop_p, loop_nests))
5885 return VEC_index (loop_p, loop_nests, rgn);
5886 else
5887 return NULL;
5890 /* True when LOOP was included into pipelining regions. */
5891 bool
5892 considered_for_pipelining_p (struct loop *loop)
5894 if (loop_depth (loop) == 0)
5895 return false;
5897 /* Now, the loop could be too large or irreducible. Check whether its
5898 region is in LOOP_NESTS.
5899 We determine the region number of LOOP as the region number of its
5900 latch. We can't use header here, because this header could be
5901 a just-removed preheader and it will give us the wrong region number.
5902 Latch can't be used because it could be in the inner loop too. */
5903 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
5905 int rgn = CONTAINING_RGN (loop->latch->index);
5907 gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
5908 return true;
5911 return false;
5914 /* Makes regions from the rest of the blocks, after loops are chosen
5915 for pipelining. */
5916 static void
5917 make_regions_from_the_rest (void)
5919 int cur_rgn_blocks;
5920 int *loop_hdr;
5921 int i;
5923 basic_block bb;
5924 edge e;
5925 edge_iterator ei;
5926 int *degree;
5928 /* Index in rgn_bb_table where to start allocating new regions. */
5929 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
5931 /* Make regions from all the remaining basic blocks - those that don't belong to
5932 any loop or belong to irreducible loops. Prepare the data structures
5933 for extend_rgns. */
5935 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
5936 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
5937 loop. */
5938 loop_hdr = XNEWVEC (int, last_basic_block);
5939 degree = XCNEWVEC (int, last_basic_block);
5942 /* For each basic block that belongs to some loop assign the number
5943 of the innermost loop it belongs to. */
5944 for (i = 0; i < last_basic_block; i++)
5945 loop_hdr[i] = -1;
5947 FOR_EACH_BB (bb)
5949 if (bb->loop_father && bb->loop_father->num != 0
5950 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
5951 loop_hdr[bb->index] = bb->loop_father->num;
5954 /* For each basic block, degree is calculated as the number of incoming
5955 edges that come from bbs that are not yet scheduled.
5956 The basic blocks that are scheduled have a degree value of zero. */
5957 FOR_EACH_BB (bb)
5959 degree[bb->index] = 0;
5961 if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
5963 FOR_EACH_EDGE (e, ei, bb->preds)
5964 if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
5965 degree[bb->index]++;
5967 else
5968 degree[bb->index] = -1;
5971 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
5973 /* Any block that did not end up in a region is placed into a region
5974 by itself. */
5975 FOR_EACH_BB (bb)
5976 if (degree[bb->index] >= 0)
5978 rgn_bb_table[cur_rgn_blocks] = bb->index;
5979 RGN_NR_BLOCKS (nr_regions) = 1;
5980 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
5981 RGN_DONT_CALC_DEPS (nr_regions) = 0;
5982 RGN_HAS_REAL_EBB (nr_regions) = 0;
5983 CONTAINING_RGN (bb->index) = nr_regions++;
5984 BLOCK_TO_BB (bb->index) = 0;
5987 free (degree);
5988 free (loop_hdr);
5991 /* Free data structures used in pipelining of loops. */
5992 void sel_finish_pipelining (void)
5994 loop_iterator li;
5995 struct loop *loop;
5997 /* Release aux fields so we don't free them later by mistake. */
5998 FOR_EACH_LOOP (li, loop, 0)
5999 loop->aux = NULL;
6001 loop_optimizer_finalize ();
6003 VEC_free (loop_p, heap, loop_nests);
6005 free (rev_top_order_index);
6006 rev_top_order_index = NULL;
6009 /* This function replaces find_rgns when
6010 FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
6011 void
6012 sel_find_rgns (void)
6014 sel_init_pipelining ();
6015 extend_regions ();
6017 if (current_loops)
6019 loop_p loop;
6020 loop_iterator li;
6022 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6023 ? LI_FROM_INNERMOST
6024 : LI_ONLY_INNERMOST))
6025 make_regions_from_loop_nest (loop);
6028 /* Make regions from all the remaining basic blocks and schedule them.
6029 These blocks include blocks that don't belong to any loop or belong
6030 to irreducible loops. */
6031 make_regions_from_the_rest ();
6033 /* We don't need bbs_in_loop_rgns anymore. */
6034 sbitmap_free (bbs_in_loop_rgns);
6035 bbs_in_loop_rgns = NULL;
6038 /* Adds the preheader blocks from the previous loop to the current region, taking
6039 them from LOOP_PREHEADER_BLOCKS (current_loop_nest).
6040 This function is only used with -fsel-sched-pipelining-outer-loops. */
6041 void
6042 sel_add_loop_preheaders (void)
6044 int i;
6045 basic_block bb;
6046 VEC(basic_block, heap) *preheader_blocks
6047 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6049 for (i = 0;
6050 VEC_iterate (basic_block, preheader_blocks, i, bb);
6051 i++)
6053 VEC_safe_push (basic_block, heap, last_added_blocks, bb);
6054 sel_add_bb (bb);
6057 VEC_free (basic_block, heap, preheader_blocks);
6060 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6061 Please note that the function should also work when pipelining_p is
6062 false, because it is used when deciding whether we should or should
6063 not reschedule pipelined code. */
6064 bool
6065 sel_is_loop_preheader_p (basic_block bb)
6067 if (current_loop_nest)
6069 struct loop *outer;
6071 if (preheader_removed)
6072 return false;
6074 /* Preheader is the first block in the region. */
6075 if (BLOCK_TO_BB (bb->index) == 0)
6076 return true;
6078 /* We used to find a preheader with the topological information.
6079 Check that the above code is equivalent to what we did before. */
6081 if (in_current_region_p (current_loop_nest->header))
6082 gcc_assert (!(BLOCK_TO_BB (bb->index)
6083 < BLOCK_TO_BB (current_loop_nest->header->index)));
6085 /* Handle the situation when the latch block of an outer loop
6086 could end up here. */
6087 for (outer = loop_outer (current_loop_nest);
6088 outer;
6089 outer = loop_outer (outer))
6090 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6091 gcc_unreachable ();
6094 return false;
6097 /* Checks whether JUMP leads to basic block DEST_BB and no other blocks. */
6098 bool
6099 jump_leads_only_to_bb_p (insn_t jump, basic_block dest_bb)
6101 basic_block jump_bb = BLOCK_FOR_INSN (jump);
6103 /* Return false if JUMP is not a jump, has side effects, or is not an
6104 unconditional jump (and so may lead to several basic blocks). */
6105 if (!onlyjump_p (jump)
6106 || !any_uncondjump_p (jump))
6107 return false;
6109 /* Return false if there are several outgoing edges, an abnormal edge,
6110 or the destination of the jump is not DEST_BB. */
6111 if (EDGE_COUNT (jump_bb->succs) != 1
6112 || EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
6113 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6114 return false;
6116 /* None of the above applies, so JUMP leads only to DEST_BB. */
6117 return true;
6120 /* Removes the loop preheader from the current region and saves it in
6121 PREHEADER_BLOCKS of the father loop, so they will be added later to
6122 region that represents an outer loop. */
6123 static void
6124 sel_remove_loop_preheader (void)
6126 int i, old_len;
6127 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6128 basic_block bb;
6129 bool all_empty_p = true;
6130 VEC(basic_block, heap) *preheader_blocks
6131 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6133 gcc_assert (current_loop_nest);
6134 old_len = VEC_length (basic_block, preheader_blocks);
6136 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6137 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6139 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6141 /* If the basic block belongs to the region, but doesn't belong to the
6142 corresponding loop, then it should be a preheader. */
6143 if (sel_is_loop_preheader_p (bb))
6145 VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6146 if (BB_END (bb) != bb_note (bb))
6147 all_empty_p = false;
6151 /* Remove these blocks only after iterating over the whole region. */
6152 for (i = VEC_length (basic_block, preheader_blocks) - 1;
6153 i >= old_len;
6154 i--)
6156 bb = VEC_index (basic_block, preheader_blocks, i);
6157 sel_remove_bb (bb, false);
6160 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6162 if (!all_empty_p)
6163 /* Immediately create new region from preheader. */
6164 make_region_from_loop_preheader (&preheader_blocks);
6165 else
6167 /* If all preheader blocks are empty, don't create a new empty region;
6168 instead, remove them completely. */
6169 for (i = 0; VEC_iterate (basic_block, preheader_blocks, i, bb); i++)
6171 edge e;
6172 edge_iterator ei;
6173 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6175 /* Redirect all incoming edges to next basic block. */
6176 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6178 if (! (e->flags & EDGE_FALLTHRU))
6179 redirect_edge_and_branch (e, bb->next_bb);
6180 else
6181 redirect_edge_succ (e, bb->next_bb);
6183 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6184 delete_and_free_basic_block (bb);
6186 /* Check whether, after deleting the preheader, there is an unconditional
6187 jump in PREV_BB that leads to the next basic block NEXT_BB.
6188 If so, remove this jump and clear the data sets of its
6189 basic block if it becomes empty. */
6190 if (next_bb->prev_bb == prev_bb
6191 && prev_bb != ENTRY_BLOCK_PTR
6192 && jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb))
6194 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6195 if (BB_END (prev_bb) == bb_note (prev_bb))
6196 free_data_sets (prev_bb);
6200 VEC_free (basic_block, heap, preheader_blocks);
6202 else
6203 /* Store preheader within the father's loop structure. */
6204 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6205 preheader_blocks);
6207 #endif