[official-gcc.git] / gcc / sel-sched-ir.c
1 /* Instruction scheduling pass. Selective scheduler and pipeliner.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "diagnostic-core.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "hard-reg-set.h"
28 #include "regs.h"
29 #include "hashtab.h"
30 #include "hash-set.h"
31 #include "vec.h"
32 #include "input.h"
33 #include "function.h"
34 #include "predict.h"
35 #include "dominance.h"
36 #include "cfg.h"
37 #include "cfgrtl.h"
38 #include "cfganal.h"
39 #include "cfgbuild.h"
40 #include "basic-block.h"
41 #include "flags.h"
42 #include "insn-config.h"
43 #include "insn-attr.h"
44 #include "except.h"
45 #include "recog.h"
46 #include "params.h"
47 #include "target.h"
48 #include "sched-int.h"
49 #include "ggc.h"
50 #include "symtab.h"
51 #include "inchash.h"
52 #include "tree.h"
53 #include "langhooks.h"
54 #include "rtlhooks-def.h"
55 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
57 #ifdef INSN_SCHEDULING
58 #include "sel-sched-ir.h"
59 /* We don't have to use it except for sel_print_insn. */
60 #include "sel-sched-dump.h"
62 /* A vector holding bb info for whole scheduling pass. */
63 vec<sel_global_bb_info_def>
64 sel_global_bb_info = vNULL;
66 /* A vector holding bb info. */
67 vec<sel_region_bb_info_def>
68 sel_region_bb_info = vNULL;
70 /* A pool for allocating all lists. */
71 pool_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
73 /* This contains information about successors for compute_av_set. */
74 struct succs_info current_succs;
76 /* Data structure to describe interaction with the generic scheduler utils. */
77 static struct common_sched_info_def sel_common_sched_info;
79 /* The loop nest being pipelined. */
80 struct loop *current_loop_nest;
82 /* LOOP_NESTS is a vector containing the corresponding loop nest for
83 each region. */
84 static vec<loop_p> loop_nests = vNULL;
86 /* Saves blocks already in loop regions, indexed by bb->index. */
87 static sbitmap bbs_in_loop_rgns = NULL;
89 /* CFG hooks that are saved before changing create_basic_block hook. */
90 static struct cfg_hooks orig_cfg_hooks;
93 /* Array containing reverse topological index of function basic blocks,
94 indexed by BB->INDEX. */
95 static int *rev_top_order_index = NULL;
97 /* Length of the above array. */
98 static int rev_top_order_index_len = -1;
100 /* A regset pool structure. */
101 static struct
103 /* The stack to which regsets are returned. */
104 regset *v;
106 /* Its stack pointer, i.e. the number of regsets currently in V. */
107 int n;
109 /* Its size. */
110 int s;
112 /* In VV we save all generated regsets so that, when destructing the
113 pool, we can compare it with V and check that every regset was returned
114 back to pool. */
115 regset *vv;
117 /* The stack pointer of VV, i.e. the number of regsets recorded in it. */
118 int nn;
120 /* Its size. */
121 int ss;
123 /* The difference between allocated and returned regsets. */
124 int diff;
125 } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
127 /* This represents the nop pool. */
128 static struct
130 /* The vector which holds previously emitted nops. */
131 insn_t *v;
133 /* Its pointer. */
134 int n;
136 /* Its size. */
137 int s;
138 } nop_pool = { NULL, 0, 0 };
140 /* The pool for basic block notes. */
141 static vec<rtx_note *> bb_note_pool;
143 /* A NOP pattern used to emit placeholder insns. */
144 rtx nop_pattern = NULL_RTX;
145 /* A special instruction that resides in EXIT_BLOCK.
146 EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK. */
147 rtx_insn *exit_insn = NULL;
149 /* TRUE if, while scheduling the current region (which is a loop), its
150 preheader was removed. */
151 bool preheader_removed = false;
154 /* Forward static declarations. */
155 static void fence_clear (fence_t);
157 static void deps_init_id (idata_t, insn_t, bool);
158 static void init_id_from_df (idata_t, insn_t, bool);
159 static expr_t set_insn_init (expr_t, vinsn_t, int);
161 static void cfg_preds (basic_block, insn_t **, int *);
162 static void prepare_insn_expr (insn_t, int);
163 static void free_history_vect (vec<expr_history_def> &);
165 static void move_bb_info (basic_block, basic_block);
166 static void remove_empty_bb (basic_block, bool);
167 static void sel_merge_blocks (basic_block, basic_block);
168 static void sel_remove_loop_preheader (void);
169 static bool bb_has_removable_jump_to_p (basic_block, basic_block);
171 static bool insn_is_the_only_one_in_bb_p (insn_t);
172 static void create_initial_data_sets (basic_block);
174 static void free_av_set (basic_block);
175 static void invalidate_av_set (basic_block);
176 static void extend_insn_data (void);
177 static void sel_init_new_insn (insn_t, int, int = -1);
178 static void finish_insns (void);
180 /* Various list functions. */
182 /* Copy an instruction list L. */
183 ilist_t
184 ilist_copy (ilist_t l)
186 ilist_t head = NULL, *tailp = &head;
188 while (l)
190 ilist_add (tailp, ILIST_INSN (l));
191 tailp = &ILIST_NEXT (*tailp);
192 l = ILIST_NEXT (l);
195 return head;
198 /* Invert an instruction list L. */
199 ilist_t
200 ilist_invert (ilist_t l)
202 ilist_t res = NULL;
204 while (l)
206 ilist_add (&res, ILIST_INSN (l));
207 l = ILIST_NEXT (l);
210 return res;
213 /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
214 void
215 blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
217 bnd_t bnd;
219 _list_add (lp);
220 bnd = BLIST_BND (*lp);
222 BND_TO (bnd) = to;
223 BND_PTR (bnd) = ptr;
224 BND_AV (bnd) = NULL;
225 BND_AV1 (bnd) = NULL;
226 BND_DC (bnd) = dc;
229 /* Remove the list node pointed to by LP. */
230 void
231 blist_remove (blist_t *lp)
233 bnd_t b = BLIST_BND (*lp);
235 av_set_clear (&BND_AV (b));
236 av_set_clear (&BND_AV1 (b));
237 ilist_clear (&BND_PTR (b));
239 _list_remove (lp);
242 /* Init a fence tail L. */
243 void
244 flist_tail_init (flist_tail_t l)
246 FLIST_TAIL_HEAD (l) = NULL;
247 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
250 /* Try to find fence corresponding to INSN in L. */
251 fence_t
252 flist_lookup (flist_t l, insn_t insn)
254 while (l)
256 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
257 return FLIST_FENCE (l);
259 l = FLIST_NEXT (l);
262 return NULL;
265 /* Init the fields of F before running fill_insns. */
266 static void
267 init_fence_for_scheduling (fence_t f)
269 FENCE_BNDS (f) = NULL;
270 FENCE_PROCESSED_P (f) = false;
271 FENCE_SCHEDULED_P (f) = false;
274 /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
275 static void
276 flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
277 insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
278 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
279 int cycle, int cycle_issued_insns, int issue_more,
280 bool starts_cycle_p, bool after_stall_p)
282 fence_t f;
284 _list_add (lp);
285 f = FLIST_FENCE (*lp);
287 FENCE_INSN (f) = insn;
289 gcc_assert (state != NULL);
290 FENCE_STATE (f) = state;
292 FENCE_CYCLE (f) = cycle;
293 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
294 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
295 FENCE_AFTER_STALL_P (f) = after_stall_p;
297 gcc_assert (dc != NULL);
298 FENCE_DC (f) = dc;
300 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
301 FENCE_TC (f) = tc;
303 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
304 FENCE_ISSUE_MORE (f) = issue_more;
305 FENCE_EXECUTING_INSNS (f) = executing_insns;
306 FENCE_READY_TICKS (f) = ready_ticks;
307 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
308 FENCE_SCHED_NEXT (f) = sched_next;
310 init_fence_for_scheduling (f);
313 /* Remove the head node of the list pointed to by LP. */
314 static void
315 flist_remove (flist_t *lp)
317 if (FENCE_INSN (FLIST_FENCE (*lp)))
318 fence_clear (FLIST_FENCE (*lp));
319 _list_remove (lp);
322 /* Clear the fence list pointed to by LP. */
323 void
324 flist_clear (flist_t *lp)
326 while (*lp)
327 flist_remove (lp);
330 /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
331 void
332 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
334 def_t d;
336 _list_add (dl);
337 d = DEF_LIST_DEF (*dl);
339 d->orig_insn = original_insn;
340 d->crosses_call = crosses_call;
344 /* Functions to work with target contexts. */
346 /* Bulk target context. It is convenient for debugging purposes to ensure
347 that there are no uninitialized (null) target contexts. */
348 static tc_t bulk_tc = (tc_t) 1;
350 /* Target hooks wrappers. In the future we can provide some default
351 implementations for them. */
353 /* Allocate a store for the target context. */
354 static tc_t
355 alloc_target_context (void)
357 return (targetm.sched.alloc_sched_context
358 ? targetm.sched.alloc_sched_context () : bulk_tc);
361 /* Init target context TC.
362 If CLEAN_P is true, then initialize TC as at the beginning of the scheduler.
363 Otherwise, copy the current backend context to TC. */
364 static void
365 init_target_context (tc_t tc, bool clean_p)
367 if (targetm.sched.init_sched_context)
368 targetm.sched.init_sched_context (tc, clean_p);
371 /* Allocate and initialize a target context. The meaning of CLEAN_P is the
372 same as in init_target_context (). */
373 tc_t
374 create_target_context (bool clean_p)
376 tc_t tc = alloc_target_context ();
378 init_target_context (tc, clean_p);
379 return tc;
382 /* Copy TC to the current backend context. */
383 void
384 set_target_context (tc_t tc)
386 if (targetm.sched.set_sched_context)
387 targetm.sched.set_sched_context (tc);
390 /* TC is about to be destroyed. Free any internal data. */
391 static void
392 clear_target_context (tc_t tc)
394 if (targetm.sched.clear_sched_context)
395 targetm.sched.clear_sched_context (tc);
398 /* Clear and free it. */
399 static void
400 delete_target_context (tc_t tc)
402 clear_target_context (tc);
404 if (targetm.sched.free_sched_context)
405 targetm.sched.free_sched_context (tc);
408 /* Make a copy of FROM in TO.
409 NB: Maybe this should be a hook. */
410 static void
411 copy_target_context (tc_t to, tc_t from)
413 tc_t tmp = create_target_context (false);
415 set_target_context (from);
416 init_target_context (to, false);
418 set_target_context (tmp);
419 delete_target_context (tmp);
422 /* Create a copy of TC. */
423 static tc_t
424 create_copy_of_target_context (tc_t tc)
426 tc_t copy = alloc_target_context ();
428 copy_target_context (copy, tc);
430 return copy;
433 /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
434 is the same as in init_target_context (). */
435 void
436 reset_target_context (tc_t tc, bool clean_p)
438 clear_target_context (tc);
439 init_target_context (tc, clean_p);
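/* Illustrative sketch, not part of the pass: the expected lifecycle of a
   target context built from the wrappers above.  The helper below is
   hypothetical and guarded out of the build.  */
#if 0
static void
target_context_usage_example (void)
{
  /* A clean context, as at the very start of scheduling.  */
  tc_t tc = create_target_context (true);

  /* Make it the current backend context while working on a fence.  */
  set_target_context (tc);

  /* Reinitialize it, this time from the current backend state.  */
  reset_target_context (tc, false);

  /* Release it when the fence is done with it.  */
  delete_target_context (tc);
}
#endif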
442 /* Functions to work with dependence contexts.
443 Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
444 context. It accumulates information about processed insns to decide if
445 current insn is dependent on the processed ones. */
447 /* Make a copy of FROM in TO. */
448 static void
449 copy_deps_context (deps_t to, deps_t from)
451 init_deps (to, false);
452 deps_join (to, from);
455 /* Allocate store for dep context. */
456 static deps_t
457 alloc_deps_context (void)
459 return XNEW (struct deps_desc);
462 /* Allocate and initialize dep context. */
463 static deps_t
464 create_deps_context (void)
466 deps_t dc = alloc_deps_context ();
468 init_deps (dc, false);
469 return dc;
472 /* Create a copy of FROM. */
473 static deps_t
474 create_copy_of_deps_context (deps_t from)
476 deps_t to = alloc_deps_context ();
478 copy_deps_context (to, from);
479 return to;
482 /* Clean up internal data of DC. */
483 static void
484 clear_deps_context (deps_t dc)
486 free_deps (dc);
489 /* Clear and free DC. */
490 static void
491 delete_deps_context (deps_t dc)
493 clear_deps_context (dc);
494 free (dc);
497 /* Clear and init DC. */
498 static void
499 reset_deps_context (deps_t dc)
501 clear_deps_context (dc);
502 init_deps (dc, false);
505 /* This structure describes the dependence analysis hooks for advancing
506 dependence context. */
507 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
509 NULL,
511 NULL, /* start_insn */
512 NULL, /* finish_insn */
513 NULL, /* start_lhs */
514 NULL, /* finish_lhs */
515 NULL, /* start_rhs */
516 NULL, /* finish_rhs */
517 haifa_note_reg_set,
518 haifa_note_reg_clobber,
519 haifa_note_reg_use,
520 NULL, /* note_mem_dep */
521 NULL, /* note_dep */
523 0, 0, 0
526 /* Process INSN and add its impact on DC. */
527 void
528 advance_deps_context (deps_t dc, insn_t insn)
530 sched_deps_info = &advance_deps_context_sched_deps_info;
531 deps_analyze_insn (dc, insn);
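/* Illustrative sketch, not part of the pass: a dependence context starts
   empty, accumulates the effects of already-processed insns through
   advance_deps_context, and is freed with delete_deps_context.  The helper
   and its INSN argument below are hypothetical and guarded out of the
   build.  */
#if 0
static void
deps_context_usage_example (insn_t insn)
{
  deps_t dc = create_deps_context ();

  /* Record INSN so that later dependence queries see it as processed.  */
  advance_deps_context (dc, insn);

  delete_deps_context (dc);
}
#endif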
535 /* Functions to work with DFA states. */
537 /* Allocate store for a DFA state. */
538 static state_t
539 state_alloc (void)
541 return xmalloc (dfa_state_size);
544 /* Allocate and initialize DFA state. */
545 static state_t
546 state_create (void)
548 state_t state = state_alloc ();
550 state_reset (state);
551 advance_state (state);
552 return state;
555 /* Free DFA state. */
556 static void
557 state_free (state_t state)
559 free (state);
562 /* Make a copy of FROM in TO. */
563 static void
564 state_copy (state_t to, state_t from)
566 memcpy (to, from, dfa_state_size);
569 /* Create a copy of FROM. */
570 static state_t
571 state_create_copy (state_t from)
573 state_t to = state_alloc ();
575 state_copy (to, from);
576 return to;
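/* Illustrative sketch, not part of the pass: DFA states are opaque buffers
   of dfa_state_size bytes, so copying one is a plain memcpy (state_copy),
   and every state created here must be released with state_free.  The
   helper below is hypothetical and guarded out of the build.  */
#if 0
static void
dfa_state_usage_example (void)
{
  state_t s = state_create ();
  state_t snapshot = state_create_copy (s);

  /* Advance the original by one cycle; the snapshot is unaffected.  */
  advance_state (s);

  state_free (snapshot);
  state_free (s);
}
#endif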
580 /* Functions to work with fences. */
582 /* Clear the fence. */
583 static void
584 fence_clear (fence_t f)
586 state_t s = FENCE_STATE (f);
587 deps_t dc = FENCE_DC (f);
588 void *tc = FENCE_TC (f);
590 ilist_clear (&FENCE_BNDS (f));
592 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
593 || (s == NULL && dc == NULL && tc == NULL));
595 free (s);
597 if (dc != NULL)
598 delete_deps_context (dc);
600 if (tc != NULL)
601 delete_target_context (tc);
602 vec_free (FENCE_EXECUTING_INSNS (f));
603 free (FENCE_READY_TICKS (f));
604 FENCE_READY_TICKS (f) = NULL;
607 /* Init a list of fences with successors of OLD_FENCE. */
608 void
609 init_fences (insn_t old_fence)
611 insn_t succ;
612 succ_iterator si;
613 bool first = true;
614 int ready_ticks_size = get_max_uid () + 1;
616 FOR_EACH_SUCC_1 (succ, si, old_fence,
617 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
620 if (first)
621 first = false;
622 else
623 gcc_assert (flag_sel_sched_pipelining_outer_loops);
625 flist_add (&fences, succ,
626 state_create (),
627 create_deps_context () /* dc */,
628 create_target_context (true) /* tc */,
629 NULL /* last_scheduled_insn */,
630 NULL, /* executing_insns */
631 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
632 ready_ticks_size,
633 NULL /* sched_next */,
634 1 /* cycle */, 0 /* cycle_issued_insns */,
635 issue_rate, /* issue_more */
636 1 /* starts_cycle_p */, 0 /* after_stall_p */);
640 /* Merges two fences (filling fields of fence F with resulting values) by
641 the following rules: 1) state, target context and last scheduled insn are
642 propagated from the fallthrough edge if it is available;
643 2) deps context and cycle are propagated from the more probable edge;
644 3) all other fields are set to corresponding constant values.
646 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
647 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
648 and AFTER_STALL_P are the corresponding fields of the second fence. */
649 static void
650 merge_fences (fence_t f, insn_t insn,
651 state_t state, deps_t dc, void *tc,
652 rtx_insn *last_scheduled_insn,
653 vec<rtx_insn *, va_gc> *executing_insns,
654 int *ready_ticks, int ready_ticks_size,
655 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
657 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
659 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
660 && !sched_next && !FENCE_SCHED_NEXT (f));
662 /* Check if we can decide from which path the fences came.
663 If we can't (or don't want to), reset everything. */
664 if (last_scheduled_insn == NULL
665 || last_scheduled_insn_old == NULL
666 /* This is the case when INSN is reachable on several paths from
667 one insn (this can happen when pipelining of outer loops is on and
668 there are two edges: one going around the inner loop and the other
669 right through it; in such a case just reset everything). */
670 || last_scheduled_insn == last_scheduled_insn_old)
672 state_reset (FENCE_STATE (f));
673 state_free (state);
675 reset_deps_context (FENCE_DC (f));
676 delete_deps_context (dc);
678 reset_target_context (FENCE_TC (f), true);
679 delete_target_context (tc);
681 if (cycle > FENCE_CYCLE (f))
682 FENCE_CYCLE (f) = cycle;
684 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
685 FENCE_ISSUE_MORE (f) = issue_rate;
686 vec_free (executing_insns);
687 free (ready_ticks);
688 if (FENCE_EXECUTING_INSNS (f))
689 FENCE_EXECUTING_INSNS (f)->block_remove (0,
690 FENCE_EXECUTING_INSNS (f)->length ());
691 if (FENCE_READY_TICKS (f))
692 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
694 else
696 edge edge_old = NULL, edge_new = NULL;
697 edge candidate;
698 succ_iterator si;
699 insn_t succ;
701 /* Find fallthrough edge. */
702 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
703 candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);
705 if (!candidate
706 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
707 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
709 /* No fallthrough edge leading to basic block of INSN. */
710 state_reset (FENCE_STATE (f));
711 state_free (state);
713 reset_target_context (FENCE_TC (f), true);
714 delete_target_context (tc);
716 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
717 FENCE_ISSUE_MORE (f) = issue_rate;
719 else
720 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
722 /* It would be weird if the same insn were the successor of several
723 fallthrough edges. */
724 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
725 != BLOCK_FOR_INSN (last_scheduled_insn_old));
727 state_free (FENCE_STATE (f));
728 FENCE_STATE (f) = state;
730 delete_target_context (FENCE_TC (f));
731 FENCE_TC (f) = tc;
733 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
734 FENCE_ISSUE_MORE (f) = issue_more;
736 else
738 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
739 state_free (state);
740 delete_target_context (tc);
742 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
743 != BLOCK_FOR_INSN (last_scheduled_insn));
746 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
747 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
748 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
750 if (succ == insn)
752 /* No same successor allowed from several edges. */
753 gcc_assert (!edge_old);
754 edge_old = si.e1;
757 /* Find edge of second predecessor (last_scheduled_insn->insn). */
758 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
759 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
761 if (succ == insn)
763 /* No same successor allowed from several edges. */
764 gcc_assert (!edge_new);
765 edge_new = si.e1;
769 /* Check if we can choose most probable predecessor. */
770 if (edge_old == NULL || edge_new == NULL)
772 reset_deps_context (FENCE_DC (f));
773 delete_deps_context (dc);
774 vec_free (executing_insns);
775 free (ready_ticks);
777 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
778 if (FENCE_EXECUTING_INSNS (f))
779 FENCE_EXECUTING_INSNS (f)->block_remove (0,
780 FENCE_EXECUTING_INSNS (f)->length ());
781 if (FENCE_READY_TICKS (f))
782 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
784 else
785 if (edge_new->probability > edge_old->probability)
787 delete_deps_context (FENCE_DC (f));
788 FENCE_DC (f) = dc;
789 vec_free (FENCE_EXECUTING_INSNS (f));
790 FENCE_EXECUTING_INSNS (f) = executing_insns;
791 free (FENCE_READY_TICKS (f));
792 FENCE_READY_TICKS (f) = ready_ticks;
793 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
794 FENCE_CYCLE (f) = cycle;
796 else
798 /* Leave DC and CYCLE untouched. */
799 delete_deps_context (dc);
800 vec_free (executing_insns);
801 free (ready_ticks);
805 /* Fill remaining invariant fields. */
806 if (after_stall_p)
807 FENCE_AFTER_STALL_P (f) = 1;
809 FENCE_ISSUED_INSNS (f) = 0;
810 FENCE_STARTS_CYCLE_P (f) = 1;
811 FENCE_SCHED_NEXT (f) = NULL;
814 /* Add a new fence to NEW_FENCES list, initializing it from all
815 other parameters. */
816 static void
817 add_to_fences (flist_tail_t new_fences, insn_t insn,
818 state_t state, deps_t dc, void *tc,
819 rtx_insn *last_scheduled_insn,
820 vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
821 int ready_ticks_size, rtx_insn *sched_next, int cycle,
822 int cycle_issued_insns, int issue_rate,
823 bool starts_cycle_p, bool after_stall_p)
825 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
827 if (! f)
829 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
830 last_scheduled_insn, executing_insns, ready_ticks,
831 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
832 issue_rate, starts_cycle_p, after_stall_p);
834 FLIST_TAIL_TAILP (new_fences)
835 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
837 else
839 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
840 executing_insns, ready_ticks, ready_ticks_size,
841 sched_next, cycle, issue_rate, after_stall_p);
845 /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
846 void
847 move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
849 fence_t f, old;
850 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
852 old = FLIST_FENCE (old_fences);
853 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
854 FENCE_INSN (FLIST_FENCE (old_fences)));
855 if (f)
857 merge_fences (f, old->insn, old->state, old->dc, old->tc,
858 old->last_scheduled_insn, old->executing_insns,
859 old->ready_ticks, old->ready_ticks_size,
860 old->sched_next, old->cycle, old->issue_more,
861 old->after_stall_p);
863 else
865 _list_add (tailp);
866 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
867 *FLIST_FENCE (*tailp) = *old;
868 init_fence_for_scheduling (FLIST_FENCE (*tailp));
870 FENCE_INSN (old) = NULL;
873 /* Add a new fence to NEW_FENCES list and initialize most of its data
874 as a clean one. */
875 void
876 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
878 int ready_ticks_size = get_max_uid () + 1;
880 add_to_fences (new_fences,
881 succ, state_create (), create_deps_context (),
882 create_target_context (true),
883 NULL, NULL,
884 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
885 NULL, FENCE_CYCLE (fence) + 1,
886 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
889 /* Add a new fence to NEW_FENCES list and initialize all of its data
890 from FENCE and SUCC. */
891 void
892 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
894 int * new_ready_ticks
895 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
897 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
898 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
899 add_to_fences (new_fences,
900 succ, state_create_copy (FENCE_STATE (fence)),
901 create_copy_of_deps_context (FENCE_DC (fence)),
902 create_copy_of_target_context (FENCE_TC (fence)),
903 FENCE_LAST_SCHEDULED_INSN (fence),
904 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
905 new_ready_ticks,
906 FENCE_READY_TICKS_SIZE (fence),
907 FENCE_SCHED_NEXT (fence),
908 FENCE_CYCLE (fence),
909 FENCE_ISSUED_INSNS (fence),
910 FENCE_ISSUE_MORE (fence),
911 FENCE_STARTS_CYCLE_P (fence),
912 FENCE_AFTER_STALL_P (fence));
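/* Illustrative sketch, not part of the pass: propagating a fence to a
   successor and finding it again on the new list.  The helper and its
   arguments below are hypothetical and guarded out of the build.  */
#if 0
static void
fence_list_usage_example (flist_tail_t new_fences, fence_t fence, insn_t succ)
{
  /* Copy FENCE's state, contexts and ready ticks to a fence for SUCC;
     add_to_fences merges with an existing fence for SUCC if one is
     already on the list.  */
  add_dirty_fence_to_fences (new_fences, succ, fence);

  /* The fence for a particular insn can then be found with flist_lookup.  */
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), succ);
  gcc_assert (f != NULL);
}
#endif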
916 /* Functions to work with regset and nop pools. */
918 /* Return a new regset from the pool. It might have some of the bits set
919 from previous usage. */
920 regset
921 get_regset_from_pool (void)
923 regset rs;
925 if (regset_pool.n != 0)
926 rs = regset_pool.v[--regset_pool.n];
927 else
928 /* We need to create the regset. */
930 rs = ALLOC_REG_SET (&reg_obstack);
932 if (regset_pool.nn == regset_pool.ss)
933 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
934 (regset_pool.ss = 2 * regset_pool.ss + 1));
935 regset_pool.vv[regset_pool.nn++] = rs;
938 regset_pool.diff++;
940 return rs;
943 /* Same as above, but returns an empty regset. */
944 regset
945 get_clear_regset_from_pool (void)
947 regset rs = get_regset_from_pool ();
949 CLEAR_REG_SET (rs);
950 return rs;
953 /* Return regset RS to the pool for future use. */
954 void
955 return_regset_to_pool (regset rs)
957 gcc_assert (rs);
958 regset_pool.diff--;
960 if (regset_pool.n == regset_pool.s)
961 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
962 (regset_pool.s = 2 * regset_pool.s + 1));
963 regset_pool.v[regset_pool.n++] = rs;
966 #ifdef ENABLE_CHECKING
967 /* This is used as a qsort callback for sorting regset pool stacks.
968 X and XX are addresses of two regsets. They are never equal. */
969 static int
970 cmp_v_in_regset_pool (const void *x, const void *xx)
972 uintptr_t r1 = (uintptr_t) *((const regset *) x);
973 uintptr_t r2 = (uintptr_t) *((const regset *) xx);
974 if (r1 > r2)
975 return 1;
976 else if (r1 < r2)
977 return -1;
978 gcc_unreachable ();
980 #endif
982 /* Free the regset pool possibly checking for memory leaks. */
983 void
984 free_regset_pool (void)
986 #ifdef ENABLE_CHECKING
988 regset *v = regset_pool.v;
989 int i = 0;
990 int n = regset_pool.n;
992 regset *vv = regset_pool.vv;
993 int ii = 0;
994 int nn = regset_pool.nn;
996 int diff = 0;
998 gcc_assert (n <= nn);
1000 /* Sort both vectors so it will be possible to compare them. */
1001 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
1002 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
1004 while (ii < nn)
1006 if (v[i] == vv[ii])
1007 i++;
1008 else
1009 /* VV[II] was lost. */
1010 diff++;
1012 ii++;
1015 gcc_assert (diff == regset_pool.diff);
1017 #endif
1019 /* If not true - we have a memory leak. */
1020 gcc_assert (regset_pool.diff == 0);
1022 while (regset_pool.n)
1024 --regset_pool.n;
1025 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1028 free (regset_pool.v);
1029 regset_pool.v = NULL;
1030 regset_pool.s = 0;
1032 free (regset_pool.vv);
1033 regset_pool.vv = NULL;
1034 regset_pool.nn = 0;
1035 regset_pool.ss = 0;
1037 regset_pool.diff = 0;
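/* Illustrative sketch, not part of the pass: regsets are borrowed from and
   returned to the pool above.  get_regset_from_pool may hand back a dirty
   regset, so get_clear_regset_from_pool is used when an empty set is
   needed; every borrowed regset must be returned, or free_regset_pool will
   report a leak via the regset_pool.diff check.  The helper below is
   hypothetical and guarded out of the build.  */
#if 0
static void
regset_pool_usage_example (void)
{
  regset tmp = get_clear_regset_from_pool ();

  /* ... accumulate registers into TMP ...  */

  return_regset_to_pool (tmp);
}
#endif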
1041 /* Functions to work with nop pools. NOP insns are used as temporary
1042 placeholders for the insns being scheduled, to allow correct update of
1043 the data sets. When the update is finished, the NOPs are deleted. */
1045 /* A vinsn that is used to represent a nop. This vinsn is shared among all
1046 nops sel-sched generates. */
1047 static vinsn_t nop_vinsn = NULL;
1049 /* Emit a nop before INSN, taking it from pool. */
1050 insn_t
1051 get_nop_from_pool (insn_t insn)
1053 rtx nop_pat;
1054 insn_t nop;
1055 bool old_p = nop_pool.n != 0;
1056 int flags;
1058 if (old_p)
1059 nop_pat = nop_pool.v[--nop_pool.n];
1060 else
1061 nop_pat = nop_pattern;
1063 nop = emit_insn_before (nop_pat, insn);
1065 if (old_p)
1066 flags = INSN_INIT_TODO_SSID;
1067 else
1068 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1070 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1071 sel_init_new_insn (nop, flags);
1073 return nop;
1076 /* Remove NOP from the instruction stream and return it to the pool. */
1077 void
1078 return_nop_to_pool (insn_t nop, bool full_tidying)
1080 gcc_assert (INSN_IN_STREAM_P (nop));
1081 sel_remove_insn (nop, false, full_tidying);
1083 /* We'll recycle this nop. */
1084 nop->set_undeleted ();
1086 if (nop_pool.n == nop_pool.s)
1087 nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
1088 (nop_pool.s = 2 * nop_pool.s + 1));
1089 nop_pool.v[nop_pool.n++] = nop;
1092 /* Free the nop pool. */
1093 void
1094 free_nop_pool (void)
1096 nop_pool.n = 0;
1097 nop_pool.s = 0;
1098 free (nop_pool.v);
1099 nop_pool.v = NULL;
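/* Illustrative sketch, not part of the pass: a nop is emitted as a
   placeholder before an insn so the data sets can be updated, and is
   returned to the pool when the update is finished.  The helper and its
   INSN argument below are hypothetical and guarded out of the build.  */
#if 0
static void
nop_pool_usage_example (insn_t insn)
{
  insn_t nop = get_nop_from_pool (insn);

  /* ... recompute availability and liveness sets ...  */

  return_nop_to_pool (nop, true);
}
#endif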
1103 /* Skip unspecs to support ia64 speculation. Called from rtx_equal_p_cb.
1104 The callback is given two rtxes XX and YY and writes the new rtxes
1105 to NX and NY in case one of them needs to be skipped. */
1106 static int
1107 skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1109 const_rtx x = *xx;
1110 const_rtx y = *yy;
1112 if (GET_CODE (x) == UNSPEC
1113 && (targetm.sched.skip_rtx_p == NULL
1114 || targetm.sched.skip_rtx_p (x)))
1116 *nx = XVECEXP (x, 0, 0);
1117 *ny = CONST_CAST_RTX (y);
1118 return 1;
1121 if (GET_CODE (y) == UNSPEC
1122 && (targetm.sched.skip_rtx_p == NULL
1123 || targetm.sched.skip_rtx_p (y)))
1125 *nx = CONST_CAST_RTX (x);
1126 *ny = XVECEXP (y, 0, 0);
1127 return 1;
1130 return 0;
1133 /* Callback, called from hash_rtx_cb. Helps to hash an UNSPEC rtx X correctly
1134 to support ia64 speculation. When changes are needed, the new rtx is written
1135 to NX and the new mode to NMODE, and the callback returns true. */
1136 static int
1137 hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED,
1138 rtx *nx, machine_mode* nmode)
1140 if (GET_CODE (x) == UNSPEC
1141 && targetm.sched.skip_rtx_p
1142 && targetm.sched.skip_rtx_p (x))
1144 *nx = XVECEXP (x, 0 ,0);
1145 *nmode = VOIDmode;
1146 return 1;
1149 return 0;
1152 /* Return true if LHS and RHS are OK to be scheduled separately. */
1153 static bool
1154 lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1156 if (lhs == NULL || rhs == NULL)
1157 return false;
1159 /* Do not schedule constants as rhs: there is no point in using a reg if a
1160 const can be used. Moreover, scheduling a const as rhs may lead to a mode
1161 mismatch because consts don't have modes, but they could be merged
1162 from branches where the same const is used in different modes. */
1163 if (CONSTANT_P (rhs))
1164 return false;
1166 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1167 if (COMPARISON_P (rhs))
1168 return false;
1170 /* Do not allow single REG to be an rhs. */
1171 if (REG_P (rhs))
1172 return false;
1174 /* See comment at find_used_regs_1 (*1) for explanation of this
1175 restriction. */
1176 /* FIXME: remove this later. */
1177 if (MEM_P (lhs))
1178 return false;
1180 /* This will filter all tricky things like ZERO_EXTRACT etc.
1181 For now we don't handle it. */
1182 if (!REG_P (lhs) && !MEM_P (lhs))
1183 return false;
1185 return true;
1188 /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1189 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
1190 used e.g. for insns from recovery blocks. */
1191 static void
1192 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1194 hash_rtx_callback_function hrcf;
1195 int insn_class;
1197 VINSN_INSN_RTX (vi) = insn;
1198 VINSN_COUNT (vi) = 0;
1199 vi->cost = -1;
1201 if (INSN_NOP_P (insn))
1202 return;
1204 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1205 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1206 else
1207 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1209 /* Hash vinsn depending on whether it is separable or not. */
1210 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1211 if (VINSN_SEPARABLE_P (vi))
1213 rtx rhs = VINSN_RHS (vi);
1215 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1216 NULL, NULL, false, hrcf);
1217 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1218 VOIDmode, NULL, NULL,
1219 false, hrcf);
1221 else
1223 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1224 NULL, NULL, false, hrcf);
1225 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1228 insn_class = haifa_classify_insn (insn);
1229 if (insn_class >= 2
1230 && (!targetm.sched.get_insn_spec_ds
1231 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1232 == 0)))
1233 VINSN_MAY_TRAP_P (vi) = true;
1234 else
1235 VINSN_MAY_TRAP_P (vi) = false;
1238 /* Indicate that VI has become the part of an rtx object. */
1239 void
1240 vinsn_attach (vinsn_t vi)
1242 /* Assert that VI is not pending for deletion. */
1243 gcc_assert (VINSN_INSN_RTX (vi));
1245 VINSN_COUNT (vi)++;
1248 /* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
1249 correct VINSN_TYPE (VI). */
1250 static vinsn_t
1251 vinsn_create (insn_t insn, bool force_unique_p)
1253 vinsn_t vi = XCNEW (struct vinsn_def);
1255 vinsn_init (vi, insn, force_unique_p);
1256 return vi;
1259 /* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1260 the copy. */
1261 vinsn_t
1262 vinsn_copy (vinsn_t vi, bool reattach_p)
1264 rtx_insn *copy;
1265 bool unique = VINSN_UNIQUE_P (vi);
1266 vinsn_t new_vi;
1268 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1269 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1270 if (reattach_p)
1272 vinsn_detach (vi);
1273 vinsn_attach (new_vi);
1276 return new_vi;
1279 /* Delete the VI vinsn and free its data. */
1280 static void
1281 vinsn_delete (vinsn_t vi)
1283 gcc_assert (VINSN_COUNT (vi) == 0);
1285 if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
1287 return_regset_to_pool (VINSN_REG_SETS (vi));
1288 return_regset_to_pool (VINSN_REG_USES (vi));
1289 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1292 free (vi);
1295 /* Indicate that VI is no longer a part of some rtx object.
1296 Remove VI if it is no longer needed. */
1297 void
1298 vinsn_detach (vinsn_t vi)
1300 gcc_assert (VINSN_COUNT (vi) > 0);
1302 if (--VINSN_COUNT (vi) == 0)
1303 vinsn_delete (vi);
1306 /* Returns TRUE if VI is a branch. */
1307 bool
1308 vinsn_cond_branch_p (vinsn_t vi)
1310 insn_t insn;
1312 if (!VINSN_UNIQUE_P (vi))
1313 return false;
1315 insn = VINSN_INSN_RTX (vi);
1316 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1317 return false;
1319 return control_flow_insn_p (insn);
1322 /* Return latency of INSN. */
1323 static int
1324 sel_insn_rtx_cost (rtx_insn *insn)
1326 int cost;
1328 /* A USE insn, or something else we don't need to
1329 understand. We can't pass these directly to
1330 result_ready_cost or insn_default_latency because it will
1331 trigger a fatal error for unrecognizable insns. */
1332 if (recog_memoized (insn) < 0)
1333 cost = 0;
1334 else
1336 cost = insn_default_latency (insn);
1338 if (cost < 0)
1339 cost = 0;
1342 return cost;
1345 /* Return the cost of the VI.
1346 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1348 sel_vinsn_cost (vinsn_t vi)
1350 int cost = vi->cost;
1352 if (cost < 0)
1354 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1355 vi->cost = cost;
1358 return cost;
1362 /* Functions for insn emitting. */
1364 /* Emit new insn after AFTER based on PATTERN and initialize its data from
1365 EXPR and SEQNO. */
1366 insn_t
1367 sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1369 insn_t new_insn;
1371 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1373 new_insn = emit_insn_after (pattern, after);
1374 set_insn_init (expr, NULL, seqno);
1375 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1377 return new_insn;
1380 /* Force newly generated vinsns to be unique. */
1381 static bool init_insn_force_unique_p = false;
1383 /* Emit new speculation recovery insn after AFTER based on PATTERN and
1384 initialize its data from EXPR and SEQNO. */
1385 insn_t
1386 sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1387 insn_t after)
1389 insn_t insn;
1391 gcc_assert (!init_insn_force_unique_p);
1393 init_insn_force_unique_p = true;
1394 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1395 CANT_MOVE (insn) = 1;
1396 init_insn_force_unique_p = false;
1398 return insn;
1401 /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
1402 take it as a new vinsn instead of EXPR's vinsn.
1403 We simplify insns later, after scheduling region in
1404 simplify_changed_insns. */
1405 insn_t
1406 sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1407 insn_t after)
1409 expr_t emit_expr;
1410 insn_t insn;
1411 int flags;
1413 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1414 seqno);
1415 insn = EXPR_INSN_RTX (emit_expr);
1417 /* The insn may come from the transformation cache, which may hold already
1418 deleted insns, so mark it as not deleted. */
1419 insn->set_undeleted ();
1421 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1423 flags = INSN_INIT_TODO_SSID;
1424 if (INSN_LUID (insn) == 0)
1425 flags |= INSN_INIT_TODO_LUID;
1426 sel_init_new_insn (insn, flags);
1428 return insn;
1431 /* Move insn from EXPR after AFTER. */
1432 insn_t
1433 sel_move_insn (expr_t expr, int seqno, insn_t after)
1435 insn_t insn = EXPR_INSN_RTX (expr);
1436 basic_block bb = BLOCK_FOR_INSN (after);
1437 insn_t next = NEXT_INSN (after);
1439 /* Assert that in move_op we disconnected this insn properly. */
1440 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1441 SET_PREV_INSN (insn) = after;
1442 SET_NEXT_INSN (insn) = next;
1444 SET_NEXT_INSN (after) = insn;
1445 SET_PREV_INSN (next) = insn;
1447 /* Update links from insn to bb and vice versa. */
1448 df_insn_change_bb (insn, bb);
1449 if (BB_END (bb) == after)
1450 BB_END (bb) = insn;
1452 prepare_insn_expr (insn, seqno);
1453 return insn;
1457 /* Functions to work with right-hand sides. */
1459 /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
1460 VECT and return true when found. Use NEW_VINSN for comparison only when
1461 COMPARE_VINSNS is true. Write to INDP the index on which
1462 the search has stopped, such that inserting the new element at INDP will
1463 retain VECT's sort order. */
1464 static bool
1465 find_in_history_vect_1 (vec<expr_history_def> vect,
1466 unsigned uid, vinsn_t new_vinsn,
1467 bool compare_vinsns, int *indp)
1469 expr_history_def *arr;
1470 int i, j, len = vect.length ();
1472 if (len == 0)
1474 *indp = 0;
1475 return false;
1478 arr = vect.address ();
1479 i = 0, j = len - 1;
1481 while (i <= j)
1483 unsigned auid = arr[i].uid;
1484 vinsn_t avinsn = arr[i].new_expr_vinsn;
1486 if (auid == uid
1487 /* When undoing transformation on a bookkeeping copy, the new vinsn
1488 may not be exactly equal to the one that is saved in the vector.
1489 This is because the insn whose copy we're checking was possibly
1490 substituted itself. */
1491 && (! compare_vinsns
1492 || vinsn_equal_p (avinsn, new_vinsn)))
1494 *indp = i;
1495 return true;
1497 else if (auid > uid)
1498 break;
1499 i++;
1502 *indp = i;
1503 return false;
1506 /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
1507 the position found or -1, if no such value is in vector.
1508 Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
1510 find_in_history_vect (vec<expr_history_def> vect, rtx insn,
1511 vinsn_t new_vinsn, bool originators_p)
1513 int ind;
1515 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1516 false, &ind))
1517 return ind;
1519 if (INSN_ORIGINATORS (insn) && originators_p)
1521 unsigned uid;
1522 bitmap_iterator bi;
1524 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1525 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1526 return ind;
1529 return -1;
1532 /* Insert new element in a sorted history vector pointed to by PVECT,
1533 if it is not there already. The element is searched using
1534 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1535 the history of a transformation. */
1536 void
1537 insert_in_history_vect (vec<expr_history_def> *pvect,
1538 unsigned uid, enum local_trans_type type,
1539 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1540 ds_t spec_ds)
1542 vec<expr_history_def> vect = *pvect;
1543 expr_history_def temp;
1544 bool res;
1545 int ind;
1547 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1549 if (res)
1551 expr_history_def *phist = &vect[ind];
1553 /* It is possible that speculation types of expressions that were
1554 propagated through different paths will be different here. In this
1555 case, merge the status to get the correct check later. */
1556 if (phist->spec_ds != spec_ds)
1557 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1558 return;
1561 temp.uid = uid;
1562 temp.old_expr_vinsn = old_expr_vinsn;
1563 temp.new_expr_vinsn = new_expr_vinsn;
1564 temp.spec_ds = spec_ds;
1565 temp.type = type;
1567 vinsn_attach (old_expr_vinsn);
1568 vinsn_attach (new_expr_vinsn);
1569 vect.safe_insert (ind, temp);
1570 *pvect = vect;
1573 /* Free history vector PVECT. */
1574 static void
1575 free_history_vect (vec<expr_history_def> &pvect)
1577 unsigned i;
1578 expr_history_def *phist;
1580 if (! pvect.exists ())
1581 return;
1583 for (i = 0; pvect.iterate (i, &phist); i++)
1585 vinsn_detach (phist->old_expr_vinsn);
1586 vinsn_detach (phist->new_expr_vinsn);
1589 pvect.release ();
1592 /* Merge vector FROM to PVECT. */
1593 static void
1594 merge_history_vect (vec<expr_history_def> *pvect,
1595 vec<expr_history_def> from)
1597 expr_history_def *phist;
1598 int i;
1600 /* We keep this vector sorted. */
1601 for (i = 0; from.iterate (i, &phist); i++)
1602 insert_in_history_vect (pvect, phist->uid, phist->type,
1603 phist->old_expr_vinsn, phist->new_expr_vinsn,
1604 phist->spec_ds);
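/* Illustrative sketch, not part of the pass: a transformation is recorded
   in an expression's history keyed by the uid of the insn where it
   happened, and can later be looked up through that insn.  The helper and
   its arguments below are hypothetical (TRANS_SUBSTITUTION is one of the
   local_trans_type values from sel-sched-ir.h) and guarded out of the
   build.  */
#if 0
static void
history_vect_usage_example (expr_t expr, insn_t insn,
                            vinsn_t old_vi, vinsn_t new_vi)
{
  insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), INSN_UID (insn),
                          TRANS_SUBSTITUTION, old_vi, new_vi, 0);

  /* The vector stays sorted by uid, so the lookup below finds the entry
     (or returns -1 if INSN never transformed EXPR).  */
  int ind = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr), insn,
                                  new_vi, false);
  gcc_assert (ind >= 0);
}
#endif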
1607 /* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1608 bool
1609 vinsn_equal_p (vinsn_t x, vinsn_t y)
1611 rtx_equal_p_callback_function repcf;
1613 if (x == y)
1614 return true;
1616 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1617 return false;
1619 if (VINSN_HASH (x) != VINSN_HASH (y))
1620 return false;
1622 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1623 if (VINSN_SEPARABLE_P (x))
1625 /* Compare RHSes of VINSNs. */
1626 gcc_assert (VINSN_RHS (x));
1627 gcc_assert (VINSN_RHS (y));
1629 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1632 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1636 /* Functions for working with expressions. */
1638 /* Initialize EXPR. */
1639 static void
1640 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1641 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1642 ds_t spec_to_check_ds, int orig_sched_cycle,
1643 vec<expr_history_def> history,
1644 signed char target_available,
1645 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1646 bool cant_move)
1648 vinsn_attach (vi);
1650 EXPR_VINSN (expr) = vi;
1651 EXPR_SPEC (expr) = spec;
1652 EXPR_USEFULNESS (expr) = use;
1653 EXPR_PRIORITY (expr) = priority;
1654 EXPR_PRIORITY_ADJ (expr) = 0;
1655 EXPR_SCHED_TIMES (expr) = sched_times;
1656 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1657 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1658 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1659 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1661 if (history.exists ())
1662 EXPR_HISTORY_OF_CHANGES (expr) = history;
1663 else
1664 EXPR_HISTORY_OF_CHANGES (expr).create (0);
1666 EXPR_TARGET_AVAILABLE (expr) = target_available;
1667 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1668 EXPR_WAS_RENAMED (expr) = was_renamed;
1669 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1670 EXPR_CANT_MOVE (expr) = cant_move;
1673 /* Make a copy of the expr FROM into the expr TO. */
1674 void
1675 copy_expr (expr_t to, expr_t from)
1677 vec<expr_history_def> temp = vNULL;
1679 if (EXPR_HISTORY_OF_CHANGES (from).exists ())
1681 unsigned i;
1682 expr_history_def *phist;
1684 temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
1685 for (i = 0;
1686 temp.iterate (i, &phist);
1687 i++)
1689 vinsn_attach (phist->old_expr_vinsn);
1690 vinsn_attach (phist->new_expr_vinsn);
1694 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1695 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1696 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1697 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1698 EXPR_ORIG_SCHED_CYCLE (from), temp,
1699 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1700 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1701 EXPR_CANT_MOVE (from));
1704 /* Same, but the final expr will not ever be in av sets, so don't copy
1705 "uninteresting" data such as bitmap cache. */
1706 void
1707 copy_expr_onside (expr_t to, expr_t from)
1709 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1710 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1711 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
1712 vNULL,
1713 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1714 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1715 EXPR_CANT_MOVE (from));
1718 /* Prepare the expr of INSN for scheduling. Used when moving insn and when
1719 initializing new insns. */
1720 static void
1721 prepare_insn_expr (insn_t insn, int seqno)
1723 expr_t expr = INSN_EXPR (insn);
1724 ds_t ds;
1726 INSN_SEQNO (insn) = seqno;
1727 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1728 EXPR_SPEC (expr) = 0;
1729 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1730 EXPR_WAS_SUBSTITUTED (expr) = 0;
1731 EXPR_WAS_RENAMED (expr) = 0;
1732 EXPR_TARGET_AVAILABLE (expr) = 1;
1733 INSN_LIVE_VALID_P (insn) = false;
1735 /* ??? If this expression is speculative, make its dependence
1736 as weak as possible. We can filter this expression later
1737 in process_spec_exprs, because we do not distinguish
1738 between the status we got during compute_av_set and the
1739 existing status. To be fixed. */
1740 ds = EXPR_SPEC_DONE_DS (expr);
1741 if (ds)
1742 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1744 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1747 /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
1748 is non-null when expressions are merged from different successors at
1749 a split point. */
1750 static void
1751 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1753 if (EXPR_TARGET_AVAILABLE (to) < 0
1754 || EXPR_TARGET_AVAILABLE (from) < 0)
1755 EXPR_TARGET_AVAILABLE (to) = -1;
1756 else
1758 /* We try to detect the case when one of the expressions
1759 can only be reached through another one. In this case,
1760 we can do better. */
1761 if (split_point == NULL)
1763 int toind, fromind;
1765 toind = EXPR_ORIG_BB_INDEX (to);
1766 fromind = EXPR_ORIG_BB_INDEX (from);
1768 if (toind && toind == fromind)
1769 /* Do nothing -- everything is done in
1770 merge_with_other_exprs. */
1772 else
1773 EXPR_TARGET_AVAILABLE (to) = -1;
1775 else if (EXPR_TARGET_AVAILABLE (from) == 0
1776 && EXPR_LHS (from)
1777 && REG_P (EXPR_LHS (from))
1778 && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
1779 EXPR_TARGET_AVAILABLE (to) = -1;
1780 else
1781 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1785 /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
1786 is non-null when expressions are merged from different successors at
1787 a split point. */
1788 static void
1789 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1791 ds_t old_to_ds, old_from_ds;
1793 old_to_ds = EXPR_SPEC_DONE_DS (to);
1794 old_from_ds = EXPR_SPEC_DONE_DS (from);
1796 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1797 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1798 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1800 /* When merging e.g. control & data speculative exprs, or a control
1801 speculative with a control&data speculative one, we really have
1802 to change vinsn too. Also, when speculative status is changed,
1803 we also need to record this as a transformation in expr's history. */
1804 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1806 old_to_ds = ds_get_speculation_types (old_to_ds);
1807 old_from_ds = ds_get_speculation_types (old_from_ds);
1809 if (old_to_ds != old_from_ds)
1811 ds_t record_ds;
1813 /* When both expressions are speculative, we need to change
1814 the vinsn first. */
1815 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1817 int res;
1819 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1820 gcc_assert (res >= 0);
1823 if (split_point != NULL)
1825 /* Record the change with proper status. */
1826 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1827 record_ds &= ~(old_to_ds & SPECULATIVE);
1828 record_ds &= ~(old_from_ds & SPECULATIVE);
1830 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1831 INSN_UID (split_point), TRANS_SPECULATION,
1832 EXPR_VINSN (from), EXPR_VINSN (to),
1833 record_ds);
1840 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1841 this is done along different paths. */
1842 void
1843 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1845 /* Choose the maximum of the specs of merged exprs. This is required
1846 for correctness of bookkeeping. */
1847 if (EXPR_SPEC (to) < EXPR_SPEC (from))
1848 EXPR_SPEC (to) = EXPR_SPEC (from);
1850 if (split_point)
1851 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1852 else
1853 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1854 EXPR_USEFULNESS (from));
1856 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1857 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1859 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1860 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1862 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1863 EXPR_ORIG_BB_INDEX (to) = 0;
1865 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1866 EXPR_ORIG_SCHED_CYCLE (from));
1868 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1869 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1870 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1872 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1873 EXPR_HISTORY_OF_CHANGES (from));
1874 update_target_availability (to, from, split_point);
1875 update_speculative_bits (to, from, split_point);
1878 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1879 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1880 are merged from different successors at a split point. */
1881 void
1882 merge_expr (expr_t to, expr_t from, insn_t split_point)
1884 vinsn_t to_vi = EXPR_VINSN (to);
1885 vinsn_t from_vi = EXPR_VINSN (from);
1887 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1889 /* Make sure that speculative pattern is propagated into exprs that
1890 have non-speculative one. This will provide us with consistent
1891 speculative bits and speculative patterns inside expr. */
1892 if ((EXPR_SPEC_DONE_DS (from) != 0
1893 && EXPR_SPEC_DONE_DS (to) == 0)
1894 /* Do likewise for volatile insns, so that we always retain
1895 the may_trap_p bit on the resulting expression. */
1896 || (VINSN_MAY_TRAP_P (EXPR_VINSN (from))
1897 && !VINSN_MAY_TRAP_P (EXPR_VINSN (to))))
1898 change_vinsn_in_expr (to, EXPR_VINSN (from));
1900 merge_expr_data (to, from, split_point);
1901 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1904 /* Clear the information of this EXPR. */
1905 void
1906 clear_expr (expr_t expr)
1909 vinsn_detach (EXPR_VINSN (expr));
1910 EXPR_VINSN (expr) = NULL;
1912 free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
1915 /* For a given LV_SET, mark EXPR as having an unavailable target register. */
1916 static void
1917 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1919 if (EXPR_SEPARABLE_P (expr))
1921 if (REG_P (EXPR_LHS (expr))
1922 && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1924 /* If it's an insn like r1 = use (r1, ...), and it exists in
1925 different forms in each of the av_sets being merged, we can't say
1926 whether original destination register is available or not.
1927 However, this still works if destination register is not used
1928 in the original expression: if the branch at which LV_SET we're
1929 looking here is not actually 'other branch' in sense that same
1930 expression is available through it (but it can't be determined
1931 at computation stage because of transformations on one of the
1932 branches), it still won't affect the availability.
1933 Liveness of a register somewhere on a code motion path means
1934 it's either read somewhere on a code motion path, live on
1935 'other' branch, live at the point immediately following
1936 the original operation, or is read by the original operation.
1937 The latter case is filtered out in the condition below.
1938 It still doesn't cover the case when register is defined and used
1939 somewhere within the code motion path, and in this case we could
1940 miss a unifying code motion along both branches using a renamed
1941 register, but it won't affect a code correctness since upon
1942 an actual code motion a bookkeeping code would be generated. */
1943 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1944 EXPR_LHS (expr)))
1945 EXPR_TARGET_AVAILABLE (expr) = -1;
1946 else
1947 EXPR_TARGET_AVAILABLE (expr) = false;
1950 else
1952 unsigned regno;
1953 reg_set_iterator rsi;
1955 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1956 0, regno, rsi)
1957 if (bitmap_bit_p (lv_set, regno))
1959 EXPR_TARGET_AVAILABLE (expr) = false;
1960 break;
1963 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1964 0, regno, rsi)
1965 if (bitmap_bit_p (lv_set, regno))
1967 EXPR_TARGET_AVAILABLE (expr) = false;
1968 break;
1973 /* Try to make EXPR speculative. Return 1 when EXPR's pattern
1974 or dependence status have changed, 2 when also the target register
1975 became unavailable, 0 if nothing had to be changed. */
1977 speculate_expr (expr_t expr, ds_t ds)
1979 int res;
1980 rtx_insn *orig_insn_rtx;
1981 rtx spec_pat;
1982 ds_t target_ds, current_ds;
1984 /* Obtain the status we need to put on EXPR. */
1985 target_ds = (ds & SPECULATIVE);
1986 current_ds = EXPR_SPEC_DONE_DS (expr);
1987 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1989 orig_insn_rtx = EXPR_INSN_RTX (expr);
1991 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1993 switch (res)
1995 case 0:
1996 EXPR_SPEC_DONE_DS (expr) = ds;
1997 return current_ds != ds ? 1 : 0;
1999 case 1:
2001 rtx_insn *spec_insn_rtx =
2002 create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
2003 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
2005 change_vinsn_in_expr (expr, spec_vinsn);
2006 EXPR_SPEC_DONE_DS (expr) = ds;
2007 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
2009 /* Do not allow clobbering the address register of speculative
2010 insns. */
2011 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
2012 expr_dest_reg (expr)))
2014 EXPR_TARGET_AVAILABLE (expr) = false;
2015 return 2;
2018 return 1;
2021 case -1:
2022 return -1;
2024 default:
2025 gcc_unreachable ();
2026 return -1;
2030 /* Return a destination register, if any, of EXPR. */
2032 expr_dest_reg (expr_t expr)
2034 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
2036 if (dest != NULL_RTX && REG_P (dest))
2037 return dest;
2039 return NULL_RTX;
2042 /* Return the REGNO of EXPR's destination. */
2043 unsigned
2044 expr_dest_regno (expr_t expr)
2046 rtx dest = expr_dest_reg (expr);
2048 gcc_assert (dest != NULL_RTX);
2049 return REGNO (dest);
2052 /* For a given LV_SET, mark all expressions in JOIN_SET that are not present
2053 in AV_SET as having an unavailable target register. */
2054 void
2055 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2057 expr_t expr;
2058 av_set_iterator avi;
2060 FOR_EACH_EXPR (expr, avi, join_set)
2061 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2062 set_unavailable_target_for_expr (expr, lv_set);
2066 /* Returns true if REG (at least partially) is present in REGS. */
2067 bool
2068 register_unavailable_p (regset regs, rtx reg)
2070 unsigned regno, end_regno;
2072 regno = REGNO (reg);
2073 if (bitmap_bit_p (regs, regno))
2074 return true;
2076 end_regno = END_REGNO (reg);
2078 while (++regno < end_regno)
2079 if (bitmap_bit_p (regs, regno))
2080 return true;
2082 return false;
2085 /* Av set functions. */
2087 /* Add a new element to av set SETP.
2088 Return the element added. */
2089 static av_set_t
2090 av_set_add_element (av_set_t *setp)
2092 /* Insert at the beginning of the list. */
2093 _list_add (setp);
2094 return *setp;
2097 /* Add EXPR to SETP. */
2098 void
2099 av_set_add (av_set_t *setp, expr_t expr)
2101 av_set_t elem;
2103 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2104 elem = av_set_add_element (setp);
2105 copy_expr (_AV_SET_EXPR (elem), expr);
2108 /* Same, but do not copy EXPR. */
2109 static void
2110 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2112 av_set_t elem;
2114 elem = av_set_add_element (setp);
2115 *_AV_SET_EXPR (elem) = *expr;
2118 /* Remove expr pointed to by IP from the av_set. */
2119 void
2120 av_set_iter_remove (av_set_iterator *ip)
2122 clear_expr (_AV_SET_EXPR (*ip->lp));
2123 _list_iter_remove (ip);
2126 /* Search for an expr in SET such that it is equivalent to SOUGHT_VINSN in
2127 the sense of the vinsn_equal_p function. Return NULL if no such expr
2128 was found in SET. */
2129 expr_t
2130 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2132 expr_t expr;
2133 av_set_iterator i;
2135 FOR_EACH_EXPR (expr, i, set)
2136 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2137 return expr;
2138 return NULL;
2141 /* Same, but also remove the EXPR found. */
2142 static expr_t
2143 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2145 expr_t expr;
2146 av_set_iterator i;
2148 FOR_EACH_EXPR_1 (expr, i, setp)
2149 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2151 _list_iter_remove_nofree (&i);
2152 return expr;
2154 return NULL;
2157 /* Search for an expr in SET such that it is equivalent to EXPR in the
2158 sense of vinsn_equal_p applied to their vinsns, but is not EXPR itself.
2159 Return NULL if no such expr was found in SET. */
2160 static expr_t
2161 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2163 expr_t cur_expr;
2164 av_set_iterator i;
2166 FOR_EACH_EXPR (cur_expr, i, set)
2168 if (cur_expr == expr)
2169 continue;
2170 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2171 return cur_expr;
2174 return NULL;
2177 /* If an expression equivalent to EXPR is already in *AVP, merge EXPR into it and remove EXPR via IP. */
2178 expr_t
2179 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2181 expr_t expr2;
2183 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2184 if (expr2 != NULL)
2186 /* Reset target availability on merge, since taking it only from one
2187 of the exprs would be controversial for different code. */
2188 EXPR_TARGET_AVAILABLE (expr2) = -1;
2189 EXPR_USEFULNESS (expr2) = 0;
2191 merge_expr (expr2, expr, NULL);
2193 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2194 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2196 av_set_iter_remove (ip);
2197 return expr2;
2200 return expr;
2203 /* Return true if there is an expr that correlates to VI in SET. */
2204 bool
2205 av_set_is_in_p (av_set_t set, vinsn_t vi)
2207 return av_set_lookup (set, vi) != NULL;
2210 /* Return a copy of SET. */
2211 av_set_t
2212 av_set_copy (av_set_t set)
2214 expr_t expr;
2215 av_set_iterator i;
2216 av_set_t res = NULL;
2218 FOR_EACH_EXPR (expr, i, set)
2219 av_set_add (&res, expr);
2221 return res;
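/* A short usage sketch of the av-set primitives above (illustrative only;
   EXPR is assumed to be a valid expr_t whose insn is not a nop):

       av_set_t av = NULL;

       av_set_add (&av, expr);
       gcc_assert (av_set_is_in_p (av, EXPR_VINSN (expr)));
       av_set_clear (&av);

   av_set_add stores a copy of EXPR, so the set must eventually be
   released with av_set_clear.  av_set_is_in_p is defined just above as a
   wrapper around av_set_lookup.  */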
2224 /* Join two av sets that do not have common elements by attaching the second
2225 set (pointed to by FROMP) to the end of the first set (TO_TAILP must point
2226 to _AV_SET_NEXT of the first set's last element). */
2227 static void
2228 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2230 gcc_assert (*to_tailp == NULL);
2231 *to_tailp = *fromp;
2232 *fromp = NULL;
2235 /* Make the set pointed to by TOP the union of *TOP and *FROMP. Clear the
2236 av_set pointed to by FROMP afterwards. */
2237 void
2238 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2240 expr_t expr1;
2241 av_set_iterator i;
2243 /* Delete from *TOP all exprs that are also present in *FROMP. */
2244 FOR_EACH_EXPR_1 (expr1, i, top)
2246 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2248 if (expr2)
2250 merge_expr (expr2, expr1, insn);
2251 av_set_iter_remove (&i);
2255 join_distinct_sets (i.lp, fromp);
2258 /* Same as above, but also update availability of target register in
2259 TOP judging by TO_LV_SET and FROM_LV_SET. */
2260 void
2261 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2262 regset from_lv_set, insn_t insn)
2264 expr_t expr1;
2265 av_set_iterator i;
2266 av_set_t *to_tailp, in_both_set = NULL;
2268 /* Delete from *TOP all exprs that are also present in *FROMP. */
2269 FOR_EACH_EXPR_1 (expr1, i, top)
2271 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2273 if (expr2)
2275 /* It may be that the expressions have different destination
2276 registers, in which case we need to check liveness here. */
2277 if (EXPR_SEPARABLE_P (expr1))
2279 int regno1 = (REG_P (EXPR_LHS (expr1))
2280 ? (int) expr_dest_regno (expr1) : -1);
2281 int regno2 = (REG_P (EXPR_LHS (expr2))
2282 ? (int) expr_dest_regno (expr2) : -1);
2284 /* ??? We don't have a way to check restrictions for
2285 *other* register on the current path, we did it only
2286 for the current target register. Give up. */
2287 if (regno1 != regno2)
2288 EXPR_TARGET_AVAILABLE (expr2) = -1;
2290 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2291 EXPR_TARGET_AVAILABLE (expr2) = -1;
2293 merge_expr (expr2, expr1, insn);
2294 av_set_add_nocopy (&in_both_set, expr2);
2295 av_set_iter_remove (&i);
2297 else
2298 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2299 FROM_LV_SET. */
2300 set_unavailable_target_for_expr (expr1, from_lv_set);
2302 to_tailp = i.lp;
2304 /* These expressions are not present in TOP. Check liveness
2305 restrictions on TO_LV_SET. */
2306 FOR_EACH_EXPR (expr1, i, *fromp)
2307 set_unavailable_target_for_expr (expr1, to_lv_set);
2309 join_distinct_sets (i.lp, &in_both_set);
2310 join_distinct_sets (to_tailp, fromp);
2313 /* Clear av_set pointed to by SETP. */
2314 void
2315 av_set_clear (av_set_t *setp)
2317 expr_t expr;
2318 av_set_iterator i;
2320 FOR_EACH_EXPR_1 (expr, i, setp)
2321 av_set_iter_remove (&i);
2323 gcc_assert (*setp == NULL);
2326 /* Leave only one non-speculative element in *SETP. */
2327 void
2328 av_set_leave_one_nonspec (av_set_t *setp)
2330 expr_t expr;
2331 av_set_iterator i;
2332 bool has_one_nonspec = false;
2334 /* Keep all speculative exprs, and leave one non-speculative
2335 (the first one). */
2336 FOR_EACH_EXPR_1 (expr, i, setp)
2338 if (!EXPR_SPEC_DONE_DS (expr))
2340 if (has_one_nonspec)
2341 av_set_iter_remove (&i);
2342 else
2343 has_one_nonspec = true;
2348 /* Return the N'th element of the SET. */
2349 expr_t
2350 av_set_element (av_set_t set, int n)
2352 expr_t expr;
2353 av_set_iterator i;
2355 FOR_EACH_EXPR (expr, i, set)
2356 if (n-- == 0)
2357 return expr;
2359 gcc_unreachable ();
2360 return NULL;
2363 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2364 void
2365 av_set_substract_cond_branches (av_set_t *avp)
2367 av_set_iterator i;
2368 expr_t expr;
2370 FOR_EACH_EXPR_1 (expr, i, avp)
2371 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2372 av_set_iter_remove (&i);
2375 /* Multiply the usefulness attribute of each member of av-set AV by
2376 PROB / ALL_PROB. */
2377 void
2378 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2380 av_set_iterator i;
2381 expr_t expr;
2383 FOR_EACH_EXPR (expr, i, av)
2384 EXPR_USEFULNESS (expr) = (all_prob
2385 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2386 : 0);
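/* Worked example (illustrative): with REG_BR_PROB_BASE == 10000, an
   expression whose usefulness is 10000 and whose path probability at a
   split point is PROB == 4000 out of ALL_PROB == 10000 ends up with
   usefulness (10000 * 4000) / 10000 == 4000, i.e. it is deemed useful on
   40% of the paths.  When ALL_PROB is zero, the usefulness is reset to 0,
   as the division above would be undefined.  */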
2389 /* Leave in *AVP only those expressions that are also present in AV,
2390 merging their histories of transformations. */
2391 void
2392 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2394 av_set_iterator i;
2395 expr_t expr, expr2;
2397 FOR_EACH_EXPR_1 (expr, i, avp)
2398 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2399 av_set_iter_remove (&i);
2400 else
2401 /* When updating av sets in bookkeeping blocks, we can add more insns
2402 there which will be transformed but the upper av sets will not
2403 reflect those transformations. We then fail to undo those
2404 when searching for such insns. So merge the history saved
2405 in the av set of the block we are processing. */
2406 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2407 EXPR_HISTORY_OF_CHANGES (expr2));
2412 /* Dependence hooks to initialize insn data. */
2414 /* This is used in hooks callable from dependence analysis when initializing
2415 instruction's data. */
2416 static struct
2418 /* Where the dependence was found (lhs/rhs). */
2419 deps_where_t where;
2421 /* The actual data object to initialize. */
2422 idata_t id;
2424 /* True when the insn should not be made clonable. */
2425 bool force_unique_p;
2427 /* True when insn should be treated as of type USE, i.e. never renamed. */
2428 bool force_use_p;
2429 } deps_init_id_data;
2432 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2433 clonable. */
2434 static void
2435 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2437 int type;
2439 /* Determine whether INSN could be cloned and return the appropriate vinsn
2440 type. Clonable insns which can be separated into lhs and rhs have type
2441 SET; other clonable insns have type USE. */
2442 type = GET_CODE (insn);
2444 /* Only regular insns could be cloned. */
2445 if (type == INSN && !force_unique_p)
2446 type = SET;
2447 else if (type == JUMP_INSN && simplejump_p (insn))
2448 type = PC;
2449 else if (type == DEBUG_INSN)
2450 type = !force_unique_p ? USE : INSN;
2452 IDATA_TYPE (id) = type;
2453 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2454 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2455 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2458 /* Start initializing insn data. */
2459 static void
2460 deps_init_id_start_insn (insn_t insn)
2462 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2464 setup_id_for_insn (deps_init_id_data.id, insn,
2465 deps_init_id_data.force_unique_p);
2466 deps_init_id_data.where = DEPS_IN_INSN;
2469 /* Start initializing lhs data. */
2470 static void
2471 deps_init_id_start_lhs (rtx lhs)
2473 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2474 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2476 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2478 IDATA_LHS (deps_init_id_data.id) = lhs;
2479 deps_init_id_data.where = DEPS_IN_LHS;
2483 /* Finish initializing lhs data. */
2484 static void
2485 deps_init_id_finish_lhs (void)
2487 deps_init_id_data.where = DEPS_IN_INSN;
2490 /* Note a set of REGNO. */
2491 static void
2492 deps_init_id_note_reg_set (int regno)
2494 haifa_note_reg_set (regno);
2496 if (deps_init_id_data.where == DEPS_IN_RHS)
2497 deps_init_id_data.force_use_p = true;
2499 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2500 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2502 #ifdef STACK_REGS
2503 /* Make instructions that set stack registers to be ineligible for
2504 renaming to avoid issues with find_used_regs. */
2505 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2506 deps_init_id_data.force_use_p = true;
2507 #endif
2510 /* Note a clobber of REGNO. */
2511 static void
2512 deps_init_id_note_reg_clobber (int regno)
2514 haifa_note_reg_clobber (regno);
2516 if (deps_init_id_data.where == DEPS_IN_RHS)
2517 deps_init_id_data.force_use_p = true;
2519 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2520 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2523 /* Note a use of REGNO. */
2524 static void
2525 deps_init_id_note_reg_use (int regno)
2527 haifa_note_reg_use (regno);
2529 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2530 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2533 /* Start initializing rhs data. */
2534 static void
2535 deps_init_id_start_rhs (rtx rhs)
2537 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2539 /* And there was no sel_deps_reset_to_insn (). */
2540 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2542 IDATA_RHS (deps_init_id_data.id) = rhs;
2543 deps_init_id_data.where = DEPS_IN_RHS;
2547 /* Finish initializing rhs data. */
2548 static void
2549 deps_init_id_finish_rhs (void)
2551 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2552 || deps_init_id_data.where == DEPS_IN_INSN);
2553 deps_init_id_data.where = DEPS_IN_INSN;
2556 /* Finish initializing insn data. */
2557 static void
2558 deps_init_id_finish_insn (void)
2560 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2562 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2564 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2565 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2567 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2568 || deps_init_id_data.force_use_p)
2570 /* This should be a USE, as we don't want to schedule its RHS
2571 separately. However, we still want the LHS and RHS recorded
2572 for the purposes of substitution. That's why we don't
2573 simply call downgrade_to_use () here. */
2574 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2575 gcc_assert (!lhs == !rhs);
2577 IDATA_TYPE (deps_init_id_data.id) = USE;
2581 deps_init_id_data.where = DEPS_IN_NOWHERE;
2584 /* This is dependence info used for initializing insn's data. */
2585 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2587 /* This initializes most of the static part of the above structure. */
2588 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2590 NULL,
2592 deps_init_id_start_insn,
2593 deps_init_id_finish_insn,
2594 deps_init_id_start_lhs,
2595 deps_init_id_finish_lhs,
2596 deps_init_id_start_rhs,
2597 deps_init_id_finish_rhs,
2598 deps_init_id_note_reg_set,
2599 deps_init_id_note_reg_clobber,
2600 deps_init_id_note_reg_use,
2601 NULL, /* note_mem_dep */
2602 NULL, /* note_dep */
2604 0, /* use_cselib */
2605 0, /* use_deps_list */
2606 0 /* generate_spec_deps */
2609 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2610 we don't actually need information about lhs and rhs. */
2611 static void
2612 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2614 rtx pat = PATTERN (insn);
2616 if (NONJUMP_INSN_P (insn)
2617 && GET_CODE (pat) == SET
2618 && !force_unique_p)
2620 IDATA_RHS (id) = SET_SRC (pat);
2621 IDATA_LHS (id) = SET_DEST (pat);
2623 else
2624 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2627 /* Possibly downgrade INSN to USE. */
2628 static void
2629 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2631 bool must_be_use = false;
2632 df_ref def;
2633 rtx lhs = IDATA_LHS (id);
2634 rtx rhs = IDATA_RHS (id);
2636 /* We downgrade only SETs. */
2637 if (IDATA_TYPE (id) != SET)
2638 return;
2640 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2642 IDATA_TYPE (id) = USE;
2643 return;
2646 FOR_EACH_INSN_DEF (def, insn)
2648 if (DF_REF_INSN (def)
2649 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2650 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2652 must_be_use = true;
2653 break;
2656 #ifdef STACK_REGS
2657 /* Make instructions that set stack registers to be ineligible for
2658 renaming to avoid issues with find_used_regs. */
2659 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2661 must_be_use = true;
2662 break;
2664 #endif
2667 if (must_be_use)
2668 IDATA_TYPE (id) = USE;
2671 /* Setup register sets describing INSN in ID. */
2672 static void
2673 setup_id_reg_sets (idata_t id, insn_t insn)
2675 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2676 df_ref def, use;
2677 regset tmp = get_clear_regset_from_pool ();
2679 FOR_EACH_INSN_INFO_DEF (def, insn_info)
2681 unsigned int regno = DF_REF_REGNO (def);
2683 /* Post modifies are treated like clobbers by sched-deps.c. */
2684 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2685 | DF_REF_PRE_POST_MODIFY)))
2686 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2687 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2689 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2691 #ifdef STACK_REGS
2692 /* For stack registers, treat writes to them as writes
2693 to the first one to be consistent with sched-deps.c. */
2694 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2695 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2696 #endif
2698 /* Mark special refs that generate a read/write def pair. */
2699 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2700 || regno == STACK_POINTER_REGNUM)
2701 bitmap_set_bit (tmp, regno);
2704 FOR_EACH_INSN_INFO_USE (use, insn_info)
2706 unsigned int regno = DF_REF_REGNO (use);
2708 /* When these refs are met for the first time, skip them, as
2709 these uses are just counterparts of some defs. */
2710 if (bitmap_bit_p (tmp, regno))
2711 bitmap_clear_bit (tmp, regno);
2712 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2714 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2716 #ifdef STACK_REGS
2717 /* For stack registers, treat reads from them as reads from
2718 the first one to be consistent with sched-deps.c. */
2719 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2720 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2721 #endif
2725 return_regset_to_pool (tmp);
2728 /* Initialize instruction data for INSN in ID using DF's data. */
2729 static void
2730 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2732 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2734 setup_id_for_insn (id, insn, force_unique_p);
2735 setup_id_lhs_rhs (id, insn, force_unique_p);
2737 if (INSN_NOP_P (insn))
2738 return;
2740 maybe_downgrade_id_to_use (id, insn);
2741 setup_id_reg_sets (id, insn);
2744 /* Initialize instruction data for INSN in ID. */
2745 static void
2746 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2748 struct deps_desc _dc, *dc = &_dc;
2750 deps_init_id_data.where = DEPS_IN_NOWHERE;
2751 deps_init_id_data.id = id;
2752 deps_init_id_data.force_unique_p = force_unique_p;
2753 deps_init_id_data.force_use_p = false;
2755 init_deps (dc, false);
2757 memcpy (&deps_init_id_sched_deps_info,
2758 &const_deps_init_id_sched_deps_info,
2759 sizeof (deps_init_id_sched_deps_info));
2761 if (spec_info != NULL)
2762 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2764 sched_deps_info = &deps_init_id_sched_deps_info;
2766 deps_analyze_insn (dc, insn);
2768 free_deps (dc);
2770 deps_init_id_data.id = NULL;
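/* Illustrative sketch (not part of the original code): instruction data
   is normally filled in one of two ways, depending on whether DF data is
   already available for the insn:

       if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
         init_id_from_df (id, insn, force_unique_p);
       else
         deps_init_id (id, insn, force_unique_p);

   Both paths leave IDATA_TYPE, IDATA_LHS/IDATA_RHS and the three register
   sets of ID initialized consistently.  */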
2774 struct sched_scan_info_def
2776 /* This hook notifies the scheduler frontend to extend its internal
2777 per-basic-block data structures. It should be called once before a
2778 series of calls to bb_init (). */
2779 void (*extend_bb) (void);
2781 /* This hook makes the scheduler frontend initialize its internal data
2782 structures for the passed basic block. */
2783 void (*init_bb) (basic_block);
2785 /* This hook notifies the scheduler frontend to extend its internal per-insn
2786 data structures. It should be called once before a series of calls to
2787 insn_init (). */
2788 void (*extend_insn) (void);
2790 /* This hook makes the scheduler frontend initialize its internal data
2791 structures for the passed insn. */
2792 void (*init_insn) (insn_t);
2795 /* A driver function to add a set of basic blocks (BBS) to the
2796 scheduling region. */
2797 static void
2798 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2800 unsigned i;
2801 basic_block bb;
2803 if (ssi->extend_bb)
2804 ssi->extend_bb ();
2806 if (ssi->init_bb)
2807 FOR_EACH_VEC_ELT (bbs, i, bb)
2808 ssi->init_bb (bb);
2810 if (ssi->extend_insn)
2811 ssi->extend_insn ();
2813 if (ssi->init_insn)
2814 FOR_EACH_VEC_ELT (bbs, i, bb)
2816 rtx_insn *insn;
2818 FOR_BB_INSNS (bb, insn)
2819 ssi->init_insn (insn);
2823 /* Implement hooks for collecting fundamental insn properties, such as whether
2824 the insn is an ASM or is within a SCHED_GROUP. */
2826 /* True when the "one-time init" data for INSN has not been initialized yet. */
2827 static bool
2828 first_time_insn_init (insn_t insn)
2830 return INSN_LIVE (insn) == NULL;
2833 /* Hash an entry in a transformed_insns hashtable. */
2834 static hashval_t
2835 hash_transformed_insns (const void *p)
2837 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2840 /* Compare the entries in a transformed_insns hashtable. */
2841 static int
2842 eq_transformed_insns (const void *p, const void *q)
2844 rtx_insn *i1 =
2845 VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2846 rtx_insn *i2 =
2847 VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2849 if (INSN_UID (i1) == INSN_UID (i2))
2850 return 1;
2851 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2854 /* Free an entry in a transformed_insns hashtable. */
2855 static void
2856 free_transformed_insns (void *p)
2858 struct transformed_insns *pti = (struct transformed_insns *) p;
2860 vinsn_detach (pti->vinsn_old);
2861 vinsn_detach (pti->vinsn_new);
2862 free (pti);
2865 /* Init the s_i_d data for INSN which should be inited just once, when
2866 we first see the insn. */
2867 static void
2868 init_first_time_insn_data (insn_t insn)
2870 /* This should not be set if this is the first time we init data for
2871 insn. */
2872 gcc_assert (first_time_insn_init (insn));
2874 /* These are needed for nops too. */
2875 INSN_LIVE (insn) = get_regset_from_pool ();
2876 INSN_LIVE_VALID_P (insn) = false;
2878 if (!INSN_NOP_P (insn))
2880 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2881 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2882 INSN_TRANSFORMED_INSNS (insn)
2883 = htab_create (16, hash_transformed_insns,
2884 eq_transformed_insns, free_transformed_insns);
2885 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2889 /* Free almost all above data for INSN that is scheduled already.
2890 Used for extra-large basic blocks. */
2891 void
2892 free_data_for_scheduled_insn (insn_t insn)
2894 gcc_assert (! first_time_insn_init (insn));
2896 if (! INSN_ANALYZED_DEPS (insn))
2897 return;
2899 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2900 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2901 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2903 /* This is allocated only for bookkeeping insns. */
2904 if (INSN_ORIGINATORS (insn))
2905 BITMAP_FREE (INSN_ORIGINATORS (insn));
2906 free_deps (&INSN_DEPS_CONTEXT (insn));
2908 INSN_ANALYZED_DEPS (insn) = NULL;
2910 /* Clear the readonly flag so we would ICE when trying to recalculate
2911 the deps context (as we believe that it should not happen). */
2912 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2915 /* Free the same data as above for INSN. */
2916 static void
2917 free_first_time_insn_data (insn_t insn)
2919 gcc_assert (! first_time_insn_init (insn));
2921 free_data_for_scheduled_insn (insn);
2922 return_regset_to_pool (INSN_LIVE (insn));
2923 INSN_LIVE (insn) = NULL;
2924 INSN_LIVE_VALID_P (insn) = false;
2927 /* Initialize region-scope data structures for basic blocks. */
2928 static void
2929 init_global_and_expr_for_bb (basic_block bb)
2931 if (sel_bb_empty_p (bb))
2932 return;
2934 invalidate_av_set (bb);
2937 /* Data for global dependency analysis (to initialize CANT_MOVE and
2938 SCHED_GROUP_P). */
2939 static struct
2941 /* Previous insn. */
2942 insn_t prev_insn;
2943 } init_global_data;
2945 /* Determine if INSN is in the sched_group, is an asm or should not be
2946 cloned. After that initialize its expr. */
2947 static void
2948 init_global_and_expr_for_insn (insn_t insn)
2950 if (LABEL_P (insn))
2951 return;
2953 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2955 init_global_data.prev_insn = NULL;
2956 return;
2959 gcc_assert (INSN_P (insn));
2961 if (SCHED_GROUP_P (insn))
2962 /* Setup a sched_group. */
2964 insn_t prev_insn = init_global_data.prev_insn;
2966 if (prev_insn)
2967 INSN_SCHED_NEXT (prev_insn) = insn;
2969 init_global_data.prev_insn = insn;
2971 else
2972 init_global_data.prev_insn = NULL;
2974 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2975 || asm_noperands (PATTERN (insn)) >= 0)
2976 /* Mark INSN as an asm. */
2977 INSN_ASM_P (insn) = true;
2980 bool force_unique_p;
2981 ds_t spec_done_ds;
2983 /* Certain instructions cannot be cloned, and frame related insns and
2984 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2985 their block. */
2986 if (prologue_epilogue_contains (insn))
2988 if (RTX_FRAME_RELATED_P (insn))
2989 CANT_MOVE (insn) = 1;
2990 else
2992 rtx note;
2993 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2994 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2995 && ((enum insn_note) INTVAL (XEXP (note, 0))
2996 == NOTE_INSN_EPILOGUE_BEG))
2998 CANT_MOVE (insn) = 1;
2999 break;
3002 force_unique_p = true;
3004 else
3005 if (CANT_MOVE (insn)
3006 || INSN_ASM_P (insn)
3007 || SCHED_GROUP_P (insn)
3008 || CALL_P (insn)
3009 /* Exception handling insns are always unique. */
3010 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
3011 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
3012 || control_flow_insn_p (insn)
3013 || volatile_insn_p (PATTERN (insn))
3014 || (targetm.cannot_copy_insn_p
3015 && targetm.cannot_copy_insn_p (insn)))
3016 force_unique_p = true;
3017 else
3018 force_unique_p = false;
3020 if (targetm.sched.get_insn_spec_ds)
3022 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3023 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3025 else
3026 spec_done_ds = 0;
3028 /* Initialize INSN's expr. */
3029 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3030 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3031 spec_done_ds, 0, 0, vNULL, true,
3032 false, false, false, CANT_MOVE (insn));
3035 init_first_time_insn_data (insn);
3038 /* Scan the region and initialize instruction data for basic blocks BBS. */
3039 void
3040 sel_init_global_and_expr (bb_vec_t bbs)
3042 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
3043 const struct sched_scan_info_def ssi =
3045 NULL, /* extend_bb */
3046 init_global_and_expr_for_bb, /* init_bb */
3047 extend_insn_data, /* extend_insn */
3048 init_global_and_expr_for_insn /* init_insn */
3051 sched_scan (&ssi, bbs);
3054 /* Finalize region-scope data structures for basic blocks. */
3055 static void
3056 finish_global_and_expr_for_bb (basic_block bb)
3058 av_set_clear (&BB_AV_SET (bb));
3059 BB_AV_LEVEL (bb) = 0;
3062 /* Finalize INSN's data. */
3063 static void
3064 finish_global_and_expr_insn (insn_t insn)
3066 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3067 return;
3069 gcc_assert (INSN_P (insn));
3071 if (INSN_LUID (insn) > 0)
3073 free_first_time_insn_data (insn);
3074 INSN_WS_LEVEL (insn) = 0;
3075 CANT_MOVE (insn) = 0;
3077 /* We can no longer assert this, as vinsns of this insn could be
3078 easily live in other insn's caches. This should be changed to
3079 a counter-like approach among all vinsns. */
3080 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3081 clear_expr (INSN_EXPR (insn));
3085 /* Finalize per instruction data for the whole region. */
3086 void
3087 sel_finish_global_and_expr (void)
3090 bb_vec_t bbs;
3091 int i;
3093 bbs.create (current_nr_blocks);
3095 for (i = 0; i < current_nr_blocks; i++)
3096 bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)));
3098 /* Clear AV_SETs and INSN_EXPRs. */
3100 const struct sched_scan_info_def ssi =
3102 NULL, /* extend_bb */
3103 finish_global_and_expr_for_bb, /* init_bb */
3104 NULL, /* extend_insn */
3105 finish_global_and_expr_insn /* init_insn */
3108 sched_scan (&ssi, bbs);
3111 bbs.release ();
3114 finish_insns ();
3118 /* In the below hooks, we merely calculate whether or not a dependence
3119 exists, and in what part of the insn. However, we will need more data
3120 when we start caching dependence requests. */
3122 /* Container to hold information for dependency analysis. */
3123 static struct
3125 deps_t dc;
3127 /* A variable to track which part of rtx we are scanning in
3128 sched-deps.c: sched_analyze_insn (). */
3129 deps_where_t where;
3131 /* Current producer. */
3132 insn_t pro;
3134 /* Current consumer. */
3135 vinsn_t con;
3137 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3138 X is from { INSN, LHS, RHS }. */
3139 ds_t has_dep_p[DEPS_IN_NOWHERE];
3140 } has_dependence_data;
3142 /* Start analyzing dependencies of INSN. */
3143 static void
3144 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3146 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3148 has_dependence_data.where = DEPS_IN_INSN;
3151 /* Finish analyzing dependencies of an insn. */
3152 static void
3153 has_dependence_finish_insn (void)
3155 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3157 has_dependence_data.where = DEPS_IN_NOWHERE;
3160 /* Start analyzing dependencies of LHS. */
3161 static void
3162 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3164 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3166 if (VINSN_LHS (has_dependence_data.con) != NULL)
3167 has_dependence_data.where = DEPS_IN_LHS;
3170 /* Finish analyzing dependencies of an lhs. */
3171 static void
3172 has_dependence_finish_lhs (void)
3174 has_dependence_data.where = DEPS_IN_INSN;
3177 /* Start analyzing dependencies of RHS. */
3178 static void
3179 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3181 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3183 if (VINSN_RHS (has_dependence_data.con) != NULL)
3184 has_dependence_data.where = DEPS_IN_RHS;
3187 /* Finish analyzing dependencies of an rhs. */
3188 static void
3189 has_dependence_finish_rhs (void)
3191 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3192 || has_dependence_data.where == DEPS_IN_INSN);
3194 has_dependence_data.where = DEPS_IN_INSN;
3197 /* Note a set of REGNO. */
3198 static void
3199 has_dependence_note_reg_set (int regno)
3201 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3203 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3204 VINSN_INSN_RTX
3205 (has_dependence_data.con)))
3207 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3209 if (reg_last->sets != NULL
3210 || reg_last->clobbers != NULL)
3211 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3213 if (reg_last->uses || reg_last->implicit_sets)
3214 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3218 /* Note a clobber of REGNO. */
3219 static void
3220 has_dependence_note_reg_clobber (int regno)
3222 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3224 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3225 VINSN_INSN_RTX
3226 (has_dependence_data.con)))
3228 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3230 if (reg_last->sets)
3231 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3233 if (reg_last->uses || reg_last->implicit_sets)
3234 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3238 /* Note a use of REGNO. */
3239 static void
3240 has_dependence_note_reg_use (int regno)
3242 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3244 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3245 VINSN_INSN_RTX
3246 (has_dependence_data.con)))
3248 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3250 if (reg_last->sets)
3251 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3253 if (reg_last->clobbers || reg_last->implicit_sets)
3254 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3256 /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3257 is actually a check insn. We need to do this for any register
3258 read-read dependency with the check unless we track properly
3259 all registers written by BE_IN_SPEC-speculated insns, as
3260 we don't have explicit dependence lists. See PR 53975. */
3261 if (reg_last->uses)
3263 ds_t pro_spec_checked_ds;
3265 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3266 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3268 if (pro_spec_checked_ds != 0)
3269 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3270 NULL_RTX, NULL_RTX);
3275 /* Note a memory dependence. */
3276 static void
3277 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3278 rtx pending_mem ATTRIBUTE_UNUSED,
3279 insn_t pending_insn ATTRIBUTE_UNUSED,
3280 ds_t ds ATTRIBUTE_UNUSED)
3282 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3283 VINSN_INSN_RTX (has_dependence_data.con)))
3285 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3287 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3291 /* Note a dependence. */
3292 static void
3293 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3294 ds_t ds ATTRIBUTE_UNUSED)
3296 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3297 VINSN_INSN_RTX (has_dependence_data.con)))
3299 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3301 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3305 /* Mark the insn as having a hard dependence that prevents speculation. */
3306 void
3307 sel_mark_hard_insn (rtx insn)
3309 int i;
3311 /* Only work when we're in has_dependence_p mode.
3312 ??? This is a hack, this should actually be a hook. */
3313 if (!has_dependence_data.dc || !has_dependence_data.pro)
3314 return;
3316 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3317 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3319 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3320 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3323 /* This structure holds the hooks for the dependency analysis used when
3324 actually processing dependencies in the scheduler. */
3325 static struct sched_deps_info_def has_dependence_sched_deps_info;
3327 /* This initializes most of the fields of the above structure. */
3328 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3330 NULL,
3332 has_dependence_start_insn,
3333 has_dependence_finish_insn,
3334 has_dependence_start_lhs,
3335 has_dependence_finish_lhs,
3336 has_dependence_start_rhs,
3337 has_dependence_finish_rhs,
3338 has_dependence_note_reg_set,
3339 has_dependence_note_reg_clobber,
3340 has_dependence_note_reg_use,
3341 has_dependence_note_mem_dep,
3342 has_dependence_note_dep,
3344 0, /* use_cselib */
3345 0, /* use_deps_list */
3346 0 /* generate_spec_deps */
3349 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3350 static void
3351 setup_has_dependence_sched_deps_info (void)
3353 memcpy (&has_dependence_sched_deps_info,
3354 &const_has_dependence_sched_deps_info,
3355 sizeof (has_dependence_sched_deps_info));
3357 if (spec_info != NULL)
3358 has_dependence_sched_deps_info.generate_spec_deps = 1;
3360 sched_deps_info = &has_dependence_sched_deps_info;
3363 /* Remove all dependences found and recorded in has_dependence_data array. */
3364 void
3365 sel_clear_has_dependence (void)
3367 int i;
3369 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3370 has_dependence_data.has_dep_p[i] = 0;
3373 /* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3374 to the dependence information array in HAS_DEP_PP. */
3375 ds_t
3376 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3378 int i;
3379 ds_t ds;
3380 struct deps_desc *dc;
3382 if (INSN_SIMPLEJUMP_P (pred))
3383 /* Unconditional jump is just a transfer of control flow.
3384 Ignore it. */
3385 return false;
3387 dc = &INSN_DEPS_CONTEXT (pred);
3389 /* We init this field lazily. */
3390 if (dc->reg_last == NULL)
3391 init_deps_reg_last (dc);
3393 if (!dc->readonly)
3395 has_dependence_data.pro = NULL;
3396 /* Initialize empty dep context with information about PRED. */
3397 advance_deps_context (dc, pred);
3398 dc->readonly = 1;
3401 has_dependence_data.where = DEPS_IN_NOWHERE;
3402 has_dependence_data.pro = pred;
3403 has_dependence_data.con = EXPR_VINSN (expr);
3404 has_dependence_data.dc = dc;
3406 sel_clear_has_dependence ();
3408 /* Now catch all dependencies that would be generated between PRED and
3409 INSN. */
3410 setup_has_dependence_sched_deps_info ();
3411 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3412 has_dependence_data.dc = NULL;
3414 /* When a barrier was found, set DEPS_IN_INSN bits. */
3415 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3416 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3417 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3418 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3420 /* Do not allow stores to memory to move through checks. Currently
3421 we don't move this to sched-deps.c as the check doesn't have
3422 obvious places to which this dependence can be attached.
3423 FIXME: this should go to a hook. */
3424 if (EXPR_LHS (expr)
3425 && MEM_P (EXPR_LHS (expr))
3426 && sel_insn_is_speculation_check (pred))
3427 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3429 *has_dep_pp = has_dependence_data.has_dep_p;
3430 ds = 0;
3431 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3432 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3433 NULL_RTX, NULL_RTX);
3435 return ds;
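/* Illustrative sketch (not from the original sources) of how a caller
   typically interprets the result:

       ds_t *has_dep_p;
       ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

       if (full_ds != 0 && (full_ds & SPECULATIVE) == full_ds)
         int res = speculate_expr (expr, full_ds);

   A zero FULL_DS means EXPR can be moved up through PRED as is; when only
   SPECULATIVE bits are set, speculation may be attempted.  HAS_DEP_P then
   gives the per-part (insn/lhs/rhs) breakdown.  */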
3439 /* Dependence hooks implementation that checks dependence latency constraints
3440 on the insns being scheduled. The entry point for these routines is
3441 tick_check_p predicate. */
3443 static struct
3445 /* An expr we are currently checking. */
3446 expr_t expr;
3448 /* A minimal cycle for its scheduling. */
3449 int cycle;
3451 /* Whether we have seen a true dependence while checking. */
3452 bool seen_true_dep_p;
3453 } tick_check_data;
3455 /* Update the minimal scheduling cycle for the insn being checked, given that
3456 it depends on PRO_INSN with status DS and weight DW. */
3457 static void
3458 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3460 expr_t con_expr = tick_check_data.expr;
3461 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3463 if (con_insn != pro_insn)
3465 enum reg_note dt;
3466 int tick;
3468 if (/* PROducer was removed from above due to pipelining. */
3469 !INSN_IN_STREAM_P (pro_insn)
3470 /* Or PROducer was originally on the next iteration regarding the
3471 CONsumer. */
3472 || (INSN_SCHED_TIMES (pro_insn)
3473 - EXPR_SCHED_TIMES (con_expr)) > 1)
3474 /* Don't count this dependence. */
3475 return;
3477 dt = ds_to_dt (ds);
3478 if (dt == REG_DEP_TRUE)
3479 tick_check_data.seen_true_dep_p = true;
3481 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3484 dep_def _dep, *dep = &_dep;
3486 init_dep (dep, pro_insn, con_insn, dt);
3488 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3491 /* When there are several kinds of dependencies between pro and con,
3492 only REG_DEP_TRUE should be taken into account. */
3493 if (tick > tick_check_data.cycle
3494 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3495 tick_check_data.cycle = tick;
3499 /* An implementation of note_dep hook. */
3500 static void
3501 tick_check_note_dep (insn_t pro, ds_t ds)
3503 tick_check_dep_with_dw (pro, ds, 0);
3506 /* An implementation of note_mem_dep hook. */
3507 static void
3508 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3510 dw_t dw;
3512 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3513 ? estimate_dep_weak (mem1, mem2)
3514 : 0);
3516 tick_check_dep_with_dw (pro, ds, dw);
3519 /* This structure contains hooks for dependence analysis used when determining
3520 whether an insn is ready for scheduling. */
3521 static struct sched_deps_info_def tick_check_sched_deps_info =
3523 NULL,
3525 NULL,
3526 NULL,
3527 NULL,
3528 NULL,
3529 NULL,
3530 NULL,
3531 haifa_note_reg_set,
3532 haifa_note_reg_clobber,
3533 haifa_note_reg_use,
3534 tick_check_note_mem_dep,
3535 tick_check_note_dep,
3537 0, 0, 0
3540 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3541 scheduled. Return 0 if all data from producers in DC is ready. */
3543 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3545 int cycles_left;
3546 /* Initialize variables. */
3547 tick_check_data.expr = expr;
3548 tick_check_data.cycle = 0;
3549 tick_check_data.seen_true_dep_p = false;
3550 sched_deps_info = &tick_check_sched_deps_info;
3552 gcc_assert (!dc->readonly);
3553 dc->readonly = 1;
3554 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3555 dc->readonly = 0;
3557 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3559 return cycles_left >= 0 ? cycles_left : 0;
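/* Usage sketch (illustrative, not from the original sources).  DC is
   assumed to be the dependence context accumulated at FENCE:

       int stalls = tick_check_p (expr, dc, fence);

   A zero result means every producer of EXPR has issued early enough; a
   positive result is the number of cycles EXPR still has to wait past
   FENCE_CYCLE (fence).  */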
3563 /* Functions to work with insns. */
3565 /* Returns true if LHS of INSN is the same as DEST of an insn
3566 being moved. */
3567 bool
3568 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3570 rtx lhs = INSN_LHS (insn);
3572 if (lhs == NULL || dest == NULL)
3573 return false;
3575 return rtx_equal_p (lhs, dest);
3578 /* Return s_i_d entry of INSN. Callable from debugger. */
3579 sel_insn_data_def
3580 insn_sid (insn_t insn)
3582 return *SID (insn);
3585 /* True when INSN is a speculative check. We can tell this by looking
3586 at the data structures of the selective scheduler, not by examining
3587 the pattern. */
3588 bool
3589 sel_insn_is_speculation_check (rtx insn)
3591 return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
3594 /* Extracts machine mode MODE and destination location DST_LOC
3595 for given INSN. */
3596 void
3597 get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode)
3599 rtx pat = PATTERN (insn);
3601 gcc_assert (dst_loc);
3602 gcc_assert (GET_CODE (pat) == SET);
3604 *dst_loc = SET_DEST (pat);
3606 gcc_assert (*dst_loc);
3607 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3609 if (mode)
3610 *mode = GET_MODE (*dst_loc);
3613 /* Returns true when moving through JUMP will result in bookkeeping
3614 creation. */
3615 bool
3616 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3618 insn_t succ;
3619 succ_iterator si;
3621 FOR_EACH_SUCC (succ, si, jump)
3622 if (sel_num_cfg_preds_gt_1 (succ))
3623 return true;
3625 return false;
3628 /* Return 'true' if INSN is the only one in its basic block. */
3629 static bool
3630 insn_is_the_only_one_in_bb_p (insn_t insn)
3632 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3635 #ifdef ENABLE_CHECKING
3636 /* Check that the region we're scheduling still has at most one
3637 backedge. */
3638 static void
3639 verify_backedges (void)
3641 if (pipelining_p)
3643 int i, n = 0;
3644 edge e;
3645 edge_iterator ei;
3647 for (i = 0; i < current_nr_blocks; i++)
3648 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs)
3649 if (in_current_region_p (e->dest)
3650 && BLOCK_TO_BB (e->dest->index) < i)
3651 n++;
3653 gcc_assert (n <= 1);
3656 #endif
3659 /* Functions to work with control flow. */
3661 /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks
3662 are sorted in topological order (it might have been invalidated by
3663 redirecting an edge). */
3664 static void
3665 sel_recompute_toporder (void)
3667 int i, n, rgn;
3668 int *postorder, n_blocks;
3670 postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun));
3671 n_blocks = post_order_compute (postorder, false, false);
3673 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3674 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3675 if (CONTAINING_RGN (postorder[i]) == rgn)
3677 BLOCK_TO_BB (postorder[i]) = n;
3678 BB_TO_BLOCK (n) = postorder[i];
3679 n++;
3682 /* Assert that we updated info for all blocks. We may miss some blocks if
3683 this function is called when redirecting an edge made a block
3684 unreachable, but that block is not deleted yet. */
3685 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3688 /* Tidy the possibly empty block BB. */
3689 static bool
3690 maybe_tidy_empty_bb (basic_block bb)
3692 basic_block succ_bb, pred_bb, note_bb;
3693 vec<basic_block> dom_bbs;
3694 edge e;
3695 edge_iterator ei;
3696 bool rescan_p;
3698 /* Keep an empty bb only if this block immediately precedes EXIT and
3699 has an incoming non-fallthrough edge, or it has no predecessors or
3700 successors. Otherwise remove it. */
3701 if (!sel_bb_empty_p (bb)
3702 || (single_succ_p (bb)
3703 && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
3704 && (!single_pred_p (bb)
3705 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3706 || EDGE_COUNT (bb->preds) == 0
3707 || EDGE_COUNT (bb->succs) == 0)
3708 return false;
3710 /* Do not attempt to redirect complex edges. */
3711 FOR_EACH_EDGE (e, ei, bb->preds)
3712 if (e->flags & EDGE_COMPLEX)
3713 return false;
3714 else if (e->flags & EDGE_FALLTHRU)
3716 rtx note;
3717 /* If prev bb ends with asm goto, see if any of the
3718 ASM_OPERANDS_LABELs don't point to the fallthru
3719 label. Do not attempt to redirect it in that case. */
3720 if (JUMP_P (BB_END (e->src))
3721 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
3723 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
3725 for (i = 0; i < n; ++i)
3726 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
3727 return false;
3731 free_data_sets (bb);
3733 /* Do not delete BB if it has more than one successor.
3734 That can occur when we are moving a jump. */
3735 if (!single_succ_p (bb))
3737 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3738 sel_merge_blocks (bb->prev_bb, bb);
3739 return true;
3742 succ_bb = single_succ (bb);
3743 rescan_p = true;
3744 pred_bb = NULL;
3745 dom_bbs.create (0);
3747 /* Save a pred/succ from the current region to attach the notes to. */
3748 note_bb = NULL;
3749 FOR_EACH_EDGE (e, ei, bb->preds)
3750 if (in_current_region_p (e->src))
3752 note_bb = e->src;
3753 break;
3755 if (note_bb == NULL)
3756 note_bb = succ_bb;
3758 /* Redirect all non-fallthru edges to the next bb. */
3759 while (rescan_p)
3761 rescan_p = false;
3763 FOR_EACH_EDGE (e, ei, bb->preds)
3765 pred_bb = e->src;
3767 if (!(e->flags & EDGE_FALLTHRU))
3769 /* We cannot invalidate the computed topological order by moving
3770 the edge destination block (E->SUCC) along a fallthru edge.
3772 We will update dominators here only when we get
3773 an unreachable block when redirecting; otherwise
3774 sel_redirect_edge_and_branch will take care of it. */
3775 if (e->dest != bb
3776 && single_pred_p (e->dest))
3777 dom_bbs.safe_push (e->dest);
3778 sel_redirect_edge_and_branch (e, succ_bb);
3779 rescan_p = true;
3780 break;
3782 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3783 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3784 still have to adjust it. */
3785 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3787 /* If possible, try to remove the unneeded conditional jump. */
3788 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3789 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3791 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3792 tidy_fallthru_edge (e);
3794 else
3795 sel_redirect_edge_and_branch (e, succ_bb);
3796 rescan_p = true;
3797 break;
3802 if (can_merge_blocks_p (bb->prev_bb, bb))
3803 sel_merge_blocks (bb->prev_bb, bb);
3804 else
3806 /* This is a block without fallthru predecessor. Just delete it. */
3807 gcc_assert (note_bb);
3808 move_bb_info (note_bb, bb);
3809 remove_empty_bb (bb, true);
3812 if (!dom_bbs.is_empty ())
3814 dom_bbs.safe_push (succ_bb);
3815 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
3816 dom_bbs.release ();
3819 return true;
3822 /* Tidy the control flow after we have removed the original insn from
3823 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3824 is true, also try to optimize control flow on non-empty blocks. */
3825 bool
3826 tidy_control_flow (basic_block xbb, bool full_tidying)
3828 bool changed = true;
3829 insn_t first, last;
3831 /* First check whether XBB is empty. */
3832 changed = maybe_tidy_empty_bb (xbb);
3833 if (changed || !full_tidying)
3834 return changed;
3836 /* Check if there is an unnecessary jump left after the insn. */
3837 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
3838 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3839 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3841 if (sel_remove_insn (BB_END (xbb), false, false))
3842 return true;
3843 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3846 first = sel_bb_head (xbb);
3847 last = sel_bb_end (xbb);
3848 if (MAY_HAVE_DEBUG_INSNS)
3850 if (first != last && DEBUG_INSN_P (first))
3852 first = NEXT_INSN (first);
3853 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3855 if (first != last && DEBUG_INSN_P (last))
3857 last = PREV_INSN (last);
3858 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3860 /* Check if there is an unnecessary jump in the previous basic block, leading
3861 to the next basic block, left after removing INSN from the stream.
3862 If so, remove that jump and redirect the edge to the current
3863 basic block (where INSN was before deletion). This way,
3864 when the NOP is deleted several instructions later together with its
3865 basic block, we will not get a jump to the next instruction, which
3866 can be harmful. */
3867 if (first == last
3868 && !sel_bb_empty_p (xbb)
3869 && INSN_NOP_P (last)
3870 /* Flow goes fallthru from current block to the next. */
3871 && EDGE_COUNT (xbb->succs) == 1
3872 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3873 /* When successor is an EXIT block, it may not be the next block. */
3874 && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun)
3875 /* And an unconditional jump in the previous basic block leads to
3876 the next basic block of XBB and this jump can be safely removed. */
3877 && in_current_region_p (xbb->prev_bb)
3878 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
3879 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3880 /* Also this jump is not at the scheduling boundary. */
3881 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3883 bool recompute_toporder_p;
3884 /* Clear data structures of jump - jump itself will be removed
3885 by sel_redirect_edge_and_branch. */
3886 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3887 recompute_toporder_p
3888 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3890 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3892 /* It can turn out that after removing the unused jump, the basic block
3893 that contained that jump becomes empty too. In such a case
3894 remove it as well. */
3895 if (sel_bb_empty_p (xbb->prev_bb))
3896 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3897 if (recompute_toporder_p)
3898 sel_recompute_toporder ();
3901 #ifdef ENABLE_CHECKING
3902 verify_backedges ();
3903 verify_dominators (CDI_DOMINATORS);
3904 #endif
3906 return changed;
3909 /* Purge meaningless empty blocks in the middle of a region. */
3910 void
3911 purge_empty_blocks (void)
3913 int i;
3915 /* Do not attempt to delete the first basic block in the region. */
3916 for (i = 1; i < current_nr_blocks; )
3918 basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
3920 if (maybe_tidy_empty_bb (b))
3921 continue;
3923 i++;
3927 /* Rip INSN off the insn stream. When ONLY_DISCONNECT is true,
3928 do not delete the insn's data, because it will be re-emitted later.
3929 Return true if we have removed some blocks afterwards. */
3930 bool
3931 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3933 basic_block bb = BLOCK_FOR_INSN (insn);
3935 gcc_assert (INSN_IN_STREAM_P (insn));
3937 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3939 expr_t expr;
3940 av_set_iterator i;
3942 /* When we remove a debug insn that is head of a BB, it remains
3943 in the AV_SET of the block, but it shouldn't. */
3944 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3945 if (EXPR_INSN_RTX (expr) == insn)
3947 av_set_iter_remove (&i);
3948 break;
3952 if (only_disconnect)
3953 remove_insn (insn);
3954 else
3956 delete_insn (insn);
3957 clear_expr (INSN_EXPR (insn));
3960 /* It is necessary to NULL these fields in case we are going to re-insert
3961 INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT
3962 case, but also for NOPs that we will return to the nop pool. */
3963 SET_PREV_INSN (insn) = NULL_RTX;
3964 SET_NEXT_INSN (insn) = NULL_RTX;
3965 set_block_for_insn (insn, NULL);
3967 return tidy_control_flow (bb, full_tidying);
3970 /* Estimate the number of insns in BB. */
3971 static int
3972 sel_estimate_number_of_insns (basic_block bb)
3974 int res = 0;
3975 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3977 for (; insn != next_tail; insn = NEXT_INSN (insn))
3978 if (NONDEBUG_INSN_P (insn))
3979 res++;
3981 return res;
3984 /* We don't need separate luids for notes or labels. */
3985 static int
3986 sel_luid_for_non_insn (rtx x)
3988 gcc_assert (NOTE_P (x) || LABEL_P (x));
3990 return -1;
3993 /* Find the proper seqno for inserting at INSN by successors.
3994 Return -1 if no successors with positive seqno exist. */
3995 static int
3996 get_seqno_by_succs (rtx_insn *insn)
3998 basic_block bb = BLOCK_FOR_INSN (insn);
3999 rtx_insn *tmp = insn, *end = BB_END (bb);
4000 int seqno;
4001 insn_t succ = NULL;
4002 succ_iterator si;
4004 while (tmp != end)
4006 tmp = NEXT_INSN (tmp);
4007 if (INSN_P (tmp))
4008 return INSN_SEQNO (tmp);
4011 seqno = INT_MAX;
4013 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
4014 if (INSN_SEQNO (succ) > 0)
4015 seqno = MIN (seqno, INSN_SEQNO (succ));
4017 if (seqno == INT_MAX)
4018 return -1;
4020 return seqno;
4023 /* Compute seqno for INSN by its preds or succs. Use OLD_SEQNO to compute
4024 seqno in corner cases. */
4025 static int
4026 get_seqno_for_a_jump (insn_t insn, int old_seqno)
4028 int seqno;
4030 gcc_assert (INSN_SIMPLEJUMP_P (insn));
4032 if (!sel_bb_head_p (insn))
4033 seqno = INSN_SEQNO (PREV_INSN (insn));
4034 else
4036 basic_block bb = BLOCK_FOR_INSN (insn);
4038 if (single_pred_p (bb)
4039 && !in_current_region_p (single_pred (bb)))
4041 /* We can have preds outside a region when splitting edges
4042 for pipelining of an outer loop. Use succ instead.
4043 There should be only one of them. */
4044 insn_t succ = NULL;
4045 succ_iterator si;
4046 bool first = true;
4048 gcc_assert (flag_sel_sched_pipelining_outer_loops
4049 && current_loop_nest);
4050 FOR_EACH_SUCC_1 (succ, si, insn,
4051 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4053 gcc_assert (first);
4054 first = false;
4057 gcc_assert (succ != NULL);
4058 seqno = INSN_SEQNO (succ);
4060 else
4062 insn_t *preds;
4063 int n;
4065 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
4067 gcc_assert (n > 0);
4068 /* For one predecessor, use simple method. */
4069 if (n == 1)
4070 seqno = INSN_SEQNO (preds[0]);
4071 else
4072 seqno = get_seqno_by_preds (insn);
4074 free (preds);
4078 /* We were unable to find a good seqno among preds. */
4079 if (seqno < 0)
4080 seqno = get_seqno_by_succs (insn);
4082 if (seqno < 0)
4084 /* The only case where this could be here legally is that the only
4085 unscheduled insn was a conditional jump that got removed and turned
4086 into this unconditional one. Initialize from the old seqno
4087 of that jump passed down to here. */
4088 seqno = old_seqno;
4091 gcc_assert (seqno >= 0);
4092 return seqno;
4095 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
4096 with positive seqno exist. */
4098 get_seqno_by_preds (rtx_insn *insn)
4100 basic_block bb = BLOCK_FOR_INSN (insn);
4101 rtx_insn *tmp = insn, *head = BB_HEAD (bb);
4102 insn_t *preds;
4103 int n, i, seqno;
4105 while (tmp != head)
4107 tmp = PREV_INSN (tmp);
4108 if (INSN_P (tmp))
4109 return INSN_SEQNO (tmp);
4112 cfg_preds (bb, &preds, &n);
4113 for (i = 0, seqno = -1; i < n; i++)
4114 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
4116 return seqno;
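/* Note the asymmetry between the two helpers: get_seqno_by_succs takes the
   MINIMUM over successors with a positive seqno (returning -1 when none
   exists), while get_seqno_by_preds takes the MAXIMUM over all
   predecessors.  For example, with predecessor seqnos { 3, 7 } the result
   is 7, whereas with successor seqnos { 3, 7 } it would be 3.  */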
4121 /* Extend pass-scope data structures for basic blocks. */
4122 void
4123 sel_extend_global_bb_info (void)
4125 sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
4128 /* Extend region-scope data structures for basic blocks. */
4129 static void
4130 extend_region_bb_info (void)
4132 sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
4135 /* Extend all data structures to fit for all basic blocks. */
4136 static void
4137 extend_bb_info (void)
4139 sel_extend_global_bb_info ();
4140 extend_region_bb_info ();
4143 /* Finalize pass-scope data structures for basic blocks. */
4144 void
4145 sel_finish_global_bb_info (void)
4147 sel_global_bb_info.release ();
4150 /* Finalize region-scope data structures for basic blocks. */
4151 static void
4152 finish_region_bb_info (void)
4154 sel_region_bb_info.release ();
4158 /* Data for each insn in current region. */
4159 vec<sel_insn_data_def> s_i_d = vNULL;
4161 /* Extend data structures for insns from current region. */
4162 static void
4163 extend_insn_data (void)
4165 int reserve;
4167 sched_extend_target ();
4168 sched_deps_init (false);
4170 /* Extend data structures for insns from current region. */
4171 reserve = (sched_max_luid + 1 - s_i_d.length ());
4172 if (reserve > 0 && ! s_i_d.space (reserve))
4174 int size;
4176 if (sched_max_luid / 2 > 1024)
4177 size = sched_max_luid + 1024;
4178 else
4179 size = 3 * sched_max_luid / 2;
4182 s_i_d.safe_grow_cleared (size);
4186 /* Finalize data structures for insns from current region. */
4187 static void
4188 finish_insns (void)
4190 unsigned i;
4192 /* Clear here all dependence contexts that may have been left over from insns
4193 that were removed during scheduling. */
4194 for (i = 0; i < s_i_d.length (); i++)
4196 sel_insn_data_def *sid_entry = &s_i_d[i];
4198 if (sid_entry->live)
4199 return_regset_to_pool (sid_entry->live);
4200 if (sid_entry->analyzed_deps)
4202 BITMAP_FREE (sid_entry->analyzed_deps);
4203 BITMAP_FREE (sid_entry->found_deps);
4204 htab_delete (sid_entry->transformed_insns);
4205 free_deps (&sid_entry->deps_context);
4207 if (EXPR_VINSN (&sid_entry->expr))
4209 clear_expr (&sid_entry->expr);
4211 /* Also, clear CANT_MOVE bit here, because we really don't want it
4212 to be passed to the next region. */
4213 CANT_MOVE_BY_LUID (i) = 0;
4217 s_i_d.release ();
4220 /* A proxy to pass initialization data to init_insn (). */
4221 static sel_insn_data_def _insn_init_ssid;
4222 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4224 /* If true create a new vinsn. Otherwise use the one from EXPR. */
4225 static bool insn_init_create_new_vinsn_p;
4227 /* Set all necessary data for initialization of the new insn[s]. */
4228 static expr_t
4229 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4231 expr_t x = &insn_init_ssid->expr;
4233 copy_expr_onside (x, expr);
4234 if (vi != NULL)
4236 insn_init_create_new_vinsn_p = false;
4237 change_vinsn_in_expr (x, vi);
4239 else
4240 insn_init_create_new_vinsn_p = true;
4242 insn_init_ssid->seqno = seqno;
4243 return x;
4246 /* Init data for INSN. */
4247 static void
4248 init_insn_data (insn_t insn)
4250 expr_t expr;
4251 sel_insn_data_t ssid = insn_init_ssid;
4253 /* The fields mentioned below are special and hence are not being
4254 propagated to the new insns. */
4255 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4256 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4257 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4259 expr = INSN_EXPR (insn);
4260 copy_expr (expr, &ssid->expr);
4261 prepare_insn_expr (insn, ssid->seqno);
4263 if (insn_init_create_new_vinsn_p)
4264 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4266 if (first_time_insn_init (insn))
4267 init_first_time_insn_data (insn);
4270 /* This is used to initialize spurious jumps generated by
4271 sel_redirect_edge (). OLD_SEQNO is used for initializing seqnos
4272 in corner cases within get_seqno_for_a_jump. */
4273 static void
4274 init_simplejump_data (insn_t insn, int old_seqno)
4276 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4277 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
4278 vNULL, true, false, false,
4279 false, true);
4280 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno);
4281 init_first_time_insn_data (insn);
4284 /* Perform deferred initialization of insns. This is used to process
4285 a new jump that may be created by redirect_edge. OLD_SEQNO is used
4286 for initializing simplejumps in init_simplejump_data. */
4287 static void
4288 sel_init_new_insn (insn_t insn, int flags, int old_seqno)
4290 /* We create data structures for bb when the first insn is emitted in it. */
4291 if (INSN_P (insn)
4292 && INSN_IN_STREAM_P (insn)
4293 && insn_is_the_only_one_in_bb_p (insn))
4295 extend_bb_info ();
4296 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4299 if (flags & INSN_INIT_TODO_LUID)
4301 sched_extend_luids ();
4302 sched_init_insn_luid (insn);
4305 if (flags & INSN_INIT_TODO_SSID)
4307 extend_insn_data ();
4308 init_insn_data (insn);
4309 clear_expr (&insn_init_ssid->expr);
4312 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4314 extend_insn_data ();
4315 init_simplejump_data (insn, old_seqno);
4318 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4319 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4323 /* Functions to init/finish work with lv sets. */
4325 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4326 static void
4327 init_lv_set (basic_block bb)
4329 gcc_assert (!BB_LV_SET_VALID_P (bb));
4331 BB_LV_SET (bb) = get_regset_from_pool ();
4332 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4333 BB_LV_SET_VALID_P (bb) = true;
4336 /* Copy liveness information to BB from FROM_BB. */
4337 static void
4338 copy_lv_set_from (basic_block bb, basic_block from_bb)
4340 gcc_assert (!BB_LV_SET_VALID_P (bb));
4342 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4343 BB_LV_SET_VALID_P (bb) = true;
4346 /* Initialize lv set of all bb headers. */
4347 void
4348 init_lv_sets (void)
4350 basic_block bb;
4352 /* Initialize of LV sets. */
4353 FOR_EACH_BB_FN (bb, cfun)
4354 init_lv_set (bb);
4356 /* Don't forget EXIT_BLOCK. */
4357 init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
4360 /* Release lv set of HEAD. */
4361 static void
4362 free_lv_set (basic_block bb)
4364 gcc_assert (BB_LV_SET (bb) != NULL);
4366 return_regset_to_pool (BB_LV_SET (bb));
4367 BB_LV_SET (bb) = NULL;
4368 BB_LV_SET_VALID_P (bb) = false;
4371 /* Finalize lv sets of all bb headers. */
4372 void
4373 free_lv_sets (void)
4375 basic_block bb;
4377 /* Don't forget EXIT_BLOCK. */
4378 free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun));
4380 /* Free LV sets. */
4381 FOR_EACH_BB_FN (bb, cfun)
4382 if (BB_LV_SET (bb))
4383 free_lv_set (bb);
4386 /* Mark AV_SET for BB as invalid, so this set will be updated the next time
4387 compute_av() processes BB. This function is called when creating new basic
4388 blocks, as well as for blocks (either new or existing) where new jumps are
4389 created when the control flow is being updated. */
4390 static void
4391 invalidate_av_set (basic_block bb)
4393 BB_AV_LEVEL (bb) = -1;
4396 /* Create initial data sets for BB (they will be invalid). */
4397 static void
4398 create_initial_data_sets (basic_block bb)
4400 if (BB_LV_SET (bb))
4401 BB_LV_SET_VALID_P (bb) = false;
4402 else
4403 BB_LV_SET (bb) = get_regset_from_pool ();
4404 invalidate_av_set (bb);
4407 /* Free av set of BB. */
4408 static void
4409 free_av_set (basic_block bb)
4411 av_set_clear (&BB_AV_SET (bb));
4412 BB_AV_LEVEL (bb) = 0;
4415 /* Free data sets of BB. */
4416 void
4417 free_data_sets (basic_block bb)
4419 free_lv_set (bb);
4420 free_av_set (bb);
4423 /* Exchange lv sets of TO and FROM. */
4424 static void
4425 exchange_lv_sets (basic_block to, basic_block from)
4428 regset to_lv_set = BB_LV_SET (to);
4430 BB_LV_SET (to) = BB_LV_SET (from);
4431 BB_LV_SET (from) = to_lv_set;
4435 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4437 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4438 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4443 /* Exchange av sets of TO and FROM. */
4444 static void
4445 exchange_av_sets (basic_block to, basic_block from)
4448 av_set_t to_av_set = BB_AV_SET (to);
4450 BB_AV_SET (to) = BB_AV_SET (from);
4451 BB_AV_SET (from) = to_av_set;
4455 int to_av_level = BB_AV_LEVEL (to);
4457 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4458 BB_AV_LEVEL (from) = to_av_level;
4462 /* Exchange data sets of TO and FROM. */
4463 void
4464 exchange_data_sets (basic_block to, basic_block from)
4466 exchange_lv_sets (to, from);
4467 exchange_av_sets (to, from);
4470 /* Copy data sets of FROM to TO. */
4471 void
4472 copy_data_sets (basic_block to, basic_block from)
4474 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4475 gcc_assert (BB_AV_SET (to) == NULL);
4477 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4478 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4480 if (BB_AV_SET_VALID_P (from))
4482 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4484 if (BB_LV_SET_VALID_P (from))
4486 gcc_assert (BB_LV_SET (to) != NULL);
4487 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4491 /* Return an av set for INSN, if any. */
4492 av_set_t
4493 get_av_set (insn_t insn)
4495 av_set_t av_set;
4497 gcc_assert (AV_SET_VALID_P (insn));
4499 if (sel_bb_head_p (insn))
4500 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4501 else
4502 av_set = NULL;
4504 return av_set;
4507 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4509 get_av_level (insn_t insn)
4511 int av_level;
4513 gcc_assert (INSN_P (insn));
4515 if (sel_bb_head_p (insn))
4516 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4517 else
4518 av_level = INSN_WS_LEVEL (insn);
4520 return av_level;
4525 /* Variables to work with control-flow graph. */
4527 /* The basic blocks that have already been processed by sched_data_update (),
4528 but haven't been passed to sel_add_bb () yet. */
4529 static vec<basic_block>
4530 last_added_blocks = vNULL;
4532 /* A pool for allocating successor infos. */
4533 static struct
4535 /* A stack for saving succs_info structures. */
4536 struct succs_info *stack;
4538 /* Its size. */
4539 int size;
4541 /* Top of the stack. */
4542 int top;
4544 /* Maximal value of the top. */
4545 int max_top;
4546 } succs_info_pool;
4548 /* Functions to work with control-flow graph. */
4550 /* Return the head (first real insn) of BB, or NULL if BB is empty. */
4551 rtx_insn *
4552 sel_bb_head (basic_block bb)
4554 rtx_insn *head;
4556 if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4558 gcc_assert (exit_insn != NULL_RTX);
4559 head = exit_insn;
4561 else
4563 rtx_note *note = bb_note (bb);
4564 head = next_nonnote_insn (note);
4566 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4567 head = NULL;
4570 return head;
4573 /* Return true if INSN is a basic block header. */
4574 bool
4575 sel_bb_head_p (insn_t insn)
4577 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4580 /* Return last insn of BB. */
4581 rtx_insn *
4582 sel_bb_end (basic_block bb)
4584 if (sel_bb_empty_p (bb))
4585 return NULL;
4587 gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
4589 return BB_END (bb);
4592 /* Return true if INSN is the last insn in its basic block. */
4593 bool
4594 sel_bb_end_p (insn_t insn)
4596 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4599 /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4600 bool
4601 sel_bb_empty_p (basic_block bb)
4603 return sel_bb_head (bb) == NULL;
4606 /* True when BB belongs to the current scheduling region. */
4607 bool
4608 in_current_region_p (basic_block bb)
4610 if (bb->index < NUM_FIXED_BLOCKS)
4611 return false;
4613 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4616 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4617 basic_block
4618 fallthru_bb_of_jump (const rtx_insn *jump)
4620 if (!JUMP_P (jump))
4621 return NULL;
4623 if (!any_condjump_p (jump))
4624 return NULL;
4626 /* A basic block that ends with a conditional jump may still have a single
4627 successor (and be followed by a barrier); we are not interested in such blocks. */
4628 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4629 return NULL;
4631 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4634 /* Remove all notes from BB. */
4635 static void
4636 init_bb (basic_block bb)
4638 remove_notes (bb_note (bb), BB_END (bb));
4639 BB_NOTE_LIST (bb) = note_list;
4642 void
4643 sel_init_bbs (bb_vec_t bbs)
4645 const struct sched_scan_info_def ssi =
4647 extend_bb_info, /* extend_bb */
4648 init_bb, /* init_bb */
4649 NULL, /* extend_insn */
4650 NULL /* init_insn */
4653 sched_scan (&ssi, bbs);
4656 /* Restore notes for the whole region. */
4657 static void
4658 sel_restore_notes (void)
4660 int bb;
4661 insn_t insn;
4663 for (bb = 0; bb < current_nr_blocks; bb++)
4665 basic_block first, last;
4667 first = EBB_FIRST_BB (bb);
4668 last = EBB_LAST_BB (bb)->next_bb;
4672 note_list = BB_NOTE_LIST (first);
4673 restore_other_notes (NULL, first);
4674 BB_NOTE_LIST (first) = NULL;
4676 FOR_BB_INSNS (first, insn)
4677 if (NONDEBUG_INSN_P (insn))
4678 reemit_notes (insn);
4680 first = first->next_bb;
4682 while (first != last);
4686 /* Free per-bb data structures. */
4687 void
4688 sel_finish_bbs (void)
4690 sel_restore_notes ();
4692 /* Remove current loop preheader from this loop. */
4693 if (current_loop_nest)
4694 sel_remove_loop_preheader ();
4696 finish_region_bb_info ();
4699 /* Return true if INSN has a single successor of type FLAGS. */
4700 bool
4701 sel_insn_has_single_succ_p (insn_t insn, int flags)
4703 insn_t succ;
4704 succ_iterator si;
4705 bool first_p = true;
4707 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4709 if (first_p)
4710 first_p = false;
4711 else
4712 return false;
4715 return true;
4718 /* Allocate successor's info. */
4719 static struct succs_info *
4720 alloc_succs_info (void)
4722 if (succs_info_pool.top == succs_info_pool.max_top)
4724 int i;
4726 if (++succs_info_pool.max_top >= succs_info_pool.size)
4727 gcc_unreachable ();
4729 i = ++succs_info_pool.top;
4730 succs_info_pool.stack[i].succs_ok.create (10);
4731 succs_info_pool.stack[i].succs_other.create (10);
4732 succs_info_pool.stack[i].probs_ok.create (10);
4734 else
4735 succs_info_pool.top++;
4737 return &succs_info_pool.stack[succs_info_pool.top];
4740 /* Free successor's info. */
4741 void
4742 free_succs_info (struct succs_info * sinfo)
4744 gcc_assert (succs_info_pool.top >= 0
4745 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4746 succs_info_pool.top--;
4748 /* Clear stale info. */
4749 sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
4750 sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
4751 sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
4752 sinfo->all_prob = 0;
4753 sinfo->succs_ok_n = 0;
4754 sinfo->all_succs_n = 0;
4757 /* Compute successor info for INSN. FLAGS are the flags passed
4758 to the FOR_EACH_SUCC_1 iterator. */
4759 struct succs_info *
4760 compute_succs_info (insn_t insn, short flags)
4762 succ_iterator si;
4763 insn_t succ;
4764 struct succs_info *sinfo = alloc_succs_info ();
4766 /* Traverse *all* successors and decide what to do with each. */
4767 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4769 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4770 perform code motion through inner loops. */
4771 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4773 if (current_flags & flags)
4775 sinfo->succs_ok.safe_push (succ);
4776 sinfo->probs_ok.safe_push (
4777 /* FIXME: Improve calculation when skipping
4778 inner loop to exits. */
4779 si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
4780 sinfo->succs_ok_n++;
4782 else
4783 sinfo->succs_other.safe_push (succ);
4785 /* Compute all_prob. */
4786 if (!si.bb_end)
4787 sinfo->all_prob = REG_BR_PROB_BASE;
4788 else
4789 sinfo->all_prob += si.e1->probability;
4791 sinfo->all_succs_n++;
4794 return sinfo;
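/* A minimal usage sketch of the pool above, for a hypothetical caller:

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_ALL);
     ... iterate over sinfo->succs_ok / sinfo->probs_ok, use sinfo->all_prob ...
     free_succs_info (sinfo);

   Infos must be released in LIFO order: free_succs_info asserts that SINFO
   is the current top of succs_info_pool.  */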
4797 /* Return the predecessors of BB in PREDS and their number in N.
4798 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4799 static void
4800 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4802 edge e;
4803 edge_iterator ei;
4805 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4807 FOR_EACH_EDGE (e, ei, bb->preds)
4809 basic_block pred_bb = e->src;
4810 insn_t bb_end = BB_END (pred_bb);
4812 if (!in_current_region_p (pred_bb))
4814 gcc_assert (flag_sel_sched_pipelining_outer_loops
4815 && current_loop_nest);
4816 continue;
4819 if (sel_bb_empty_p (pred_bb))
4820 cfg_preds_1 (pred_bb, preds, n, size);
4821 else
4823 if (*n == *size)
4824 *preds = XRESIZEVEC (insn_t, *preds,
4825 (*size = 2 * *size + 1));
4826 (*preds)[(*n)++] = bb_end;
4830 gcc_assert (*n != 0
4831 || (flag_sel_sched_pipelining_outer_loops
4832 && current_loop_nest));
4835 /* Find all predecessors of BB and record them in PREDS and their number
4836 in N. Empty blocks are skipped, and only normal (forward in-region)
4837 edges are processed. */
4838 static void
4839 cfg_preds (basic_block bb, insn_t **preds, int *n)
4841 int size = 0;
4843 *preds = NULL;
4844 *n = 0;
4845 cfg_preds_1 (bb, preds, n, &size);
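/* For illustration: if block B has predecessors A and E, where E is an empty
   block whose own predecessor is C, then cfg_preds (B, ...) returns the last
   insns of A and C -- empty blocks like E are looked through recursively by
   cfg_preds_1.  */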
4849 /* Returns true if we are moving INSN through a join point. */
4849 bool
4850 sel_num_cfg_preds_gt_1 (insn_t insn)
4852 basic_block bb;
4854 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4855 return false;
4857 bb = BLOCK_FOR_INSN (insn);
4859 while (1)
4861 if (EDGE_COUNT (bb->preds) > 1)
4862 return true;
4864 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4865 bb = EDGE_PRED (bb, 0)->src;
4867 if (!sel_bb_empty_p (bb))
4868 break;
4871 return false;
4874 /* Returns true when BB should be the end of an ebb. Adapted from the
4875 code in sched-ebb.c. */
4876 bool
4877 bb_ends_ebb_p (basic_block bb)
4879 basic_block next_bb = bb_next_bb (bb);
4880 edge e;
4882 if (next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4883 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4884 || (LABEL_P (BB_HEAD (next_bb))
4885 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4886 Work around that. */
4887 && !single_pred_p (next_bb)))
4888 return true;
4890 if (!in_current_region_p (next_bb))
4891 return true;
4893 e = find_fallthru_edge (bb->succs);
4894 if (e)
4896 gcc_assert (e->dest == next_bb);
4898 return false;
4901 return true;
4904 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4905 successor of INSN. */
4906 bool
4907 in_same_ebb_p (insn_t insn, insn_t succ)
4909 basic_block ptr = BLOCK_FOR_INSN (insn);
4911 for (;;)
4913 if (ptr == BLOCK_FOR_INSN (succ))
4914 return true;
4916 if (bb_ends_ebb_p (ptr))
4917 return false;
4919 ptr = bb_next_bb (ptr);
4922 gcc_unreachable ();
4923 return false;
4926 /* Recomputes the reverse topological order for the function and
4927 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4928 modified appropriately. */
4929 static void
4930 recompute_rev_top_order (void)
4932 int *postorder;
4933 int n_blocks, i;
4935 if (!rev_top_order_index
4936 || rev_top_order_index_len < last_basic_block_for_fn (cfun))
4938 rev_top_order_index_len = last_basic_block_for_fn (cfun);
4939 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4940 rev_top_order_index_len);
4943 postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
4945 n_blocks = post_order_compute (postorder, true, false);
4946 gcc_assert (n_basic_blocks_for_fn (cfun) == n_blocks);
4948 /* Build reverse function: for each basic block with BB->INDEX == K
4949 rev_top_order_index[K] is its reverse topological sort number. */
4950 for (i = 0; i < n_blocks; i++)
4952 gcc_assert (postorder[i] < rev_top_order_index_len);
4953 rev_top_order_index[postorder[i]] = i;
4956 free (postorder);
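/* As an example, if post_order_compute returns postorder = {2, 5, 3}, then
   rev_top_order_index[2] == 0, rev_top_order_index[5] == 1 and
   rev_top_order_index[3] == 2.  Block 3 finished last in the post-order walk,
   so it comes first topologically and gets the greatest index, matching the
   convention used by bb_top_order_comparator further below (a greater index
   sorts earlier).  */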
4959 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4960 void
4961 clear_outdated_rtx_info (basic_block bb)
4963 rtx_insn *insn;
4965 FOR_BB_INSNS (bb, insn)
4966 if (INSN_P (insn))
4968 SCHED_GROUP_P (insn) = 0;
4969 INSN_AFTER_STALL_P (insn) = 0;
4970 INSN_SCHED_TIMES (insn) = 0;
4971 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4973 /* We cannot use the changed caches, as previously we could ignore
4974 the LHS dependence due to enabled renaming and transform
4975 the expression, and currently we'll be unable to do this. */
4976 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4981 /* Return the basic block note of BB to the pool of available bb notes. */
4981 static void
4982 return_bb_to_pool (basic_block bb)
4984 rtx_note *note = bb_note (bb);
4986 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4987 && bb->aux == NULL);
4989 /* It turns out that current cfg infrastructure does not support
4990 reuse of basic blocks. Don't bother for now. */
4991 /*bb_note_pool.safe_push (note);*/
4994 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4995 static rtx_note *
4996 get_bb_note_from_pool (void)
4998 if (bb_note_pool.is_empty ())
4999 return NULL;
5000 else
5002 rtx_note *note = bb_note_pool.pop ();
5004 SET_PREV_INSN (note) = NULL_RTX;
5005 SET_NEXT_INSN (note) = NULL_RTX;
5007 return note;
5011 /* Free bb_note_pool. */
5012 void
5013 free_bb_note_pool (void)
5015 bb_note_pool.release ();
5018 /* Setup scheduler pool and successor structure. */
5019 void
5020 alloc_sched_pools (void)
5022 int succs_size;
5024 succs_size = MAX_WS + 1;
5025 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
5026 succs_info_pool.size = succs_size;
5027 succs_info_pool.top = -1;
5028 succs_info_pool.max_top = -1;
5031 /* Free the pools. */
5032 void
5033 free_sched_pools (void)
5035 int i;
5037 sched_lists_pool.release ();
5038 gcc_assert (succs_info_pool.top == -1);
5039 for (i = 0; i <= succs_info_pool.max_top; i++)
5041 succs_info_pool.stack[i].succs_ok.release ();
5042 succs_info_pool.stack[i].succs_other.release ();
5043 succs_info_pool.stack[i].probs_ok.release ();
5045 free (succs_info_pool.stack);
5049 /* Returns a position in RGN where BB can be inserted retaining
5050 topological order. */
5051 static int
5052 find_place_to_insert_bb (basic_block bb, int rgn)
5054 bool has_preds_outside_rgn = false;
5055 edge e;
5056 edge_iterator ei;
5058 /* Find whether we have preds outside the region. */
5059 FOR_EACH_EDGE (e, ei, bb->preds)
5060 if (!in_current_region_p (e->src))
5062 has_preds_outside_rgn = true;
5063 break;
5066 /* Recompute the topological order -- needed when BB has more than one
5067 pred or has preds outside the region. */
5068 if (flag_sel_sched_pipelining_outer_loops
5069 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5071 int i, bbi = bb->index, cur_bbi;
5073 recompute_rev_top_order ();
5074 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5076 cur_bbi = BB_TO_BLOCK (i);
5077 if (rev_top_order_index[bbi]
5078 < rev_top_order_index[cur_bbi])
5079 break;
5082 /* The block at position I is the one after which BB should go, so the insertion
5083 position is I + 1; the caller adds one itself, so subtract one to compensate. */
5084 return (i + 1) - 1;
5086 else if (has_preds_outside_rgn)
5088 /* This is the case when we generate an extra empty block
5089 to serve as region head during pipelining. */
5090 e = EDGE_SUCC (bb, 0);
5091 gcc_assert (EDGE_COUNT (bb->succs) == 1
5092 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5093 && (BLOCK_TO_BB (e->dest->index) == 0));
5094 return -1;
5097 /* We don't have preds outside the region. We should have
5098 a single pred, because the multiple-preds case arises from
5099 the pipelining of outer loops, and that is handled above.
5100 Just take the bbi of this single pred. */
5101 if (EDGE_COUNT (bb->succs) > 0)
5103 int pred_bbi;
5105 gcc_assert (EDGE_COUNT (bb->preds) == 1);
5107 pred_bbi = EDGE_PRED (bb, 0)->src->index;
5108 return BLOCK_TO_BB (pred_bbi);
5110 else
5111 /* BB has no successors. It is safe to put it in the end. */
5112 return current_nr_blocks - 1;
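/* Note that the value returned above is the position after which BB should be
   inserted: the caller (add_block_to_current_region) adds one to it before
   use, so returning -1 places BB at the very head of the region.  */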
5115 /* Deletes an empty basic block freeing its data. */
5116 static void
5117 delete_and_free_basic_block (basic_block bb)
5119 gcc_assert (sel_bb_empty_p (bb));
5121 if (BB_LV_SET (bb))
5122 free_lv_set (bb);
5124 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5126 /* Can't assert av_set properties because we use sel_remove_bb
5127 when removing loop preheader from the region. At the point of
5128 removing the preheader we already have deallocated sel_region_bb_info. */
5129 gcc_assert (BB_LV_SET (bb) == NULL
5130 && !BB_LV_SET_VALID_P (bb)
5131 && BB_AV_LEVEL (bb) == 0
5132 && BB_AV_SET (bb) == NULL);
5134 delete_basic_block (bb);
5137 /* Add BB to the current region and update the region data. */
5138 static void
5139 add_block_to_current_region (basic_block bb)
5141 int i, pos, bbi = -2, rgn;
5143 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5144 bbi = find_place_to_insert_bb (bb, rgn);
5145 bbi += 1;
5146 pos = RGN_BLOCKS (rgn) + bbi;
5148 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5149 && ebb_head[bbi] == pos);
5151 /* Make a place for the new block. */
5152 extend_regions ();
5154 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5155 BLOCK_TO_BB (rgn_bb_table[i])++;
5157 memmove (rgn_bb_table + pos + 1,
5158 rgn_bb_table + pos,
5159 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5161 /* Initialize data for BB. */
5162 rgn_bb_table[pos] = bb->index;
5163 BLOCK_TO_BB (bb->index) = bbi;
5164 CONTAINING_RGN (bb->index) = rgn;
5166 RGN_NR_BLOCKS (rgn)++;
5168 for (i = rgn + 1; i <= nr_regions; i++)
5169 RGN_BLOCKS (i)++;
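/* A small worked example of the bookkeeping above: suppose region RGN
   occupies rgn_bb_table[10..13] and BB is inserted at bbi == 2, so pos == 12.
   BLOCK_TO_BB of the blocks at the old positions 12..13 is incremented,
   everything from position 12 to the end of the used part of rgn_bb_table is
   shifted right by one, rgn_bb_table[12] becomes bb->index with
   BLOCK_TO_BB == 2, RGN_NR_BLOCKS (RGN) grows from 4 to 5, and RGN_BLOCKS of
   every later region is incremented so their offsets stay correct.  */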
5172 /* Remove BB from the current region and update the region data. */
5173 static void
5174 remove_bb_from_region (basic_block bb)
5176 int i, pos, bbi = -2, rgn;
5178 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5179 bbi = BLOCK_TO_BB (bb->index);
5180 pos = RGN_BLOCKS (rgn) + bbi;
5182 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5183 && ebb_head[bbi] == pos);
5185 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5186 BLOCK_TO_BB (rgn_bb_table[i])--;
5188 memmove (rgn_bb_table + pos,
5189 rgn_bb_table + pos + 1,
5190 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5192 RGN_NR_BLOCKS (rgn)--;
5193 for (i = rgn + 1; i <= nr_regions; i++)
5194 RGN_BLOCKS (i)--;
5197 /* Add BB to the current region and update all data. If BB is NULL, add all
5198 blocks from last_added_blocks vector. */
5199 static void
5200 sel_add_bb (basic_block bb)
5202 /* Extend luids so that new notes will receive zero luids. */
5203 sched_extend_luids ();
5204 sched_init_bbs ();
5205 sel_init_bbs (last_added_blocks);
5207 /* When BB is passed explicitly, the vector should contain
5208 a single element that equals BB; otherwise, the vector
5209 should not be NULL. */
5210 gcc_assert (last_added_blocks.exists ());
5212 if (bb != NULL)
5214 gcc_assert (last_added_blocks.length () == 1
5215 && last_added_blocks[0] == bb);
5216 add_block_to_current_region (bb);
5218 /* We associate creating/deleting data sets with the first insn
5219 appearing / disappearing in the bb. */
5220 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5221 create_initial_data_sets (bb);
5223 last_added_blocks.release ();
5225 else
5226 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5228 int i;
5229 basic_block temp_bb = NULL;
5231 for (i = 0;
5232 last_added_blocks.iterate (i, &bb); i++)
5234 add_block_to_current_region (bb);
5235 temp_bb = bb;
5238 /* We need to fetch at least one bb so we know the region
5239 to update. */
5240 gcc_assert (temp_bb != NULL);
5241 bb = temp_bb;
5243 last_added_blocks.release ();
5246 rgn_setup_region (CONTAINING_RGN (bb->index));
5249 /* Remove BB from the current region and update all data.
5250 If REMOVE_FROM_CFG_P is true, also remove the block from the CFG. */
5251 static void
5252 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5254 unsigned idx = bb->index;
5256 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5258 remove_bb_from_region (bb);
5259 return_bb_to_pool (bb);
5260 bitmap_clear_bit (blocks_to_reschedule, idx);
5262 if (remove_from_cfg_p)
5264 basic_block succ = single_succ (bb);
5265 delete_and_free_basic_block (bb);
5266 set_immediate_dominator (CDI_DOMINATORS, succ,
5267 recompute_dominator (CDI_DOMINATORS, succ));
5270 rgn_setup_region (CONTAINING_RGN (idx));
5273 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5274 static void
5275 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5277 if (in_current_region_p (merge_bb))
5278 concat_note_lists (BB_NOTE_LIST (empty_bb),
5279 &BB_NOTE_LIST (merge_bb));
5280 BB_NOTE_LIST (empty_bb) = NULL;
5284 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5285 region, but keep it in CFG. */
5286 static void
5287 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5289 /* The block should contain just a note or a label.
5290 We try to check whether it is unused below. */
5291 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5292 || LABEL_P (BB_HEAD (empty_bb)));
5294 /* If basic block has predecessors or successors, redirect them. */
5295 if (remove_from_cfg_p
5296 && (EDGE_COUNT (empty_bb->preds) > 0
5297 || EDGE_COUNT (empty_bb->succs) > 0))
5299 basic_block pred;
5300 basic_block succ;
5302 /* We need to init PRED and SUCC before redirecting edges. */
5303 if (EDGE_COUNT (empty_bb->preds) > 0)
5305 edge e;
5307 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5309 e = EDGE_PRED (empty_bb, 0);
5310 gcc_assert (e->src == empty_bb->prev_bb
5311 && (e->flags & EDGE_FALLTHRU));
5313 pred = empty_bb->prev_bb;
5315 else
5316 pred = NULL;
5318 if (EDGE_COUNT (empty_bb->succs) > 0)
5320 /* We do not check for fallthru-ness here as above, because
5321 after removing a jump the edge may no longer be a fallthru edge. */
5322 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5323 succ = EDGE_SUCC (empty_bb, 0)->dest;
5325 else
5326 succ = NULL;
5328 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5330 edge e = EDGE_PRED (empty_bb, 0);
5332 if (e->flags & EDGE_FALLTHRU)
5333 redirect_edge_succ_nodup (e, succ);
5334 else
5335 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5338 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5340 edge e = EDGE_SUCC (empty_bb, 0);
5342 if (find_edge (pred, e->dest) == NULL)
5343 redirect_edge_pred (e, pred);
5347 /* Finish removing. */
5348 sel_remove_bb (empty_bb, remove_from_cfg_p);
5351 /* An implementation of create_basic_block hook, which additionally updates
5352 per-bb data structures. */
5353 static basic_block
5354 sel_create_basic_block (void *headp, void *endp, basic_block after)
5356 basic_block new_bb;
5357 rtx_note *new_bb_note;
5359 gcc_assert (flag_sel_sched_pipelining_outer_loops
5360 || !last_added_blocks.exists ());
5362 new_bb_note = get_bb_note_from_pool ();
5364 if (new_bb_note == NULL_RTX)
5365 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5366 else
5368 new_bb = create_basic_block_structure ((rtx_insn *) headp,
5369 (rtx_insn *) endp,
5370 new_bb_note, after);
5371 new_bb->aux = NULL;
5374 last_added_blocks.safe_push (new_bb);
5376 return new_bb;
5379 /* Implement sched_init_only_bb (). */
5380 static void
5381 sel_init_only_bb (basic_block bb, basic_block after)
5383 gcc_assert (after == NULL);
5385 extend_regions ();
5386 rgn_make_new_region_out_of_new_block (bb);
5389 /* Update the latch when we've split or merged it from block FROM to TO.
5390 This should be checked for all outer loops, too. */
5391 static void
5392 change_loops_latches (basic_block from, basic_block to)
5394 gcc_assert (from != to);
5396 if (current_loop_nest)
5398 struct loop *loop;
5400 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5401 if (considered_for_pipelining_p (loop) && loop->latch == from)
5403 gcc_assert (loop == current_loop_nest);
5404 loop->latch = to;
5405 gcc_assert (loop_latch_edge (loop));
5410 /* Splits BB into two basic blocks, adding the new block to the region and extending
5411 per-bb data structures. Returns the newly created bb. */
5412 static basic_block
5413 sel_split_block (basic_block bb, rtx after)
5415 basic_block new_bb;
5416 insn_t insn;
5418 new_bb = sched_split_block_1 (bb, after);
5419 sel_add_bb (new_bb);
5421 /* This should be called after sel_add_bb, because it uses
5422 CONTAINING_RGN for the new block, which is not initialized before that call.
5423 FIXME: this function may be a no-op now. */
5424 change_loops_latches (bb, new_bb);
5426 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5427 FOR_BB_INSNS (new_bb, insn)
5428 if (INSN_P (insn))
5429 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5431 if (sel_bb_empty_p (bb))
5433 gcc_assert (!sel_bb_empty_p (new_bb));
5435 /* NEW_BB has data sets that need to be updated and BB holds
5436 data sets that should be removed. Exchange these data sets
5437 so that we won't lose BB's valid data sets. */
5438 exchange_data_sets (new_bb, bb);
5439 free_data_sets (bb);
5442 if (!sel_bb_empty_p (new_bb)
5443 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5444 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5446 return new_bb;
5449 /* If BB ends with a jump insn whose UID is at least PREV_MAX_UID, return it.
5450 Otherwise return NULL. */
5451 static rtx_insn *
5452 check_for_new_jump (basic_block bb, int prev_max_uid)
5454 rtx_insn *end;
5456 end = sel_bb_end (bb);
5457 if (end && INSN_UID (end) >= prev_max_uid)
5458 return end;
5459 return NULL;
5462 /* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5463 New means having UID at least equal to PREV_MAX_UID. */
5464 static rtx_insn *
5465 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5467 rtx_insn *jump;
5469 /* Return immediately if no new insns were emitted. */
5470 if (get_max_uid () == prev_max_uid)
5471 return NULL;
5473 /* Now check both blocks for new jumps. There will only ever be one. */
5474 if ((jump = check_for_new_jump (from, prev_max_uid)))
5475 return jump;
5477 if (jump_bb != NULL
5478 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5479 return jump;
5480 return NULL;
5483 /* Splits E and adds the newly created basic block to the current region.
5484 Returns this basic block. */
5485 basic_block
5486 sel_split_edge (edge e)
5488 basic_block new_bb, src, other_bb = NULL;
5489 int prev_max_uid;
5490 rtx_insn *jump;
5492 src = e->src;
5493 prev_max_uid = get_max_uid ();
5494 new_bb = split_edge (e);
5496 if (flag_sel_sched_pipelining_outer_loops
5497 && current_loop_nest)
5499 int i;
5500 basic_block bb;
5502 /* Some of the basic blocks might not have been added to the loop.
5503 Add them here, until this is fixed in force_fallthru. */
5504 for (i = 0;
5505 last_added_blocks.iterate (i, &bb); i++)
5506 if (!bb->loop_father)
5508 add_bb_to_loop (bb, e->dest->loop_father);
5510 gcc_assert (!other_bb && (new_bb->index != bb->index));
5511 other_bb = bb;
5515 /* Add all last_added_blocks to the region. */
5516 sel_add_bb (NULL);
5518 jump = find_new_jump (src, new_bb, prev_max_uid);
5519 if (jump)
5520 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5522 /* Put the correct lv set on this block. */
5523 if (other_bb && !sel_bb_empty_p (other_bb))
5524 compute_live (sel_bb_head (other_bb));
5526 return new_bb;
5529 /* Implement sched_create_empty_bb (). */
5530 static basic_block
5531 sel_create_empty_bb (basic_block after)
5533 basic_block new_bb;
5535 new_bb = sched_create_empty_bb_1 (after);
5537 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5538 later. */
5539 gcc_assert (last_added_blocks.length () == 1
5540 && last_added_blocks[0] == new_bb);
5542 last_added_blocks.release ();
5543 return new_bb;
5546 /* Implement sched_create_recovery_block. ORIG_INSN is where the block
5547 will be split to insert a check. */
5548 basic_block
5549 sel_create_recovery_block (insn_t orig_insn)
5551 basic_block first_bb, second_bb, recovery_block;
5552 basic_block before_recovery = NULL;
5553 rtx_insn *jump;
5555 first_bb = BLOCK_FOR_INSN (orig_insn);
5556 if (sel_bb_end_p (orig_insn))
5558 /* Avoid introducing an empty block while splitting. */
5559 gcc_assert (single_succ_p (first_bb));
5560 second_bb = single_succ (first_bb);
5562 else
5563 second_bb = sched_split_block (first_bb, orig_insn);
5565 recovery_block = sched_create_recovery_block (&before_recovery);
5566 if (before_recovery)
5567 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR_FOR_FN (cfun));
5569 gcc_assert (sel_bb_empty_p (recovery_block));
5570 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5571 if (current_loops != NULL)
5572 add_bb_to_loop (recovery_block, first_bb->loop_father);
5574 sel_add_bb (recovery_block);
5576 jump = BB_END (recovery_block);
5577 gcc_assert (sel_bb_head (recovery_block) == jump);
5578 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5580 return recovery_block;
5583 /* Merge basic block B into basic block A. */
5584 static void
5585 sel_merge_blocks (basic_block a, basic_block b)
5587 gcc_assert (sel_bb_empty_p (b)
5588 && EDGE_COUNT (b->preds) == 1
5589 && EDGE_PRED (b, 0)->src == b->prev_bb);
5591 move_bb_info (b->prev_bb, b);
5592 remove_empty_bb (b, false);
5593 merge_blocks (a, b);
5594 change_loops_latches (b, a);
5597 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5598 data structures for possibly created bb and insns. */
5599 void
5600 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5602 basic_block jump_bb, src, orig_dest = e->dest;
5603 int prev_max_uid;
5604 rtx_insn *jump;
5605 int old_seqno = -1;
5607 /* This function is now used only for bookkeeping code creation, where
5608 we'll never get the single pred of orig_dest block and thus will not
5609 hit unreachable blocks when updating dominator info. */
5610 gcc_assert (!sel_bb_empty_p (e->src)
5611 && !single_pred_p (orig_dest));
5612 src = e->src;
5613 prev_max_uid = get_max_uid ();
5614 /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5615 when the conditional jump being redirected may become unconditional. */
5616 if (any_condjump_p (BB_END (src))
5617 && INSN_SEQNO (BB_END (src)) >= 0)
5618 old_seqno = INSN_SEQNO (BB_END (src));
5620 jump_bb = redirect_edge_and_branch_force (e, to);
5621 if (jump_bb != NULL)
5622 sel_add_bb (jump_bb);
5624 /* This function could not be used to spoil the loop structure by now,
5625 thus we don't care to update anything. But check it to be sure. */
5626 if (current_loop_nest
5627 && pipelining_p)
5628 gcc_assert (loop_latch_edge (current_loop_nest));
5630 jump = find_new_jump (src, jump_bb, prev_max_uid);
5631 if (jump)
5632 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP,
5633 old_seqno);
5634 set_immediate_dominator (CDI_DOMINATORS, to,
5635 recompute_dominator (CDI_DOMINATORS, to));
5636 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5637 recompute_dominator (CDI_DOMINATORS, orig_dest));
5640 /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5641 redirected edge are in reverse topological order. */
5642 bool
5643 sel_redirect_edge_and_branch (edge e, basic_block to)
5645 bool latch_edge_p;
5646 basic_block src, orig_dest = e->dest;
5647 int prev_max_uid;
5648 rtx_insn *jump;
5649 edge redirected;
5650 bool recompute_toporder_p = false;
5651 bool maybe_unreachable = single_pred_p (orig_dest);
5652 int old_seqno = -1;
5654 latch_edge_p = (pipelining_p
5655 && current_loop_nest
5656 && e == loop_latch_edge (current_loop_nest));
5658 src = e->src;
5659 prev_max_uid = get_max_uid ();
5661 /* Compute and pass old_seqno down to sel_init_new_insn only for the case
5662 when the conditional jump being redirected may become unconditional. */
5663 if (any_condjump_p (BB_END (src))
5664 && INSN_SEQNO (BB_END (src)) >= 0)
5665 old_seqno = INSN_SEQNO (BB_END (src));
5667 redirected = redirect_edge_and_branch (e, to);
5669 gcc_assert (redirected && !last_added_blocks.exists ());
5671 /* When we've redirected a latch edge, update the header. */
5672 if (latch_edge_p)
5674 current_loop_nest->header = to;
5675 gcc_assert (loop_latch_edge (current_loop_nest));
5678 /* In rare situations, the topological relation between the blocks connected
5679 by the redirected edge can change (see PR42245 for an example). Update
5680 block_to_bb/bb_to_block. */
5681 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5682 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5683 recompute_toporder_p = true;
5685 jump = find_new_jump (src, NULL, prev_max_uid);
5686 if (jump)
5687 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP, old_seqno);
5689 /* Only update dominator info when we don't have unreachable blocks.
5690 Otherwise we'll update in maybe_tidy_empty_bb. */
5691 if (!maybe_unreachable)
5693 set_immediate_dominator (CDI_DOMINATORS, to,
5694 recompute_dominator (CDI_DOMINATORS, to));
5695 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5696 recompute_dominator (CDI_DOMINATORS, orig_dest));
5698 return recompute_toporder_p;
5701 /* This variable holds the cfg hooks used by the selective scheduler. */
5702 static struct cfg_hooks sel_cfg_hooks;
5704 /* Register sel-sched cfg hooks. */
5705 void
5706 sel_register_cfg_hooks (void)
5708 sched_split_block = sel_split_block;
5710 orig_cfg_hooks = get_cfg_hooks ();
5711 sel_cfg_hooks = orig_cfg_hooks;
5713 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5715 set_cfg_hooks (sel_cfg_hooks);
5717 sched_init_only_bb = sel_init_only_bb;
5718 sched_split_block = sel_split_block;
5719 sched_create_empty_bb = sel_create_empty_bb;
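/* Note that only the create_basic_block hook is replaced in the cfg_hooks
   structure itself; block splitting, empty-block creation and single-block
   initialization are instead redirected through the scheduler's own function
   pointers (sched_split_block, sched_create_empty_bb, sched_init_only_bb)
   set above.  */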
5722 /* Unregister sel-sched cfg hooks. */
5723 void
5724 sel_unregister_cfg_hooks (void)
5726 sched_create_empty_bb = NULL;
5727 sched_split_block = NULL;
5728 sched_init_only_bb = NULL;
5730 set_cfg_hooks (orig_cfg_hooks);
5734 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5735 LABEL is where this jump should be directed. */
5736 rtx_insn *
5737 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5739 rtx_insn *insn_rtx;
5741 gcc_assert (!INSN_P (pattern));
5743 start_sequence ();
5745 if (label == NULL_RTX)
5746 insn_rtx = emit_insn (pattern);
5747 else if (DEBUG_INSN_P (label))
5748 insn_rtx = emit_debug_insn (pattern);
5749 else
5751 insn_rtx = emit_jump_insn (pattern);
5752 JUMP_LABEL (insn_rtx) = label;
5753 ++LABEL_NUSES (label);
5756 end_sequence ();
5758 sched_extend_luids ();
5759 sched_extend_target ();
5760 sched_deps_init (false);
5762 /* Initialize INSN_CODE now. */
5763 recog_memoized (insn_rtx);
5764 return insn_rtx;
5767 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5768 must not be clonable. */
5769 vinsn_t
5770 create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p)
5772 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5774 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5775 return vinsn_create (insn_rtx, force_unique_p);
5778 /* Create a copy of INSN_RTX. */
5779 rtx_insn *
5780 create_copy_of_insn_rtx (rtx insn_rtx)
5782 rtx_insn *res;
5783 rtx link;
5785 if (DEBUG_INSN_P (insn_rtx))
5786 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5787 insn_rtx);
5789 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5791 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5792 NULL_RTX);
5794 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5795 since mark_jump_label will make them. REG_LABEL_TARGETs are created
5796 there too, but are supposed to be sticky, so we copy them. */
5797 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5798 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5799 && REG_NOTE_KIND (link) != REG_EQUAL
5800 && REG_NOTE_KIND (link) != REG_EQUIV)
5802 if (GET_CODE (link) == EXPR_LIST)
5803 add_reg_note (res, REG_NOTE_KIND (link),
5804 copy_insn_1 (XEXP (link, 0)));
5805 else
5806 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
5809 return res;
5812 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5813 void
5814 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5816 vinsn_detach (EXPR_VINSN (expr));
5818 EXPR_VINSN (expr) = new_vinsn;
5819 vinsn_attach (new_vinsn);
5822 /* Helpers for global init. */
5823 /* This structure is used to call the existing bundling mechanism
5824 and to calculate insn priorities. */
5825 static struct haifa_sched_info sched_sel_haifa_sched_info =
5827 NULL, /* init_ready_list */
5828 NULL, /* can_schedule_ready_p */
5829 NULL, /* schedule_more_p */
5830 NULL, /* new_ready */
5831 NULL, /* rgn_rank */
5832 sel_print_insn, /* rgn_print_insn */
5833 contributes_to_priority,
5834 NULL, /* insn_finishes_block_p */
5836 NULL, NULL,
5837 NULL, NULL,
5838 0, 0,
5840 NULL, /* add_remove_insn */
5841 NULL, /* begin_schedule_ready */
5842 NULL, /* begin_move_insn */
5843 NULL, /* advance_target_bb */
5845 NULL,
5846 NULL,
5848 SEL_SCHED | NEW_BBS
5851 /* Setup special insns used in the scheduler. */
5852 void
5853 setup_nop_and_exit_insns (void)
5855 gcc_assert (nop_pattern == NULL_RTX
5856 && exit_insn == NULL_RTX);
5858 nop_pattern = constm1_rtx;
5860 start_sequence ();
5861 emit_insn (nop_pattern);
5862 exit_insn = get_insns ();
5863 end_sequence ();
5864 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR_FOR_FN (cfun));
5867 /* Free special insns used in the scheduler. */
5868 void
5869 free_nop_and_exit_insns (void)
5871 exit_insn = NULL;
5872 nop_pattern = NULL_RTX;
5875 /* Setup a special vinsn used in new insns initialization. */
5876 void
5877 setup_nop_vinsn (void)
5879 nop_vinsn = vinsn_create (exit_insn, false);
5880 vinsn_attach (nop_vinsn);
5883 /* Free a special vinsn used in new insns initialization. */
5884 void
5885 free_nop_vinsn (void)
5887 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5888 vinsn_detach (nop_vinsn);
5889 nop_vinsn = NULL;
5892 /* Call a set_sched_flags hook. */
5893 void
5894 sel_set_sched_flags (void)
5896 /* ??? This means that set_sched_flags were called, and we decided to
5897 support speculation. However, set_sched_flags also modifies flags
5898 on current_sched_info, doing this only at global init. And we
5899 sometimes change c_s_i later. So put the correct flags again. */
5900 if (spec_info && targetm.sched.set_sched_flags)
5901 targetm.sched.set_sched_flags (spec_info);
5904 /* Setup pointers to global sched info structures. */
5905 void
5906 sel_setup_sched_infos (void)
5908 rgn_setup_common_sched_info ();
5910 memcpy (&sel_common_sched_info, common_sched_info,
5911 sizeof (sel_common_sched_info));
5913 sel_common_sched_info.fix_recovery_cfg = NULL;
5914 sel_common_sched_info.add_block = NULL;
5915 sel_common_sched_info.estimate_number_of_insns
5916 = sel_estimate_number_of_insns;
5917 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5918 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5920 common_sched_info = &sel_common_sched_info;
5922 current_sched_info = &sched_sel_haifa_sched_info;
5923 current_sched_info->sched_max_insns_priority =
5924 get_rgn_sched_max_insns_priority ();
5926 sel_set_sched_flags ();
5930 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX,
5931 *BB_ORD_INDEX after that is increased. */
5932 static void
5933 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5935 RGN_NR_BLOCKS (rgn) += 1;
5936 RGN_DONT_CALC_DEPS (rgn) = 0;
5937 RGN_HAS_REAL_EBB (rgn) = 0;
5938 CONTAINING_RGN (bb->index) = rgn;
5939 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5940 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5941 (*bb_ord_index)++;
5943 /* FIXME: it is true only when not scheduling ebbs. */
5944 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5947 /* Functions to support pipelining of outer loops. */
5949 /* Creates a new empty region and returns its number. */
5950 static int
5951 sel_create_new_region (void)
5953 int new_rgn_number = nr_regions;
5955 RGN_NR_BLOCKS (new_rgn_number) = 0;
5957 /* FIXME: This will work only when EBBs are not created. */
5958 if (new_rgn_number != 0)
5959 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5960 RGN_NR_BLOCKS (new_rgn_number - 1);
5961 else
5962 RGN_BLOCKS (new_rgn_number) = 0;
5964 /* Set the blocks of the next region so the other functions may
5965 calculate the number of blocks in the region. */
5966 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5967 RGN_NR_BLOCKS (new_rgn_number);
5969 nr_regions++;
5971 return new_rgn_number;
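/* For instance, if the previous region ends at rgn_bb_table offset 14, the
   new region also starts at offset 14 with zero blocks, and RGN_BLOCKS of the
   region after it is set to 14 as well, so block counts computed from the
   offsets stay consistent until sel_add_block_to_region fills the region in.  */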
5974 /* If X has a smaller topological sort number than Y, returns -1;
5975 if greater, returns 1. */
5976 static int
5977 bb_top_order_comparator (const void *x, const void *y)
5979 basic_block bb1 = *(const basic_block *) x;
5980 basic_block bb2 = *(const basic_block *) y;
5982 gcc_assert (bb1 == bb2
5983 || rev_top_order_index[bb1->index]
5984 != rev_top_order_index[bb2->index]);
5986 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5987 bbs with greater number should go earlier. */
5988 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5989 return -1;
5990 else
5991 return 1;
5994 /* Create a region for LOOP and return its number. If we don't want
5995 to pipeline LOOP, return -1. */
5996 static int
5997 make_region_from_loop (struct loop *loop)
5999 unsigned int i;
6000 int new_rgn_number = -1;
6001 struct loop *inner;
6003 /* Basic block index, to be assigned to BLOCK_TO_BB. */
6004 int bb_ord_index = 0;
6005 basic_block *loop_blocks;
6006 basic_block preheader_block;
6008 if (loop->num_nodes
6009 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
6010 return -1;
6012 /* Don't pipeline loops whose latch belongs to one of their inner loops. */
6013 for (inner = loop->inner; inner; inner = inner->inner)
6014 if (flow_bb_inside_loop_p (inner, loop->latch))
6015 return -1;
6017 loop->ninsns = num_loop_insns (loop);
6018 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
6019 return -1;
6021 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
6023 for (i = 0; i < loop->num_nodes; i++)
6024 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
6026 free (loop_blocks);
6027 return -1;
6030 preheader_block = loop_preheader_edge (loop)->src;
6031 gcc_assert (preheader_block);
6032 gcc_assert (loop_blocks[0] == loop->header);
6034 new_rgn_number = sel_create_new_region ();
6036 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
6037 bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
6039 for (i = 0; i < loop->num_nodes; i++)
6041 /* Add only those blocks that haven't been scheduled in the inner loop.
6042 The exception is the basic blocks with bookkeeping code - they should
6043 be added to the region (and they actually don't belong to the loop
6044 body, but to the region containing that loop body). */
6046 gcc_assert (new_rgn_number >= 0);
6048 if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
6050 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
6051 new_rgn_number);
6052 bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
6056 free (loop_blocks);
6057 MARK_LOOP_FOR_PIPELINING (loop);
6059 return new_rgn_number;
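/* So a pipelining region built here consists of the loop preheader block
   followed by the loop body blocks in topological order, except for blocks
   already placed into regions of inner loops (tracked in bbs_in_loop_rgns).  */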
6062 /* Create a new region from preheader blocks LOOP_BLOCKS. */
6063 void
6064 make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
6066 unsigned int i;
6067 int new_rgn_number = -1;
6068 basic_block bb;
6070 /* Basic block index, to be assigned to BLOCK_TO_BB. */
6071 int bb_ord_index = 0;
6073 new_rgn_number = sel_create_new_region ();
6075 FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
6077 gcc_assert (new_rgn_number >= 0);
6079 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6082 vec_free (loop_blocks);
6086 /* Create region(s) from loop nest LOOP, such that inner loops will be
6087 pipelined before outer loops. Returns true when a region for LOOP
6088 is created. */
6089 static bool
6090 make_regions_from_loop_nest (struct loop *loop)
6092 struct loop *cur_loop;
6093 int rgn_number;
6095 /* Traverse all inner nodes of the loop. */
6096 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6097 if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
6098 return false;
6100 /* At this moment all regular inner loops should have been pipelined.
6101 Try to create a region from this loop. */
6102 rgn_number = make_region_from_loop (loop);
6104 if (rgn_number < 0)
6105 return false;
6107 loop_nests.safe_push (loop);
6108 return true;
6111 /* Initialize the data structures needed for pipelining. */
6112 void
6113 sel_init_pipelining (void)
6115 /* Collect loop information to be used in outer loops pipelining. */
6116 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6117 | LOOPS_HAVE_FALLTHRU_PREHEADERS
6118 | LOOPS_HAVE_RECORDED_EXITS
6119 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6120 current_loop_nest = NULL;
6122 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block_for_fn (cfun));
6123 bitmap_clear (bbs_in_loop_rgns);
6125 recompute_rev_top_order ();
6128 /* Returns a struct loop for region RGN. */
6129 loop_p
6130 get_loop_nest_for_rgn (unsigned int rgn)
6132 /* Regions created with extend_rgns don't have corresponding loop nests,
6133 because they don't represent loops. */
6134 if (rgn < loop_nests.length ())
6135 return loop_nests[rgn];
6136 else
6137 return NULL;
6140 /* True when LOOP was included into pipelining regions. */
6141 bool
6142 considered_for_pipelining_p (struct loop *loop)
6144 if (loop_depth (loop) == 0)
6145 return false;
6147 /* Now, the loop could be too large or irreducible. Check whether its
6148 region is in LOOP_NESTS.
6149 We determine the region number of LOOP as the region number of its
6150 latch. We can't use header here, because this header could be
6151 just removed preheader and it will give us the wrong region number.
6152 Latch can't be used because it could be in the inner loop too. */
6153 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
6155 int rgn = CONTAINING_RGN (loop->latch->index);
6157 gcc_assert ((unsigned) rgn < loop_nests.length ());
6158 return true;
6161 return false;
6164 /* Makes regions from the rest of the blocks, after loops are chosen
6165 for pipelining. */
6166 static void
6167 make_regions_from_the_rest (void)
6169 int cur_rgn_blocks;
6170 int *loop_hdr;
6171 int i;
6173 basic_block bb;
6174 edge e;
6175 edge_iterator ei;
6176 int *degree;
6178 /* Index in rgn_bb_table where to start allocating new regions. */
6179 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
6181 /* Make regions from all the remaining basic blocks - those that don't belong to
6182 any loop or belong to irreducible loops. Prepare the data structures
6183 for extend_rgns. */
6185 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6186 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6187 loop. */
6188 loop_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
6189 degree = XCNEWVEC (int, last_basic_block_for_fn (cfun));
6192 /* For each basic block that belongs to some loop assign the number
6193 of innermost loop it belongs to. */
6194 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
6195 loop_hdr[i] = -1;
6197 FOR_EACH_BB_FN (bb, cfun)
6199 if (bb->loop_father && bb->loop_father->num != 0
6200 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6201 loop_hdr[bb->index] = bb->loop_father->num;
6204 /* For each basic block, the degree is calculated as the number of incoming
6205 edges coming from bbs that are not yet scheduled.
6206 The basic blocks that are already scheduled have a degree of zero. */
6207 FOR_EACH_BB_FN (bb, cfun)
6209 degree[bb->index] = 0;
6211 if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
6213 FOR_EACH_EDGE (e, ei, bb->preds)
6214 if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
6215 degree[bb->index]++;
6217 else
6218 degree[bb->index] = -1;
6221 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6223 /* Any block that did not end up in a region is placed into a region
6224 by itself. */
6225 FOR_EACH_BB_FN (bb, cfun)
6226 if (degree[bb->index] >= 0)
6228 rgn_bb_table[cur_rgn_blocks] = bb->index;
6229 RGN_NR_BLOCKS (nr_regions) = 1;
6230 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6231 RGN_DONT_CALC_DEPS (nr_regions) = 0;
6232 RGN_HAS_REAL_EBB (nr_regions) = 0;
6233 CONTAINING_RGN (bb->index) = nr_regions++;
6234 BLOCK_TO_BB (bb->index) = 0;
6237 free (degree);
6238 free (loop_hdr);
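/* In the DEGREE array above, -1 marks blocks that already sit in loop regions
   and must be skipped, while a non-negative value counts the incoming edges
   from blocks not yet assigned to loop regions; blocks whose degree is still
   non-negative after extend_rgns each get a singleton region of their own.  */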
6241 /* Free data structures used in pipelining of loops. */
6242 void sel_finish_pipelining (void)
6244 struct loop *loop;
6246 /* Release aux fields so we don't free them later by mistake. */
6247 FOR_EACH_LOOP (loop, 0)
6248 loop->aux = NULL;
6250 loop_optimizer_finalize ();
6252 loop_nests.release ();
6254 free (rev_top_order_index);
6255 rev_top_order_index = NULL;
6258 /* This function replaces the find_rgns when
6259 FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
6260 void
6261 sel_find_rgns (void)
6263 sel_init_pipelining ();
6264 extend_regions ();
6266 if (current_loops)
6268 loop_p loop;
6270 FOR_EACH_LOOP (loop, (flag_sel_sched_pipelining_outer_loops
6271 ? LI_FROM_INNERMOST
6272 : LI_ONLY_INNERMOST))
6273 make_regions_from_loop_nest (loop);
6276 /* Make regions from all the remaining basic blocks and schedule them.
6277 These include blocks that don't belong to any loop or belong
6278 to irreducible loops. */
6279 make_regions_from_the_rest ();
6281 /* We don't need bbs_in_loop_rgns anymore. */
6282 sbitmap_free (bbs_in_loop_rgns);
6283 bbs_in_loop_rgns = NULL;
6286 /* Add the preheader blocks from the previous loop to the current region, taking
6287 them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record them in *BBS.
6288 This function is only used with -fsel-sched-pipelining-outer-loops. */
6289 void
6290 sel_add_loop_preheaders (bb_vec_t *bbs)
6292 int i;
6293 basic_block bb;
6294 vec<basic_block> *preheader_blocks
6295 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6297 if (!preheader_blocks)
6298 return;
6300 for (i = 0; preheader_blocks->iterate (i, &bb); i++)
6302 bbs->safe_push (bb);
6303 last_added_blocks.safe_push (bb);
6304 sel_add_bb (bb);
6307 vec_free (preheader_blocks);
6310 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6311 Please note that the function should also work when pipelining_p is
6312 false, because it is used when deciding whether we should or should
6313 not reschedule pipelined code. */
6314 bool
6315 sel_is_loop_preheader_p (basic_block bb)
6317 if (current_loop_nest)
6319 struct loop *outer;
6321 if (preheader_removed)
6322 return false;
6324 /* Preheader is the first block in the region. */
6325 if (BLOCK_TO_BB (bb->index) == 0)
6326 return true;
6328 /* We used to find a preheader with the topological information.
6329 Check that the above code is equivalent to what we did before. */
6331 if (in_current_region_p (current_loop_nest->header))
6332 gcc_assert (!(BLOCK_TO_BB (bb->index)
6333 < BLOCK_TO_BB (current_loop_nest->header->index)));
6335 /* Support the situation when the latch block of outer loop
6336 could be from here. */
6337 for (outer = loop_outer (current_loop_nest);
6338 outer;
6339 outer = loop_outer (outer))
6340 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6341 gcc_unreachable ();
6344 return false;
6347 /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6348 can be removed, making the corresponding edge fallthrough (assuming that
6349 all basic blocks between JUMP_BB and DEST_BB are empty). */
6350 static bool
6351 bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
6353 if (!onlyjump_p (BB_END (jump_bb))
6354 || tablejump_p (BB_END (jump_bb), NULL, NULL))
6355 return false;
6357 /* Several outgoing edges, abnormal edge or destination of jump is
6358 not DEST_BB. */
6359 if (EDGE_COUNT (jump_bb->succs) != 1
6360 || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
6361 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6362 return false;
6364 /* None of the above applies, so the jump can be removed. */
6365 return true;
6368 /* Removes the loop preheader blocks from the current region and saves them in
6369 PREHEADER_BLOCKS of the father loop, so they will be added later to the
6370 region that represents the outer loop. */
6371 static void
6372 sel_remove_loop_preheader (void)
6374 int i, old_len;
6375 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6376 basic_block bb;
6377 bool all_empty_p = true;
6378 vec<basic_block> *preheader_blocks
6379 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6381 vec_check_alloc (preheader_blocks, 0);
6383 gcc_assert (current_loop_nest);
6384 old_len = preheader_blocks->length ();
6386 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6387 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6389 bb = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i));
6391 /* If the basic block belongs to the region, but doesn't belong to the
6392 corresponding loop, then it should be a preheader. */
6393 if (sel_is_loop_preheader_p (bb))
6395 preheader_blocks->safe_push (bb);
6396 if (BB_END (bb) != bb_note (bb))
6397 all_empty_p = false;
6401 /* Remove these blocks only after iterating over the whole region. */
6402 for (i = preheader_blocks->length () - 1; i >= old_len; i--)
6404 bb = (*preheader_blocks)[i];
6405 sel_remove_bb (bb, false);
6408 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6410 if (!all_empty_p)
6411 /* Immediately create new region from preheader. */
6412 make_region_from_loop_preheader (preheader_blocks);
6413 else
6415 /* If all preheader blocks are empty, don't create a new empty region.
6416 Instead, remove them completely. */
6417 FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
6419 edge e;
6420 edge_iterator ei;
6421 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6423 /* Redirect all incoming edges to next basic block. */
6424 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6426 if (! (e->flags & EDGE_FALLTHRU))
6427 redirect_edge_and_branch (e, bb->next_bb);
6428 else
6429 redirect_edge_succ (e, bb->next_bb);
6431 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6432 delete_and_free_basic_block (bb);
6434 /* Check if, after deleting the preheader, there is an unconditional
6435 jump in PREV_BB that leads to the next basic block NEXT_BB.
6436 If so, delete this jump and clear the data sets of its
6437 basic block if it becomes empty. */
6438 if (next_bb->prev_bb == prev_bb
6439 && prev_bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
6440 && bb_has_removable_jump_to_p (prev_bb, next_bb))
6442 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6443 if (BB_END (prev_bb) == bb_note (prev_bb))
6444 free_data_sets (prev_bb);
6447 set_immediate_dominator (CDI_DOMINATORS, next_bb,
6448 recompute_dominator (CDI_DOMINATORS,
6449 next_bb));
6452 vec_free (preheader_blocks);
6454 else
6455 /* Store preheader within the father's loop structure. */
6456 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6457 preheader_blocks);
6460 #endif