[official-gcc.git] / gcc / sel-sched-ir.c
1 /* Instruction scheduling pass. Selective scheduler and pipeliner.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "hard-reg-set.h"
29 #include "regs.h"
30 #include "function.h"
31 #include "flags.h"
32 #include "insn-config.h"
33 #include "insn-attr.h"
34 #include "except.h"
35 #include "recog.h"
36 #include "params.h"
37 #include "target.h"
38 #include "timevar.h"
39 #include "tree-pass.h"
40 #include "sched-int.h"
41 #include "ggc.h"
42 #include "tree.h"
43 #include "vec.h"
44 #include "langhooks.h"
45 #include "rtlhooks-def.h"
46 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
48 #ifdef INSN_SCHEDULING
49 #include "sel-sched-ir.h"
50 /* We don't have to use it except for sel_print_insn. */
51 #include "sel-sched-dump.h"
53 /* A vector holding bb info for whole scheduling pass. */
54 VEC(sel_global_bb_info_def, heap) *sel_global_bb_info = NULL;
56 /* A vector holding bb info. */
57 VEC(sel_region_bb_info_def, heap) *sel_region_bb_info = NULL;
59 /* A pool for allocating all lists. */
60 alloc_pool sched_lists_pool;
62 /* This contains information about successors for compute_av_set. */
63 struct succs_info current_succs;
65 /* Data structure to describe interaction with the generic scheduler utils. */
66 static struct common_sched_info_def sel_common_sched_info;
68 /* The loop nest being pipelined. */
69 struct loop *current_loop_nest;
71 /* LOOP_NESTS is a vector containing the corresponding loop nest for
72 each region. */
73 static VEC(loop_p, heap) *loop_nests = NULL;
75 /* Saves blocks already in loop regions, indexed by bb->index. */
76 static sbitmap bbs_in_loop_rgns = NULL;
78 /* CFG hooks that are saved before changing create_basic_block hook. */
79 static struct cfg_hooks orig_cfg_hooks;
82 /* Array containing reverse topological index of function basic blocks,
83 indexed by BB->INDEX. */
84 static int *rev_top_order_index = NULL;
86 /* Length of the above array. */
87 static int rev_top_order_index_len = -1;
89 /* A regset pool structure. */
90 static struct
92 /* The stack to which regsets are returned. */
93 regset *v;
95 /* Its pointer. */
96 int n;
98 /* Its size. */
99 int s;
101    /* In VV we save all generated regsets so that, when destroying the
102       pool, we can compare it with V and check that every regset was returned
103       back to the pool.  */
104 regset *vv;
106 /* The pointer of VV stack. */
107 int nn;
109 /* Its size. */
110 int ss;
112 /* The difference between allocated and returned regsets. */
113 int diff;
114 } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
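/* Usage sketch (informal, not part of the original sources): a regset borrowed
   with get_regset_from_pool () or get_clear_regset_from_pool () below must
   eventually be handed back with return_regset_to_pool (), e.g.

     regset tmp = get_clear_regset_from_pool ();
     ... collect registers into TMP ...
     return_regset_to_pool (tmp);

   so that DIFF is balanced back to zero by the time free_regset_pool ()
   checks for leaks.  */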
116 /* This represents the nop pool. */
117 static struct
119 /* The vector which holds previously emitted nops. */
120 insn_t *v;
122 /* Its pointer. */
123 int n;
125 /* Its size. */
126 int s;
127 } nop_pool = { NULL, 0, 0 };
129 /* The pool for basic block notes. */
130 static rtx_vec_t bb_note_pool;
132 /* A NOP pattern used to emit placeholder insns. */
133 rtx nop_pattern = NULL_RTX;
134 /* A special instruction that resides in EXIT_BLOCK.
135    EXIT_INSN is the successor of the insns that lead to EXIT_BLOCK.  */
136 rtx exit_insn = NULL_RTX;
138 /* TRUE if, while scheduling the current region (which is a loop), its
139    preheader was removed.  */
140 bool preheader_removed = false;
143 /* Forward static declarations. */
144 static void fence_clear (fence_t);
146 static void deps_init_id (idata_t, insn_t, bool);
147 static void init_id_from_df (idata_t, insn_t, bool);
148 static expr_t set_insn_init (expr_t, vinsn_t, int);
150 static void cfg_preds (basic_block, insn_t **, int *);
151 static void prepare_insn_expr (insn_t, int);
152 static void free_history_vect (VEC (expr_history_def, heap) **);
154 static void move_bb_info (basic_block, basic_block);
155 static void remove_empty_bb (basic_block, bool);
156 static void sel_merge_blocks (basic_block, basic_block);
157 static void sel_remove_loop_preheader (void);
158 static bool bb_has_removable_jump_to_p (basic_block, basic_block);
160 static bool insn_is_the_only_one_in_bb_p (insn_t);
161 static void create_initial_data_sets (basic_block);
163 static void free_av_set (basic_block);
164 static void invalidate_av_set (basic_block);
165 static void extend_insn_data (void);
166 static void sel_init_new_insn (insn_t, int);
167 static void finish_insns (void);
169 /* Various list functions. */
171 /* Copy an instruction list L. */
172 ilist_t
173 ilist_copy (ilist_t l)
175 ilist_t head = NULL, *tailp = &head;
177 while (l)
179 ilist_add (tailp, ILIST_INSN (l));
180 tailp = &ILIST_NEXT (*tailp);
181 l = ILIST_NEXT (l);
184 return head;
187 /* Invert an instruction list L. */
188 ilist_t
189 ilist_invert (ilist_t l)
191 ilist_t res = NULL;
193 while (l)
195 ilist_add (&res, ILIST_INSN (l));
196 l = ILIST_NEXT (l);
199 return res;
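/* Note: the ilist/blist/flist helpers in this file are thin wrappers around
   the generic _list_add/_list_remove primitives (see sel-sched-ir.h), which
   presumably draw their nodes from sched_lists_pool declared above.  */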
202 /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */
203 void
204 blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
206 bnd_t bnd;
208 _list_add (lp);
209 bnd = BLIST_BND (*lp);
211 BND_TO (bnd) = to;
212 BND_PTR (bnd) = ptr;
213 BND_AV (bnd) = NULL;
214 BND_AV1 (bnd) = NULL;
215 BND_DC (bnd) = dc;
218 /* Remove the list node pointed to by LP.  */
219 void
220 blist_remove (blist_t *lp)
222 bnd_t b = BLIST_BND (*lp);
224 av_set_clear (&BND_AV (b));
225 av_set_clear (&BND_AV1 (b));
226 ilist_clear (&BND_PTR (b));
228 _list_remove (lp);
231 /* Init a fence tail L. */
232 void
233 flist_tail_init (flist_tail_t l)
235 FLIST_TAIL_HEAD (l) = NULL;
236 FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
239 /* Try to find fence corresponding to INSN in L. */
240 fence_t
241 flist_lookup (flist_t l, insn_t insn)
243 while (l)
245 if (FENCE_INSN (FLIST_FENCE (l)) == insn)
246 return FLIST_FENCE (l);
248 l = FLIST_NEXT (l);
251 return NULL;
254 /* Init the fields of F before running fill_insns. */
255 static void
256 init_fence_for_scheduling (fence_t f)
258 FENCE_BNDS (f) = NULL;
259 FENCE_PROCESSED_P (f) = false;
260 FENCE_SCHEDULED_P (f) = false;
263 /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
264 static void
265 flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
266 insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
267 int *ready_ticks, int ready_ticks_size, insn_t sched_next,
268 int cycle, int cycle_issued_insns, int issue_more,
269 bool starts_cycle_p, bool after_stall_p)
271 fence_t f;
273 _list_add (lp);
274 f = FLIST_FENCE (*lp);
276 FENCE_INSN (f) = insn;
278 gcc_assert (state != NULL);
279 FENCE_STATE (f) = state;
281 FENCE_CYCLE (f) = cycle;
282 FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
283 FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
284 FENCE_AFTER_STALL_P (f) = after_stall_p;
286 gcc_assert (dc != NULL);
287 FENCE_DC (f) = dc;
289 gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
290 FENCE_TC (f) = tc;
292 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
293 FENCE_ISSUE_MORE (f) = issue_more;
294 FENCE_EXECUTING_INSNS (f) = executing_insns;
295 FENCE_READY_TICKS (f) = ready_ticks;
296 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
297 FENCE_SCHED_NEXT (f) = sched_next;
299 init_fence_for_scheduling (f);
302 /* Remove the head node of the list pointed to by LP. */
303 static void
304 flist_remove (flist_t *lp)
306 if (FENCE_INSN (FLIST_FENCE (*lp)))
307 fence_clear (FLIST_FENCE (*lp));
308 _list_remove (lp);
311 /* Clear the fence list pointed to by LP. */
312 void
313 flist_clear (flist_t *lp)
315 while (*lp)
316 flist_remove (lp);
319 /* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
320 void
321 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
323 def_t d;
325 _list_add (dl);
326 d = DEF_LIST_DEF (*dl);
328 d->orig_insn = original_insn;
329 d->crosses_call = crosses_call;
333 /* Functions to work with target contexts. */
335 /* Bulk target context. It is convenient for debugging purposes to ensure
336 that there are no uninitialized (null) target contexts. */
337 static tc_t bulk_tc = (tc_t) 1;
339 /* Target hooks wrappers. In the future we can provide some default
340 implementations for them. */
342 /* Allocate a store for the target context. */
343 static tc_t
344 alloc_target_context (void)
346 return (targetm.sched.alloc_sched_context
347 ? targetm.sched.alloc_sched_context () : bulk_tc);
350 /* Init target context TC.
351    If CLEAN_P is true, then initialize TC as at the beginning of the scheduler.
352    Otherwise, copy the current backend context to TC.  */
353 static void
354 init_target_context (tc_t tc, bool clean_p)
356 if (targetm.sched.init_sched_context)
357 targetm.sched.init_sched_context (tc, clean_p);
360 /* Allocate and initialize a target context. Meaning of CLEAN_P is the same as
361    in init_target_context ().  */
362 tc_t
363 create_target_context (bool clean_p)
365 tc_t tc = alloc_target_context ();
367 init_target_context (tc, clean_p);
368 return tc;
371 /* Copy TC to the current backend context. */
372 void
373 set_target_context (tc_t tc)
375 if (targetm.sched.set_sched_context)
376 targetm.sched.set_sched_context (tc);
379 /* TC is about to be destroyed. Free any internal data. */
380 static void
381 clear_target_context (tc_t tc)
383 if (targetm.sched.clear_sched_context)
384 targetm.sched.clear_sched_context (tc);
387 /* Clear and free TC.  */
388 static void
389 delete_target_context (tc_t tc)
391 clear_target_context (tc);
393 if (targetm.sched.free_sched_context)
394 targetm.sched.free_sched_context (tc);
397 /* Make a copy of FROM in TO.
398    NB: Maybe this should be a hook.  */
399 static void
400 copy_target_context (tc_t to, tc_t from)
402 tc_t tmp = create_target_context (false);
404 set_target_context (from);
405 init_target_context (to, false);
407 set_target_context (tmp);
408 delete_target_context (tmp);
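/* The dance above works around the lack of a direct "copy context" hook:
   save the current backend context into TMP, load FROM into the backend,
   snapshot the backend into TO via init_target_context (to, false), and
   finally restore the backend from TMP.  */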
411 /* Create a copy of TC. */
412 static tc_t
413 create_copy_of_target_context (tc_t tc)
415 tc_t copy = alloc_target_context ();
417 copy_target_context (copy, tc);
419 return copy;
422 /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P
423 is the same as in init_target_context (). */
424 void
425 reset_target_context (tc_t tc, bool clean_p)
427 clear_target_context (tc);
428 init_target_context (tc, clean_p);
431 /* Functions to work with dependence contexts.
432 Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
433 context. It accumulates information about processed insns to decide if
434    the current insn is dependent on the processed ones.  */
436 /* Make a copy of FROM in TO. */
437 static void
438 copy_deps_context (deps_t to, deps_t from)
440 init_deps (to, false);
441 deps_join (to, from);
444 /* Allocate store for dep context. */
445 static deps_t
446 alloc_deps_context (void)
448 return XNEW (struct deps_desc);
451 /* Allocate and initialize dep context. */
452 static deps_t
453 create_deps_context (void)
455 deps_t dc = alloc_deps_context ();
457 init_deps (dc, false);
458 return dc;
461 /* Create a copy of FROM. */
462 static deps_t
463 create_copy_of_deps_context (deps_t from)
465 deps_t to = alloc_deps_context ();
467 copy_deps_context (to, from);
468 return to;
471 /* Clean up internal data of DC. */
472 static void
473 clear_deps_context (deps_t dc)
475 free_deps (dc);
478 /* Clear and free DC. */
479 static void
480 delete_deps_context (deps_t dc)
482 clear_deps_context (dc);
483 free (dc);
486 /* Clear and init DC. */
487 static void
488 reset_deps_context (deps_t dc)
490 clear_deps_context (dc);
491 init_deps (dc, false);
494 /* This structure describes the dependence analysis hooks for advancing
495 dependence context. */
496 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
498 NULL,
500 NULL, /* start_insn */
501 NULL, /* finish_insn */
502 NULL, /* start_lhs */
503 NULL, /* finish_lhs */
504 NULL, /* start_rhs */
505 NULL, /* finish_rhs */
506 haifa_note_reg_set,
507 haifa_note_reg_clobber,
508 haifa_note_reg_use,
509 NULL, /* note_mem_dep */
510 NULL, /* note_dep */
512 0, 0, 0
515 /* Process INSN and add its impact on DC. */
516 void
517 advance_deps_context (deps_t dc, insn_t insn)
519 sched_deps_info = &advance_deps_context_sched_deps_info;
520 deps_analyze_insn (dc, insn);
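/* Note that this temporarily installs the minimal hook set defined above as
   the global sched_deps_info, so deps_analyze_insn () only records register
   sets, clobbers and uses (the haifa_note_reg_* callbacks); the lhs/rhs and
   memory-dependence callbacks are NULL and thus skipped.  */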
524 /* Functions to work with DFA states. */
526 /* Allocate store for a DFA state. */
527 static state_t
528 state_alloc (void)
530 return xmalloc (dfa_state_size);
533 /* Allocate and initialize DFA state. */
534 static state_t
535 state_create (void)
537 state_t state = state_alloc ();
539 state_reset (state);
540 advance_state (state);
541 return state;
544 /* Free DFA state. */
545 static void
546 state_free (state_t state)
548 free (state);
551 /* Make a copy of FROM in TO. */
552 static void
553 state_copy (state_t to, state_t from)
555 memcpy (to, from, dfa_state_size);
558 /* Create a copy of FROM. */
559 static state_t
560 state_create_copy (state_t from)
562 state_t to = state_alloc ();
564 state_copy (to, from);
565 return to;
569 /* Functions to work with fences. */
571 /* Clear the fence. */
572 static void
573 fence_clear (fence_t f)
575 state_t s = FENCE_STATE (f);
576 deps_t dc = FENCE_DC (f);
577 void *tc = FENCE_TC (f);
579 ilist_clear (&FENCE_BNDS (f));
581 gcc_assert ((s != NULL && dc != NULL && tc != NULL)
582 || (s == NULL && dc == NULL && tc == NULL));
584 free (s);
586 if (dc != NULL)
587 delete_deps_context (dc);
589 if (tc != NULL)
590 delete_target_context (tc);
591 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
592 free (FENCE_READY_TICKS (f));
593 FENCE_READY_TICKS (f) = NULL;
596 /* Init a list of fences with successors of OLD_FENCE. */
597 void
598 init_fences (insn_t old_fence)
600 insn_t succ;
601 succ_iterator si;
602 bool first = true;
603 int ready_ticks_size = get_max_uid () + 1;
605 FOR_EACH_SUCC_1 (succ, si, old_fence,
606 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
609 if (first)
610 first = false;
611 else
612 gcc_assert (flag_sel_sched_pipelining_outer_loops);
614 flist_add (&fences, succ,
615 state_create (),
616 create_deps_context () /* dc */,
617 create_target_context (true) /* tc */,
618 NULL_RTX /* last_scheduled_insn */,
619 NULL, /* executing_insns */
620 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
621 ready_ticks_size,
622 NULL_RTX /* sched_next */,
623 1 /* cycle */, 0 /* cycle_issued_insns */,
624 issue_rate, /* issue_more */
625 1 /* starts_cycle_p */, 0 /* after_stall_p */);
629 /* Merges two fences (filling fields of fence F with resulting values) by
630    the following rules: 1) state, target context and last scheduled insn are
631    propagated from the fallthrough edge if it is available;
632    2) deps context and cycle are propagated from the more probable edge;
633 3) all other fields are set to corresponding constant values.
635 INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
636 READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
637 and AFTER_STALL_P are the corresponding fields of the second fence. */
638 static void
639 merge_fences (fence_t f, insn_t insn,
640 state_t state, deps_t dc, void *tc,
641 rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
642 int *ready_ticks, int ready_ticks_size,
643 rtx sched_next, int cycle, int issue_more, bool after_stall_p)
645 insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);
647 gcc_assert (sel_bb_head_p (FENCE_INSN (f))
648 && !sched_next && !FENCE_SCHED_NEXT (f));
650   /* Check if we can decide which path the fences came from.
651      If we can't (or don't want to), reset everything.  */
652 if (last_scheduled_insn == NULL
653 || last_scheduled_insn_old == NULL
654 /* This is a case when INSN is reachable on several paths from
655 one insn (this can happen when pipelining of outer loops is on and
656          there are two edges: one going around the inner loop and the other
657          right through it; in such a case just reset everything).  */
658 || last_scheduled_insn == last_scheduled_insn_old)
660 state_reset (FENCE_STATE (f));
661 state_free (state);
663 reset_deps_context (FENCE_DC (f));
664 delete_deps_context (dc);
666 reset_target_context (FENCE_TC (f), true);
667 delete_target_context (tc);
669 if (cycle > FENCE_CYCLE (f))
670 FENCE_CYCLE (f) = cycle;
672 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
673 FENCE_ISSUE_MORE (f) = issue_rate;
674 VEC_free (rtx, gc, executing_insns);
675 free (ready_ticks);
676 if (FENCE_EXECUTING_INSNS (f))
677 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
678 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
679 if (FENCE_READY_TICKS (f))
680 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
682 else
684 edge edge_old = NULL, edge_new = NULL;
685 edge candidate;
686 succ_iterator si;
687 insn_t succ;
689 /* Find fallthrough edge. */
690 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
691 candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);
693 if (!candidate
694 || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
695 && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
697 /* No fallthrough edge leading to basic block of INSN. */
698 state_reset (FENCE_STATE (f));
699 state_free (state);
701 reset_target_context (FENCE_TC (f), true);
702 delete_target_context (tc);
704 FENCE_LAST_SCHEDULED_INSN (f) = NULL;
705 FENCE_ISSUE_MORE (f) = issue_rate;
707 else
708 if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
710           /* It would be weird if the same insn were the successor of several
711              fallthrough edges.  */
712 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
713 != BLOCK_FOR_INSN (last_scheduled_insn_old));
715 state_free (FENCE_STATE (f));
716 FENCE_STATE (f) = state;
718 delete_target_context (FENCE_TC (f));
719 FENCE_TC (f) = tc;
721 FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
722 FENCE_ISSUE_MORE (f) = issue_more;
724 else
726 /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */
727 state_free (state);
728 delete_target_context (tc);
730 gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
731 != BLOCK_FOR_INSN (last_scheduled_insn));
734 /* Find edge of first predecessor (last_scheduled_insn_old->insn). */
735 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
736 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
738 if (succ == insn)
740             /* The same successor must not be reachable through several edges.  */
741 gcc_assert (!edge_old);
742 edge_old = si.e1;
745 /* Find edge of second predecessor (last_scheduled_insn->insn). */
746 FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
747 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
749 if (succ == insn)
751             /* The same successor must not be reachable through several edges.  */
752 gcc_assert (!edge_new);
753 edge_new = si.e1;
757           /* Check if we can choose the most probable predecessor.  */
758 if (edge_old == NULL || edge_new == NULL)
760 reset_deps_context (FENCE_DC (f));
761 delete_deps_context (dc);
762 VEC_free (rtx, gc, executing_insns);
763 free (ready_ticks);
765 FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
766 if (FENCE_EXECUTING_INSNS (f))
767 VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
768 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
769 if (FENCE_READY_TICKS (f))
770 memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
772 else
773 if (edge_new->probability > edge_old->probability)
775 delete_deps_context (FENCE_DC (f));
776 FENCE_DC (f) = dc;
777 VEC_free (rtx, gc, FENCE_EXECUTING_INSNS (f));
778 FENCE_EXECUTING_INSNS (f) = executing_insns;
779 free (FENCE_READY_TICKS (f));
780 FENCE_READY_TICKS (f) = ready_ticks;
781 FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
782 FENCE_CYCLE (f) = cycle;
784 else
786 /* Leave DC and CYCLE untouched. */
787 delete_deps_context (dc);
788 VEC_free (rtx, gc, executing_insns);
789 free (ready_ticks);
793 /* Fill remaining invariant fields. */
794 if (after_stall_p)
795 FENCE_AFTER_STALL_P (f) = 1;
797 FENCE_ISSUED_INSNS (f) = 0;
798 FENCE_STARTS_CYCLE_P (f) = 1;
799 FENCE_SCHED_NEXT (f) = NULL;
802 /* Add a new fence to NEW_FENCES list, initializing it from all
803 other parameters. */
804 static void
805 add_to_fences (flist_tail_t new_fences, insn_t insn,
806 state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
807 VEC(rtx, gc) *executing_insns, int *ready_ticks,
808 int ready_ticks_size, rtx sched_next, int cycle,
809 int cycle_issued_insns, int issue_rate,
810 bool starts_cycle_p, bool after_stall_p)
812 fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
814 if (! f)
816 flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
817 last_scheduled_insn, executing_insns, ready_ticks,
818 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
819 issue_rate, starts_cycle_p, after_stall_p);
821 FLIST_TAIL_TAILP (new_fences)
822 = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
824 else
826 merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
827 executing_insns, ready_ticks, ready_ticks_size,
828 sched_next, cycle, issue_rate, after_stall_p);
832 /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */
833 void
834 move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
836 fence_t f, old;
837 flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
839 old = FLIST_FENCE (old_fences);
840 f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
841 FENCE_INSN (FLIST_FENCE (old_fences)));
842 if (f)
844 merge_fences (f, old->insn, old->state, old->dc, old->tc,
845 old->last_scheduled_insn, old->executing_insns,
846 old->ready_ticks, old->ready_ticks_size,
847 old->sched_next, old->cycle, old->issue_more,
848 old->after_stall_p);
850 else
852 _list_add (tailp);
853 FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
854 *FLIST_FENCE (*tailp) = *old;
855 init_fence_for_scheduling (FLIST_FENCE (*tailp));
857 FENCE_INSN (old) = NULL;
860 /* Add a new fence to NEW_FENCES list and initialize most of its data
861 as a clean one. */
862 void
863 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
865 int ready_ticks_size = get_max_uid () + 1;
867 add_to_fences (new_fences,
868 succ, state_create (), create_deps_context (),
869 create_target_context (true),
870 NULL_RTX, NULL,
871 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
872 NULL_RTX, FENCE_CYCLE (fence) + 1,
873 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
876 /* Add a new fence to NEW_FENCES list and initialize all of its data
877 from FENCE and SUCC. */
878 void
879 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
881 int * new_ready_ticks
882 = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
884 memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
885 FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
886 add_to_fences (new_fences,
887 succ, state_create_copy (FENCE_STATE (fence)),
888 create_copy_of_deps_context (FENCE_DC (fence)),
889 create_copy_of_target_context (FENCE_TC (fence)),
890 FENCE_LAST_SCHEDULED_INSN (fence),
891 VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
892 new_ready_ticks,
893 FENCE_READY_TICKS_SIZE (fence),
894 FENCE_SCHED_NEXT (fence),
895 FENCE_CYCLE (fence),
896 FENCE_ISSUED_INSNS (fence),
897 FENCE_ISSUE_MORE (fence),
898 FENCE_STARTS_CYCLE_P (fence),
899 FENCE_AFTER_STALL_P (fence));
903 /* Functions to work with regset and nop pools. */
905 /* Return a regset from the pool.  It might have some of the bits set
906    from previous usage.  */
907 regset
908 get_regset_from_pool (void)
910 regset rs;
912 if (regset_pool.n != 0)
913 rs = regset_pool.v[--regset_pool.n];
914 else
915 /* We need to create the regset. */
917 rs = ALLOC_REG_SET (&reg_obstack);
919 if (regset_pool.nn == regset_pool.ss)
920 regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
921 (regset_pool.ss = 2 * regset_pool.ss + 1));
922 regset_pool.vv[regset_pool.nn++] = rs;
925 regset_pool.diff++;
927 return rs;
930 /* Same as above, but returns the empty regset. */
931 regset
932 get_clear_regset_from_pool (void)
934 regset rs = get_regset_from_pool ();
936 CLEAR_REG_SET (rs);
937 return rs;
940 /* Return regset RS to the pool for future use. */
941 void
942 return_regset_to_pool (regset rs)
944 gcc_assert (rs);
945 regset_pool.diff--;
947 if (regset_pool.n == regset_pool.s)
948 regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
949 (regset_pool.s = 2 * regset_pool.s + 1));
950 regset_pool.v[regset_pool.n++] = rs;
953 #ifdef ENABLE_CHECKING
954 /* This is used as a qsort callback for sorting regset pool stacks.
955 X and XX are addresses of two regsets. They are never equal. */
956 static int
957 cmp_v_in_regset_pool (const void *x, const void *xx)
959 return *((const regset *) x) - *((const regset *) xx);
961 #endif
963 /* Free the regset pool possibly checking for memory leaks. */
964 void
965 free_regset_pool (void)
967 #ifdef ENABLE_CHECKING
969 regset *v = regset_pool.v;
970 int i = 0;
971 int n = regset_pool.n;
973 regset *vv = regset_pool.vv;
974 int ii = 0;
975 int nn = regset_pool.nn;
977 int diff = 0;
979 gcc_assert (n <= nn);
981 /* Sort both vectors so it will be possible to compare them. */
982 qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
983 qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
985 while (ii < nn)
987 if (v[i] == vv[ii])
988 i++;
989 else
990 /* VV[II] was lost. */
991 diff++;
993 ii++;
996 gcc_assert (diff == regset_pool.diff);
998 #endif
1000   /* If this is not true, we have a memory leak.  */
1001 gcc_assert (regset_pool.diff == 0);
1003 while (regset_pool.n)
1005 --regset_pool.n;
1006 FREE_REG_SET (regset_pool.v[regset_pool.n]);
1009 free (regset_pool.v);
1010 regset_pool.v = NULL;
1011 regset_pool.s = 0;
1013 free (regset_pool.vv);
1014 regset_pool.vv = NULL;
1015 regset_pool.nn = 0;
1016 regset_pool.ss = 0;
1018 regset_pool.diff = 0;
1022 /* Functions to work with nop pools. NOP insns are used as temporary
1023 placeholders of the insns being scheduled to allow correct update of
1024 the data sets. When update is finished, NOPs are deleted. */
1026 /* A vinsn that is used to represent a nop. This vinsn is shared among all
1027 nops sel-sched generates. */
1028 static vinsn_t nop_vinsn = NULL;
1030 /* Emit a nop before INSN, taking it from the pool.  */
1031 insn_t
1032 get_nop_from_pool (insn_t insn)
1034 insn_t nop;
1035 bool old_p = nop_pool.n != 0;
1036 int flags;
1038 if (old_p)
1039 nop = nop_pool.v[--nop_pool.n];
1040 else
1041 nop = nop_pattern;
1043 nop = emit_insn_before (nop, insn);
1045 if (old_p)
1046 flags = INSN_INIT_TODO_SSID;
1047 else
1048 flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
1050 set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
1051 sel_init_new_insn (nop, flags);
1053 return nop;
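/* Usage sketch (informal): the nop returned above borrows INSN's expr and
   seqno (see the set_insn_init call), so it can stand in for INSN while the
   data sets are updated, e.g.

     insn_t nop = get_nop_from_pool (insn);
     ... update data sets ...
     return_nop_to_pool (nop, true);

   which removes the placeholder from the stream again.  */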
1056 /* Remove NOP from the instruction stream and return it to the pool. */
1057 void
1058 return_nop_to_pool (insn_t nop, bool full_tidying)
1060 gcc_assert (INSN_IN_STREAM_P (nop));
1061 sel_remove_insn (nop, false, full_tidying);
1063 if (nop_pool.n == nop_pool.s)
1064 nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
1065 (nop_pool.s = 2 * nop_pool.s + 1));
1066 nop_pool.v[nop_pool.n++] = nop;
1069 /* Free the nop pool. */
1070 void
1071 free_nop_pool (void)
1073 nop_pool.n = 0;
1074 nop_pool.s = 0;
1075 free (nop_pool.v);
1076 nop_pool.v = NULL;
1080 /* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
1081 The callback is given two rtxes XX and YY and writes the new rtxes
1082 to NX and NY in case some needs to be skipped. */
1083 static int
1084 skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
1086 const_rtx x = *xx;
1087 const_rtx y = *yy;
1089 if (GET_CODE (x) == UNSPEC
1090 && (targetm.sched.skip_rtx_p == NULL
1091 || targetm.sched.skip_rtx_p (x)))
1093 *nx = XVECEXP (x, 0, 0);
1094 *ny = CONST_CAST_RTX (y);
1095 return 1;
1098 if (GET_CODE (y) == UNSPEC
1099 && (targetm.sched.skip_rtx_p == NULL
1100 || targetm.sched.skip_rtx_p (y)))
1102 *nx = CONST_CAST_RTX (x);
1103 *ny = XVECEXP (y, 0, 0);
1104 return 1;
1107 return 0;
1110 /* Callback, called from hash_rtx_cb. Helps to hash UNSPEC rtx X in a correct way
1111 to support ia64 speculation. When changes are needed, new rtx X and new mode
1112 NMODE are written, and the callback returns true. */
1113 static int
1114 hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
1115 rtx *nx, enum machine_mode* nmode)
1117 if (GET_CODE (x) == UNSPEC
1118 && targetm.sched.skip_rtx_p
1119 && targetm.sched.skip_rtx_p (x))
1121 *nx = XVECEXP (x, 0 ,0);
1122 *nmode = VOIDmode;
1123 return 1;
1126 return 0;
1129 /* Return true if LHS and RHS are OK to be scheduled separately.  */
1130 static bool
1131 lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
1133 if (lhs == NULL || rhs == NULL)
1134 return false;
1136   /* Do not schedule CONST, CONST_INT, CONST_DOUBLE etc. as rhs: there is no
1137      point in using a reg if a const can be used.  Moreover, scheduling a const
1138      as rhs may lead to a mode mismatch, because consts don't have modes but may
1139      be merged from branches where the same const is used in different modes.  */
1140 if (CONSTANT_P (rhs))
1141 return false;
1143 /* ??? Do not rename predicate registers to avoid ICEs in bundling. */
1144 if (COMPARISON_P (rhs))
1145 return false;
1147 /* Do not allow single REG to be an rhs. */
1148 if (REG_P (rhs))
1149 return false;
1151 /* See comment at find_used_regs_1 (*1) for explanation of this
1152 restriction. */
1153 /* FIXME: remove this later. */
1154 if (MEM_P (lhs))
1155 return false;
1157 /* This will filter all tricky things like ZERO_EXTRACT etc.
1158 For now we don't handle it. */
1159 if (!REG_P (lhs) && !MEM_P (lhs))
1160 return false;
1162 return true;
1165 /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When
1166 FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is
1167 used e.g. for insns from recovery blocks. */
1168 static void
1169 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
1171 hash_rtx_callback_function hrcf;
1172 int insn_class;
1174 VINSN_INSN_RTX (vi) = insn;
1175 VINSN_COUNT (vi) = 0;
1176 vi->cost = -1;
1178 if (INSN_NOP_P (insn))
1179 return;
1181 if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
1182 init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
1183 else
1184 deps_init_id (VINSN_ID (vi), insn, force_unique_p);
1186 /* Hash vinsn depending on whether it is separable or not. */
1187 hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
1188 if (VINSN_SEPARABLE_P (vi))
1190 rtx rhs = VINSN_RHS (vi);
1192 VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
1193 NULL, NULL, false, hrcf);
1194 VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
1195 VOIDmode, NULL, NULL,
1196 false, hrcf);
1198 else
1200 VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
1201 NULL, NULL, false, hrcf);
1202 VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
1205 insn_class = haifa_classify_insn (insn);
1206 if (insn_class >= 2
1207 && (!targetm.sched.get_insn_spec_ds
1208 || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
1209 == 0)))
1210 VINSN_MAY_TRAP_P (vi) = true;
1211 else
1212 VINSN_MAY_TRAP_P (vi) = false;
1215 /* Indicate that VI has become the part of an rtx object. */
1216 void
1217 vinsn_attach (vinsn_t vi)
1219 /* Assert that VI is not pending for deletion. */
1220 gcc_assert (VINSN_INSN_RTX (vi));
1222 VINSN_COUNT (vi)++;
1225 /* Create and init VI from INSN.  Use FORCE_UNIQUE_P for determining the
1226    correct VINSN_TYPE (VI).  */
1227 static vinsn_t
1228 vinsn_create (insn_t insn, bool force_unique_p)
1230 vinsn_t vi = XCNEW (struct vinsn_def);
1232 vinsn_init (vi, insn, force_unique_p);
1233 return vi;
1236 /* Return a copy of VI. When REATTACH_P is true, detach VI and attach
1237 the copy. */
1238 vinsn_t
1239 vinsn_copy (vinsn_t vi, bool reattach_p)
1241 rtx copy;
1242 bool unique = VINSN_UNIQUE_P (vi);
1243 vinsn_t new_vi;
1245 copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
1246 new_vi = create_vinsn_from_insn_rtx (copy, unique);
1247 if (reattach_p)
1249 vinsn_detach (vi);
1250 vinsn_attach (new_vi);
1253 return new_vi;
1256 /* Delete the VI vinsn and free its data. */
1257 static void
1258 vinsn_delete (vinsn_t vi)
1260 gcc_assert (VINSN_COUNT (vi) == 0);
1262 if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
1264 return_regset_to_pool (VINSN_REG_SETS (vi));
1265 return_regset_to_pool (VINSN_REG_USES (vi));
1266 return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
1269 free (vi);
1272 /* Indicate that VI is no longer a part of some rtx object.
1273 Remove VI if it is no longer needed. */
1274 void
1275 vinsn_detach (vinsn_t vi)
1277 gcc_assert (VINSN_COUNT (vi) > 0);
1279 if (--VINSN_COUNT (vi) == 0)
1280 vinsn_delete (vi);
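/* VINSN_COUNT is thus a plain reference count: every object that starts
   pointing to a vinsn calls vinsn_attach (), every one that stops pointing to
   it calls vinsn_detach (), and the vinsn is freed once the count reaches
   zero.  */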
1283 /* Returns TRUE if VI is a branch. */
1284 bool
1285 vinsn_cond_branch_p (vinsn_t vi)
1287 insn_t insn;
1289 if (!VINSN_UNIQUE_P (vi))
1290 return false;
1292 insn = VINSN_INSN_RTX (vi);
1293 if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
1294 return false;
1296 return control_flow_insn_p (insn);
1299 /* Return latency of INSN. */
1300 static int
1301 sel_insn_rtx_cost (rtx insn)
1303 int cost;
1305 /* A USE insn, or something else we don't need to
1306 understand. We can't pass these directly to
1307 result_ready_cost or insn_default_latency because it will
1308 trigger a fatal error for unrecognizable insns. */
1309 if (recog_memoized (insn) < 0)
1310 cost = 0;
1311 else
1313 cost = insn_default_latency (insn);
1315 if (cost < 0)
1316 cost = 0;
1319 return cost;
1322 /* Return the cost of the VI.
1323 !!! FIXME: Unify with haifa-sched.c: insn_cost (). */
1325 sel_vinsn_cost (vinsn_t vi)
1327 int cost = vi->cost;
1329 if (cost < 0)
1331 cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
1332 vi->cost = cost;
1335 return cost;
1339 /* Functions for insn emitting. */
1341 /* Emit new insn after AFTER based on PATTERN and initialize its data from
1342 EXPR and SEQNO. */
1343 insn_t
1344 sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
1346 insn_t new_insn;
1348 gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
1350 new_insn = emit_insn_after (pattern, after);
1351 set_insn_init (expr, NULL, seqno);
1352 sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);
1354 return new_insn;
1357 /* Force newly generated vinsns to be unique. */
1358 static bool init_insn_force_unique_p = false;
1360 /* Emit new speculation recovery insn after AFTER based on PATTERN and
1361 initialize its data from EXPR and SEQNO. */
1362 insn_t
1363 sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
1364 insn_t after)
1366 insn_t insn;
1368 gcc_assert (!init_insn_force_unique_p);
1370 init_insn_force_unique_p = true;
1371 insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
1372 CANT_MOVE (insn) = 1;
1373 init_insn_force_unique_p = false;
1375 return insn;
1378 /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL,
1379 take it as a new vinsn instead of EXPR's vinsn.
1380 We simplify insns later, after scheduling region in
1381 simplify_changed_insns. */
1382 insn_t
1383 sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
1384 insn_t after)
1386 expr_t emit_expr;
1387 insn_t insn;
1388 int flags;
1390 emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
1391 seqno);
1392 insn = EXPR_INSN_RTX (emit_expr);
1393 add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
1395 flags = INSN_INIT_TODO_SSID;
1396 if (INSN_LUID (insn) == 0)
1397 flags |= INSN_INIT_TODO_LUID;
1398 sel_init_new_insn (insn, flags);
1400 return insn;
1403 /* Move insn from EXPR after AFTER. */
1404 insn_t
1405 sel_move_insn (expr_t expr, int seqno, insn_t after)
1407 insn_t insn = EXPR_INSN_RTX (expr);
1408 basic_block bb = BLOCK_FOR_INSN (after);
1409 insn_t next = NEXT_INSN (after);
1411 /* Assert that in move_op we disconnected this insn properly. */
1412 gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
1413 PREV_INSN (insn) = after;
1414 NEXT_INSN (insn) = next;
1416 NEXT_INSN (after) = insn;
1417 PREV_INSN (next) = insn;
1419 /* Update links from insn to bb and vice versa. */
1420 df_insn_change_bb (insn, bb);
1421 if (BB_END (bb) == after)
1422 BB_END (bb) = insn;
1424 prepare_insn_expr (insn, seqno);
1425 return insn;
1429 /* Functions to work with right-hand sides. */
1431 /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
1432 VECT and return true when found. Use NEW_VINSN for comparison only when
1433 COMPARE_VINSNS is true. Write to INDP the index on which
1434 the search has stopped, such that inserting the new element at INDP will
1435 retain VECT's sort order. */
1436 static bool
1437 find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
1438 unsigned uid, vinsn_t new_vinsn,
1439 bool compare_vinsns, int *indp)
1441 expr_history_def *arr;
1442 int i, j, len = VEC_length (expr_history_def, vect);
1444 if (len == 0)
1446 *indp = 0;
1447 return false;
1450 arr = VEC_address (expr_history_def, vect);
1451 i = 0, j = len - 1;
1453 while (i <= j)
1455 unsigned auid = arr[i].uid;
1456 vinsn_t avinsn = arr[i].new_expr_vinsn;
1458 if (auid == uid
1459 /* When undoing transformation on a bookkeeping copy, the new vinsn
1460 may not be exactly equal to the one that is saved in the vector.
1461 This is because the insn whose copy we're checking was possibly
1462 substituted itself. */
1463 && (! compare_vinsns
1464 || vinsn_equal_p (avinsn, new_vinsn)))
1466 *indp = i;
1467 return true;
1469 else if (auid > uid)
1470 break;
1471 i++;
1474 *indp = i;
1475 return false;
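/* Since the vector is kept sorted by UID, a failed lookup leaves *INDP at the
   position where insert_in_history_vect () can insert the new element while
   preserving the sort order.  */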
1478 /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return
1479    the position found or -1 if no such value is in the vector.
1480 Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */
1482 find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
1483 vinsn_t new_vinsn, bool originators_p)
1485 int ind;
1487 if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
1488 false, &ind))
1489 return ind;
1491 if (INSN_ORIGINATORS (insn) && originators_p)
1493 unsigned uid;
1494 bitmap_iterator bi;
1496 EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
1497 if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
1498 return ind;
1501 return -1;
1504 /* Insert new element in a sorted history vector pointed to by PVECT,
1505 if it is not there already. The element is searched using
1506 UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save
1507 the history of a transformation. */
1508 void
1509 insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
1510 unsigned uid, enum local_trans_type type,
1511 vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
1512 ds_t spec_ds)
1514 VEC(expr_history_def, heap) *vect = *pvect;
1515 expr_history_def temp;
1516 bool res;
1517 int ind;
1519 res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);
1521 if (res)
1523 expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
1525 /* It is possible that speculation types of expressions that were
1526 propagated through different paths will be different here. In this
1527 case, merge the status to get the correct check later. */
1528 if (phist->spec_ds != spec_ds)
1529 phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
1530 return;
1533 temp.uid = uid;
1534 temp.old_expr_vinsn = old_expr_vinsn;
1535 temp.new_expr_vinsn = new_expr_vinsn;
1536 temp.spec_ds = spec_ds;
1537 temp.type = type;
1539 vinsn_attach (old_expr_vinsn);
1540 vinsn_attach (new_expr_vinsn);
1541 VEC_safe_insert (expr_history_def, heap, vect, ind, &temp);
1542 *pvect = vect;
1545 /* Free history vector PVECT. */
1546 static void
1547 free_history_vect (VEC (expr_history_def, heap) **pvect)
1549 unsigned i;
1550 expr_history_def *phist;
1552 if (! *pvect)
1553 return;
1555 for (i = 0;
1556 VEC_iterate (expr_history_def, *pvect, i, phist);
1557 i++)
1559 vinsn_detach (phist->old_expr_vinsn);
1560 vinsn_detach (phist->new_expr_vinsn);
1563 VEC_free (expr_history_def, heap, *pvect);
1564 *pvect = NULL;
1567 /* Merge vector FROM to PVECT. */
1568 static void
1569 merge_history_vect (VEC (expr_history_def, heap) **pvect,
1570 VEC (expr_history_def, heap) *from)
1572 expr_history_def *phist;
1573 int i;
1575 /* We keep this vector sorted. */
1576 for (i = 0; VEC_iterate (expr_history_def, from, i, phist); i++)
1577 insert_in_history_vect (pvect, phist->uid, phist->type,
1578 phist->old_expr_vinsn, phist->new_expr_vinsn,
1579 phist->spec_ds);
1582 /* Compare two vinsns as rhses if possible and as vinsns otherwise. */
1583 bool
1584 vinsn_equal_p (vinsn_t x, vinsn_t y)
1586 rtx_equal_p_callback_function repcf;
1588 if (x == y)
1589 return true;
1591 if (VINSN_TYPE (x) != VINSN_TYPE (y))
1592 return false;
1594 if (VINSN_HASH (x) != VINSN_HASH (y))
1595 return false;
1597 repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
1598 if (VINSN_SEPARABLE_P (x))
1600 /* Compare RHSes of VINSNs. */
1601 gcc_assert (VINSN_RHS (x));
1602 gcc_assert (VINSN_RHS (y));
1604 return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
1607 return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
1611 /* Functions for working with expressions. */
1613 /* Initialize EXPR. */
1614 static void
1615 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
1616 int sched_times, int orig_bb_index, ds_t spec_done_ds,
1617 ds_t spec_to_check_ds, int orig_sched_cycle,
1618 VEC(expr_history_def, heap) *history, signed char target_available,
1619 bool was_substituted, bool was_renamed, bool needs_spec_check_p,
1620 bool cant_move)
1622 vinsn_attach (vi);
1624 EXPR_VINSN (expr) = vi;
1625 EXPR_SPEC (expr) = spec;
1626 EXPR_USEFULNESS (expr) = use;
1627 EXPR_PRIORITY (expr) = priority;
1628 EXPR_PRIORITY_ADJ (expr) = 0;
1629 EXPR_SCHED_TIMES (expr) = sched_times;
1630 EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
1631 EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
1632 EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
1633 EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;
1635 if (history)
1636 EXPR_HISTORY_OF_CHANGES (expr) = history;
1637 else
1638 EXPR_HISTORY_OF_CHANGES (expr) = NULL;
1640 EXPR_TARGET_AVAILABLE (expr) = target_available;
1641 EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
1642 EXPR_WAS_RENAMED (expr) = was_renamed;
1643 EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
1644 EXPR_CANT_MOVE (expr) = cant_move;
1647 /* Make a copy of the expr FROM into the expr TO. */
1648 void
1649 copy_expr (expr_t to, expr_t from)
1651 VEC(expr_history_def, heap) *temp = NULL;
1653 if (EXPR_HISTORY_OF_CHANGES (from))
1655 unsigned i;
1656 expr_history_def *phist;
1658 temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
1659 for (i = 0;
1660 VEC_iterate (expr_history_def, temp, i, phist);
1661 i++)
1663 vinsn_attach (phist->old_expr_vinsn);
1664 vinsn_attach (phist->new_expr_vinsn);
1668 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
1669 EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
1670 EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
1671 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
1672 EXPR_ORIG_SCHED_CYCLE (from), temp,
1673 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1674 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1675 EXPR_CANT_MOVE (from));
1678 /* Same, but the final expr will not ever be in av sets, so don't copy
1679 "uninteresting" data such as bitmap cache. */
1680 void
1681 copy_expr_onside (expr_t to, expr_t from)
1683 init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
1684 EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
1685 EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
1686 EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
1687 EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
1688 EXPR_CANT_MOVE (from));
1691 /* Prepare the expr of INSN for scheduling. Used when moving insn and when
1692 initializing new insns. */
1693 static void
1694 prepare_insn_expr (insn_t insn, int seqno)
1696 expr_t expr = INSN_EXPR (insn);
1697 ds_t ds;
1699 INSN_SEQNO (insn) = seqno;
1700 EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
1701 EXPR_SPEC (expr) = 0;
1702 EXPR_ORIG_SCHED_CYCLE (expr) = 0;
1703 EXPR_WAS_SUBSTITUTED (expr) = 0;
1704 EXPR_WAS_RENAMED (expr) = 0;
1705 EXPR_TARGET_AVAILABLE (expr) = 1;
1706 INSN_LIVE_VALID_P (insn) = false;
1708 /* ??? If this expression is speculative, make its dependence
1709 as weak as possible. We can filter this expression later
1710 in process_spec_exprs, because we do not distinguish
1711 between the status we got during compute_av_set and the
1712 existing status. To be fixed. */
1713 ds = EXPR_SPEC_DONE_DS (expr);
1714 if (ds)
1715 EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);
1717 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1720 /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT
1721 is non-null when expressions are merged from different successors at
1722 a split point. */
1723 static void
1724 update_target_availability (expr_t to, expr_t from, insn_t split_point)
1726 if (EXPR_TARGET_AVAILABLE (to) < 0
1727 || EXPR_TARGET_AVAILABLE (from) < 0)
1728 EXPR_TARGET_AVAILABLE (to) = -1;
1729 else
1731 /* We try to detect the case when one of the expressions
1732 can only be reached through another one. In this case,
1733 we can do better. */
1734 if (split_point == NULL)
1736 int toind, fromind;
1738 toind = EXPR_ORIG_BB_INDEX (to);
1739 fromind = EXPR_ORIG_BB_INDEX (from);
1741 if (toind && toind == fromind)
1742 /* Do nothing -- everything is done in
1743 merge_with_other_exprs. */
1745 else
1746 EXPR_TARGET_AVAILABLE (to) = -1;
1748 else
1749 EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
1753 /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT
1754 is non-null when expressions are merged from different successors at
1755 a split point. */
1756 static void
1757 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
1759 ds_t old_to_ds, old_from_ds;
1761 old_to_ds = EXPR_SPEC_DONE_DS (to);
1762 old_from_ds = EXPR_SPEC_DONE_DS (from);
1764 EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
1765 EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
1766 EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
1768 /* When merging e.g. control & data speculative exprs, or a control
1769 speculative with a control&data speculative one, we really have
1770 to change vinsn too. Also, when speculative status is changed,
1771 we also need to record this as a transformation in expr's history. */
1772 if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
1774 old_to_ds = ds_get_speculation_types (old_to_ds);
1775 old_from_ds = ds_get_speculation_types (old_from_ds);
1777 if (old_to_ds != old_from_ds)
1779 ds_t record_ds;
1781 /* When both expressions are speculative, we need to change
1782 the vinsn first. */
1783 if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
1785 int res;
1787 res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
1788 gcc_assert (res >= 0);
1791 if (split_point != NULL)
1793 /* Record the change with proper status. */
1794 record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
1795 record_ds &= ~(old_to_ds & SPECULATIVE);
1796 record_ds &= ~(old_from_ds & SPECULATIVE);
1798 insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1799 INSN_UID (split_point), TRANS_SPECULATION,
1800 EXPR_VINSN (from), EXPR_VINSN (to),
1801 record_ds);
1808 /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL,
1809 this is done along different paths. */
1810 void
1811 merge_expr_data (expr_t to, expr_t from, insn_t split_point)
1813 /* Choose the maximum of the specs of merged exprs. This is required
1814 for correctness of bookkeeping. */
1815 if (EXPR_SPEC (to) < EXPR_SPEC (from))
1816 EXPR_SPEC (to) = EXPR_SPEC (from);
1818 if (split_point)
1819 EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
1820 else
1821 EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
1822 EXPR_USEFULNESS (from));
1824 if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
1825 EXPR_PRIORITY (to) = EXPR_PRIORITY (from);
1827 if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
1828 EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);
1830 if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
1831 EXPR_ORIG_BB_INDEX (to) = 0;
1833 EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
1834 EXPR_ORIG_SCHED_CYCLE (from));
1836 EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
1837 EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
1838 EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
1840 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
1841 EXPR_HISTORY_OF_CHANGES (from));
1842 update_target_availability (to, from, split_point);
1843 update_speculative_bits (to, from, split_point);
1846 /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal
1847 in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions
1848 are merged from different successors at a split point. */
1849 void
1850 merge_expr (expr_t to, expr_t from, insn_t split_point)
1852 vinsn_t to_vi = EXPR_VINSN (to);
1853 vinsn_t from_vi = EXPR_VINSN (from);
1855 gcc_assert (vinsn_equal_p (to_vi, from_vi));
1857 /* Make sure that speculative pattern is propagated into exprs that
1858 have non-speculative one. This will provide us with consistent
1859 speculative bits and speculative patterns inside expr. */
1860 if (EXPR_SPEC_DONE_DS (to) == 0
1861 && EXPR_SPEC_DONE_DS (from) != 0)
1862 change_vinsn_in_expr (to, EXPR_VINSN (from));
1864 merge_expr_data (to, from, split_point);
1865 gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
1868 /* Clear the information of this EXPR. */
1869 void
1870 clear_expr (expr_t expr)
1873 vinsn_detach (EXPR_VINSN (expr));
1874 EXPR_VINSN (expr) = NULL;
1876 free_history_vect (&EXPR_HISTORY_OF_CHANGES (expr));
1879 /* For a given LV_SET, mark EXPR as having an unavailable target register.  */
1880 static void
1881 set_unavailable_target_for_expr (expr_t expr, regset lv_set)
1883 if (EXPR_SEPARABLE_P (expr))
1885 if (REG_P (EXPR_LHS (expr))
1886 && register_unavailable_p (lv_set, EXPR_LHS (expr)))
1888       /* If it's an insn like r1 = use (r1, ...), and it exists in
1889          different forms in each of the av_sets being merged, we can't say
1890          whether the original destination register is available or not.
1891          However, this still works if the destination register is not used
1892          in the original expression: if the branch whose LV_SET we're
1893          looking at here is not actually the 'other branch' in the sense that
1894          the same expression is available through it (but this can't be
1895          determined at the computation stage because of transformations on
1896          one of the branches), it still won't affect the availability.
1897          Liveness of a register somewhere on a code motion path means that
1898          it's either read somewhere on the code motion path, live on
1899          the 'other' branch, live at the point immediately following
1900          the original operation, or read by the original operation itself.
1901          The latter case is filtered out in the condition below.
1902          This still doesn't cover the case when a register is defined and
1903          used somewhere within the code motion path; in that case we could
1904          miss a unifying code motion along both branches using a renamed
1905          register, but it won't affect code correctness since upon
1906          an actual code motion bookkeeping code would be generated.  */
1907 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1908 EXPR_LHS (expr)))
1909 EXPR_TARGET_AVAILABLE (expr) = -1;
1910 else
1911 EXPR_TARGET_AVAILABLE (expr) = false;
1914 else
1916 unsigned regno;
1917 reg_set_iterator rsi;
1919 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
1920 0, regno, rsi)
1921 if (bitmap_bit_p (lv_set, regno))
1923 EXPR_TARGET_AVAILABLE (expr) = false;
1924 break;
1927 EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
1928 0, regno, rsi)
1929 if (bitmap_bit_p (lv_set, regno))
1931 EXPR_TARGET_AVAILABLE (expr) = false;
1932 break;
1937 /* Try to make EXPR speculative. Return 1 when EXPR's pattern
1938 or dependence status have changed, 2 when also the target register
1939 became unavailable, 0 if nothing had to be changed. */
1941 speculate_expr (expr_t expr, ds_t ds)
1943 int res;
1944 rtx orig_insn_rtx;
1945 rtx spec_pat;
1946 ds_t target_ds, current_ds;
1948 /* Obtain the status we need to put on EXPR. */
1949 target_ds = (ds & SPECULATIVE);
1950 current_ds = EXPR_SPEC_DONE_DS (expr);
1951 ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);
1953 orig_insn_rtx = EXPR_INSN_RTX (expr);
1955 res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);
1957 switch (res)
1959 case 0:
1960 EXPR_SPEC_DONE_DS (expr) = ds;
1961 return current_ds != ds ? 1 : 0;
1963 case 1:
1965 rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
1966 vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);
1968 change_vinsn_in_expr (expr, spec_vinsn);
1969 EXPR_SPEC_DONE_DS (expr) = ds;
1970 EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
1972 /* Do not allow clobbering the address register of speculative
1973 insns. */
1974 if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
1975 expr_dest_reg (expr)))
1977 EXPR_TARGET_AVAILABLE (expr) = false;
1978 return 2;
1981 return 1;
1984 case -1:
1985 return -1;
1987 default:
1988 gcc_unreachable ();
1989 return -1;
1993 /* Return a destination register, if any, of EXPR. */
1995 expr_dest_reg (expr_t expr)
1997 rtx dest = VINSN_LHS (EXPR_VINSN (expr));
1999 if (dest != NULL_RTX && REG_P (dest))
2000 return dest;
2002 return NULL_RTX;
2005 /* Return the REGNO of EXPR's destination.  */
2006 unsigned
2007 expr_dest_regno (expr_t expr)
2009 rtx dest = expr_dest_reg (expr);
2011 gcc_assert (dest != NULL_RTX);
2012 return REGNO (dest);
2015 /* For a given LV_SET, mark all expressions in JOIN_SET that are not present
2016    in AV_SET as having an unavailable target register.  */
2017 void
2018 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
2020 expr_t expr;
2021 av_set_iterator avi;
2023 FOR_EACH_EXPR (expr, avi, join_set)
2024 if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
2025 set_unavailable_target_for_expr (expr, lv_set);
2029 /* Returns true if REG (at least partially) is present in REGS. */
2030 bool
2031 register_unavailable_p (regset regs, rtx reg)
2033 unsigned regno, end_regno;
2035 regno = REGNO (reg);
2036 if (bitmap_bit_p (regs, regno))
2037 return true;
2039 end_regno = END_REGNO (reg);
2041 while (++regno < end_regno)
2042 if (bitmap_bit_p (regs, regno))
2043 return true;
2045 return false;
2048 /* Av set functions. */
2050 /* Add a new element to av set SETP.
2051 Return the element added. */
2052 static av_set_t
2053 av_set_add_element (av_set_t *setp)
2055 /* Insert at the beginning of the list. */
2056 _list_add (setp);
2057 return *setp;
2060 /* Add EXPR to SETP. */
2061 void
2062 av_set_add (av_set_t *setp, expr_t expr)
2064 av_set_t elem;
2066 gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
2067 elem = av_set_add_element (setp);
2068 copy_expr (_AV_SET_EXPR (elem), expr);
2071 /* Same, but do not copy EXPR. */
2072 static void
2073 av_set_add_nocopy (av_set_t *setp, expr_t expr)
2075 av_set_t elem;
2077 elem = av_set_add_element (setp);
2078 *_AV_SET_EXPR (elem) = *expr;
2081 /* Remove expr pointed to by IP from the av_set. */
2082 void
2083 av_set_iter_remove (av_set_iterator *ip)
2085 clear_expr (_AV_SET_EXPR (*ip->lp));
2086 _list_iter_remove (ip);
2089 /* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the
2090    sense of the vinsn_equal_p function.  Return NULL if no such expr
2091    was found in SET.  */
2092 expr_t
2093 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2095 expr_t expr;
2096 av_set_iterator i;
2098 FOR_EACH_EXPR (expr, i, set)
2099 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2100 return expr;
2101 return NULL;
2104 /* Same, but also remove the EXPR found. */
2105 static expr_t
2106 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2108 expr_t expr;
2109 av_set_iterator i;
2111 FOR_EACH_EXPR_1 (expr, i, setp)
2112 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2114 _list_iter_remove_nofree (&i);
2115 return expr;
2117 return NULL;
2120 /* Search for an expr in SET, such that it's equivalent to EXPR in the
2121 sense of vinsn_equal_p function of their vinsns, but not EXPR itself.
2122 Returns NULL if no such expr was found in SET. */
2123 static expr_t
2124 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2126 expr_t cur_expr;
2127 av_set_iterator i;
2129 FOR_EACH_EXPR (cur_expr, i, set)
2131 if (cur_expr == expr)
2132 continue;
2133 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2134 return cur_expr;
2137 return NULL;
2140 /* If other expression is already in AVP, remove one of them. */
2141 expr_t
2142 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2144 expr_t expr2;
2146 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2147 if (expr2 != NULL)
2149 /* Reset target availability on merge, since taking it from only one
2150 of the exprs could be wrong for the other.
2151 EXPR_TARGET_AVAILABLE (expr2) = -1;
2152 EXPR_USEFULNESS (expr2) = 0;
2154 merge_expr (expr2, expr, NULL);
2156 /* Fix usefulness as it should be now REG_BR_PROB_BASE. */
2157 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2159 av_set_iter_remove (ip);
2160 return expr2;
2163 return expr;
2166 /* Return true if there is an expr that correlates to VI in SET. */
2167 bool
2168 av_set_is_in_p (av_set_t set, vinsn_t vi)
2170 return av_set_lookup (set, vi) != NULL;
2173 /* Return a copy of SET. */
2174 av_set_t
2175 av_set_copy (av_set_t set)
2177 expr_t expr;
2178 av_set_iterator i;
2179 av_set_t res = NULL;
2181 FOR_EACH_EXPR (expr, i, set)
2182 av_set_add (&res, expr);
2184 return res;
2187 /* Join two av sets that do not have common elements by attaching second set
2188 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2189 _AV_SET_NEXT of first set's last element). */
2190 static void
2191 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2193 gcc_assert (*to_tailp == NULL);
2194 *to_tailp = *fromp;
2195 *fromp = NULL;
2198 /* Make the set pointed to by TOP the union of TOP and FROMP. Clear the
2199 av_set pointed to by FROMP afterwards. */
2200 void
2201 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2203 expr_t expr1;
2204 av_set_iterator i;
2206 /* Delete from TOP all exprs that are present in FROMP. */
2207 FOR_EACH_EXPR_1 (expr1, i, top)
2209 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2211 if (expr2)
2213 merge_expr (expr2, expr1, insn);
2214 av_set_iter_remove (&i);
2218 join_distinct_sets (i.lp, fromp);
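/* Semantics sketch (illustrative): if *TOP = { a, b } and *FROMP = { b', c }
   where b and b' are equal in the vinsn_equal_p sense, then after the call
   *TOP = { a, merge (b', b), c } and *FROMP = NULL; elements unique to either
   set are kept as-is.  */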
2221 /* Same as above, but also update availability of target register in
2222 TOP judging by TO_LV_SET and FROM_LV_SET. */
2223 void
2224 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2225 regset from_lv_set, insn_t insn)
2227 expr_t expr1;
2228 av_set_iterator i;
2229 av_set_t *to_tailp, in_both_set = NULL;
2231 /* Delete from TOP all exprs that are present in FROMP. */
2232 FOR_EACH_EXPR_1 (expr1, i, top)
2234 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2236 if (expr2)
2238 /* It may be that the expressions have different destination
2239 registers, in which case we need to check liveness here. */
2240 if (EXPR_SEPARABLE_P (expr1))
2242 int regno1 = (REG_P (EXPR_LHS (expr1))
2243 ? (int) expr_dest_regno (expr1) : -1);
2244 int regno2 = (REG_P (EXPR_LHS (expr2))
2245 ? (int) expr_dest_regno (expr2) : -1);
2247 /* ??? We don't have a way to check restrictions for
2248 *other* register on the current path, we did it only
2249 for the current target register. Give up. */
2250 if (regno1 != regno2)
2251 EXPR_TARGET_AVAILABLE (expr2) = -1;
2253 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2254 EXPR_TARGET_AVAILABLE (expr2) = -1;
2256 merge_expr (expr2, expr1, insn);
2257 av_set_add_nocopy (&in_both_set, expr2);
2258 av_set_iter_remove (&i);
2260 else
2261 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2262 FROM_LV_SET. */
2263 set_unavailable_target_for_expr (expr1, from_lv_set);
2265 to_tailp = i.lp;
2267 /* These expressions are not present in TOP. Check liveness
2268 restrictions on TO_LV_SET. */
2269 FOR_EACH_EXPR (expr1, i, *fromp)
2270 set_unavailable_target_for_expr (expr1, to_lv_set);
2272 join_distinct_sets (i.lp, &in_both_set);
2273 join_distinct_sets (to_tailp, fromp);
2276 /* Clear av_set pointed to by SETP. */
2277 void
2278 av_set_clear (av_set_t *setp)
2280 expr_t expr;
2281 av_set_iterator i;
2283 FOR_EACH_EXPR_1 (expr, i, setp)
2284 av_set_iter_remove (&i);
2286 gcc_assert (*setp == NULL);
2289 /* Leave only one non-speculative element in the SETP. */
2290 void
2291 av_set_leave_one_nonspec (av_set_t *setp)
2293 expr_t expr;
2294 av_set_iterator i;
2295 bool has_one_nonspec = false;
2297 /* Keep all speculative exprs, and leave one non-speculative
2298 (the first one). */
2299 FOR_EACH_EXPR_1 (expr, i, setp)
2301 if (!EXPR_SPEC_DONE_DS (expr))
2303 if (has_one_nonspec)
2304 av_set_iter_remove (&i);
2305 else
2306 has_one_nonspec = true;
2311 /* Return the N'th element of the SET. */
2312 expr_t
2313 av_set_element (av_set_t set, int n)
2315 expr_t expr;
2316 av_set_iterator i;
2318 FOR_EACH_EXPR (expr, i, set)
2319 if (n-- == 0)
2320 return expr;
2322 gcc_unreachable ();
2323 return NULL;
2326 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2327 void
2328 av_set_substract_cond_branches (av_set_t *avp)
2330 av_set_iterator i;
2331 expr_t expr;
2333 FOR_EACH_EXPR_1 (expr, i, avp)
2334 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2335 av_set_iter_remove (&i);
2338 /* Multiply the usefulness attribute of each member of av-set AV by
2339 the value PROB / ALL_PROB. */
2340 void
2341 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2343 av_set_iterator i;
2344 expr_t expr;
2346 FOR_EACH_EXPR (expr, i, av)
2347 EXPR_USEFULNESS (expr) = (all_prob
2348 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2349 : 0);
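/* Worked example (with assumed numbers): an expression whose usefulness is
   REG_BR_PROB_BASE (i.e. fully useful) reached through a path taken with
   PROB == 4000 out of ALL_PROB == 10000 ends up with usefulness

       (REG_BR_PROB_BASE * 4000) / 10000,

   i.e. usefulness is scaled down by the relative probability of the path
   along which the expression is available; ALL_PROB == 0 zeroes it out.  */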
2352 /* Leave in AVP only those expressions, which are present in AV,
2353 and return it, merging history expressions. */
2354 void
2355 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2357 av_set_iterator i;
2358 expr_t expr, expr2;
2360 FOR_EACH_EXPR_1 (expr, i, avp)
2361 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2362 av_set_iter_remove (&i);
2363 else
2364 /* When updating av sets in bookkeeping blocks, we can add more insns
2365 there which will be transformed but the upper av sets will not
2366 reflect those transformations. We then fail to undo those
2367 when searching for such insns. So merge the history saved
2368 in the av set of the block we are processing. */
2369 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2370 EXPR_HISTORY_OF_CHANGES (expr2));
2375 /* Dependence hooks to initialize insn data. */
2377 /* This is used in hooks callable from dependence analysis when initializing
2378 instruction's data. */
2379 static struct
2381 /* Where the dependence was found (lhs/rhs). */
2382 deps_where_t where;
2384 /* The actual data object to initialize. */
2385 idata_t id;
2387 /* True when the insn should not be made clonable. */
2388 bool force_unique_p;
2390 /* True when insn should be treated as of type USE, i.e. never renamed. */
2391 bool force_use_p;
2392 } deps_init_id_data;
2395 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2396 clonable. */
2397 static void
2398 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2400 int type;
2402 /* Determine whether INSN could be cloned and return appropriate vinsn type.
2403 Those clonable insns which can be separated into lhs and rhs have type SET.
2404 Other clonable insns have type USE. */
2405 type = GET_CODE (insn);
2407 /* Only regular insns could be cloned. */
2408 if (type == INSN && !force_unique_p)
2409 type = SET;
2410 else if (type == JUMP_INSN && simplejump_p (insn))
2411 type = PC;
2412 else if (type == DEBUG_INSN)
2413 type = !force_unique_p ? USE : INSN;
2415 IDATA_TYPE (id) = type;
2416 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2417 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2418 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
2421 /* Start initializing insn data. */
2422 static void
2423 deps_init_id_start_insn (insn_t insn)
2425 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2427 setup_id_for_insn (deps_init_id_data.id, insn,
2428 deps_init_id_data.force_unique_p);
2429 deps_init_id_data.where = DEPS_IN_INSN;
2432 /* Start initializing lhs data. */
2433 static void
2434 deps_init_id_start_lhs (rtx lhs)
2436 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2437 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2439 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2441 IDATA_LHS (deps_init_id_data.id) = lhs;
2442 deps_init_id_data.where = DEPS_IN_LHS;
2446 /* Finish initializing lhs data. */
2447 static void
2448 deps_init_id_finish_lhs (void)
2450 deps_init_id_data.where = DEPS_IN_INSN;
2453 /* Note a set of REGNO. */
2454 static void
2455 deps_init_id_note_reg_set (int regno)
2457 haifa_note_reg_set (regno);
2459 if (deps_init_id_data.where == DEPS_IN_RHS)
2460 deps_init_id_data.force_use_p = true;
2462 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2463 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2465 #ifdef STACK_REGS
2466 /* Make instructions that set stack registers to be ineligible for
2467 renaming to avoid issues with find_used_regs. */
2468 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2469 deps_init_id_data.force_use_p = true;
2470 #endif
2473 /* Note a clobber of REGNO. */
2474 static void
2475 deps_init_id_note_reg_clobber (int regno)
2477 haifa_note_reg_clobber (regno);
2479 if (deps_init_id_data.where == DEPS_IN_RHS)
2480 deps_init_id_data.force_use_p = true;
2482 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2483 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2486 /* Note a use of REGNO. */
2487 static void
2488 deps_init_id_note_reg_use (int regno)
2490 haifa_note_reg_use (regno);
2492 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2493 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2496 /* Start initializing rhs data. */
2497 static void
2498 deps_init_id_start_rhs (rtx rhs)
2500 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2502 /* And there was no sel_deps_reset_to_insn (). */
2503 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2505 IDATA_RHS (deps_init_id_data.id) = rhs;
2506 deps_init_id_data.where = DEPS_IN_RHS;
2510 /* Finish initializing rhs data. */
2511 static void
2512 deps_init_id_finish_rhs (void)
2514 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2515 || deps_init_id_data.where == DEPS_IN_INSN);
2516 deps_init_id_data.where = DEPS_IN_INSN;
2519 /* Finish initializing insn data. */
2520 static void
2521 deps_init_id_finish_insn (void)
2523 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2525 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2527 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2528 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2530 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2531 || deps_init_id_data.force_use_p)
2533 /* This should be a USE, as we don't want to schedule its RHS
2534 separately. However, we still want to have them recorded
2535 for the purposes of substitution. That's why we don't
2536 simply call downgrade_to_use () here. */
2537 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2538 gcc_assert (!lhs == !rhs);
2540 IDATA_TYPE (deps_init_id_data.id) = USE;
2544 deps_init_id_data.where = DEPS_IN_NOWHERE;
2547 /* This is dependence info used for initializing insn's data. */
2548 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2550 /* This initializes most of the static part of the above structure. */
2551 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2553 NULL,
2555 deps_init_id_start_insn,
2556 deps_init_id_finish_insn,
2557 deps_init_id_start_lhs,
2558 deps_init_id_finish_lhs,
2559 deps_init_id_start_rhs,
2560 deps_init_id_finish_rhs,
2561 deps_init_id_note_reg_set,
2562 deps_init_id_note_reg_clobber,
2563 deps_init_id_note_reg_use,
2564 NULL, /* note_mem_dep */
2565 NULL, /* note_dep */
2567 0, /* use_cselib */
2568 0, /* use_deps_list */
2569 0 /* generate_spec_deps */
2572 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2573 we don't actually need information about lhs and rhs. */
2574 static void
2575 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2577 rtx pat = PATTERN (insn);
2579 if (NONJUMP_INSN_P (insn)
2580 && GET_CODE (pat) == SET
2581 && !force_unique_p)
2583 IDATA_RHS (id) = SET_SRC (pat);
2584 IDATA_LHS (id) = SET_DEST (pat);
2586 else
2587 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
2590 /* Possibly downgrade INSN to USE. */
2591 static void
2592 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2594 bool must_be_use = false;
2595 unsigned uid = INSN_UID (insn);
2596 df_ref *rec;
2597 rtx lhs = IDATA_LHS (id);
2598 rtx rhs = IDATA_RHS (id);
2600 /* We downgrade only SETs. */
2601 if (IDATA_TYPE (id) != SET)
2602 return;
2604 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2606 IDATA_TYPE (id) = USE;
2607 return;
2610 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2612 df_ref def = *rec;
2614 if (DF_REF_INSN (def)
2615 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2616 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2618 must_be_use = true;
2619 break;
2622 #ifdef STACK_REGS
2623 /* Make instructions that set stack registers to be ineligible for
2624 renaming to avoid issues with find_used_regs. */
2625 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2627 must_be_use = true;
2628 break;
2630 #endif
2633 if (must_be_use)
2634 IDATA_TYPE (id) = USE;
2637 /* Setup register sets describing INSN in ID. */
2638 static void
2639 setup_id_reg_sets (idata_t id, insn_t insn)
2641 unsigned uid = INSN_UID (insn);
2642 df_ref *rec;
2643 regset tmp = get_clear_regset_from_pool ();
2645 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2647 df_ref def = *rec;
2648 unsigned int regno = DF_REF_REGNO (def);
2650 /* Post modifies are treated like clobbers by sched-deps.c. */
2651 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2652 | DF_REF_PRE_POST_MODIFY)))
2653 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2654 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2656 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2658 #ifdef STACK_REGS
2659 /* For stack registers, treat writes to them as writes
2660 to the first one to be consistent with sched-deps.c. */
2661 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2662 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2663 #endif
2665 /* Mark special refs that generate read/write def pair. */
2666 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2667 || regno == STACK_POINTER_REGNUM)
2668 bitmap_set_bit (tmp, regno);
2671 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2673 df_ref use = *rec;
2674 unsigned int regno = DF_REF_REGNO (use);
2676 /* When these refs are met for the first time, skip them, as
2677 these uses are just counterparts of some defs. */
2678 if (bitmap_bit_p (tmp, regno))
2679 bitmap_clear_bit (tmp, regno);
2680 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2682 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2684 #ifdef STACK_REGS
2685 /* For stack registers, treat reads from them as reads from
2686 the first one to be consistent with sched-deps.c. */
2687 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2688 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2689 #endif
2693 return_regset_to_pool (tmp);
2696 /* Initialize instruction data for INSN in ID using DF's data. */
2697 static void
2698 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2700 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2702 setup_id_for_insn (id, insn, force_unique_p);
2703 setup_id_lhs_rhs (id, insn, force_unique_p);
2705 if (INSN_NOP_P (insn))
2706 return;
2708 maybe_downgrade_id_to_use (id, insn);
2709 setup_id_reg_sets (id, insn);
2712 /* Initialize instruction data for INSN in ID. */
2713 static void
2714 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2716 struct deps_desc _dc, *dc = &_dc;
2718 deps_init_id_data.where = DEPS_IN_NOWHERE;
2719 deps_init_id_data.id = id;
2720 deps_init_id_data.force_unique_p = force_unique_p;
2721 deps_init_id_data.force_use_p = false;
2723 init_deps (dc, false);
2725 memcpy (&deps_init_id_sched_deps_info,
2726 &const_deps_init_id_sched_deps_info,
2727 sizeof (deps_init_id_sched_deps_info));
2729 if (spec_info != NULL)
2730 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2732 sched_deps_info = &deps_init_id_sched_deps_info;
2734 deps_analyze_insn (dc, insn);
2736 free_deps (dc);
2738 deps_init_id_data.id = NULL;
2742 struct sched_scan_info_def
2744 /* This hook notifies the scheduler frontend to extend its internal per basic
2745 block data structures. This hook should be called once before a series of
2746 calls to bb_init (). */
2747 void (*extend_bb) (void);
2749 /* This hook makes the scheduler frontend initialize its internal data
2750 structures for the passed basic block. */
2751 void (*init_bb) (basic_block);
2753 /* This hook notifies the scheduler frontend to extend its internal per insn data
2754 structures. This hook should be called once before a series of calls to
2755 insn_init (). */
2756 void (*extend_insn) (void);
2758 /* This hook makes the scheduler frontend initialize its internal data
2759 structures for the passed insn. */
2760 void (*init_insn) (rtx);
2763 /* A driver function to add a set of basic blocks (BBS) to the
2764 scheduling region. */
2765 static void
2766 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2768 unsigned i;
2769 basic_block bb;
2771 if (ssi->extend_bb)
2772 ssi->extend_bb ();
2774 if (ssi->init_bb)
2775 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
2776 ssi->init_bb (bb);
2778 if (ssi->extend_insn)
2779 ssi->extend_insn ();
2781 if (ssi->init_insn)
2782 FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
2784 rtx insn;
2786 FOR_BB_INSNS (bb, insn)
2787 ssi->init_insn (insn);
2791 /* Implement hooks for collecting fundamental insn properties, such as whether
2792 the insn is an ASM or is within a SCHED_GROUP. */
2794 /* True when the "one-time init" data for INSN has already been initialized. */
2795 static bool
2796 first_time_insn_init (insn_t insn)
2798 return INSN_LIVE (insn) == NULL;
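/* Note: this works because INSN_LIVE is allocated from the regset pool in
   init_first_time_insn_data and reset to NULL in free_first_time_insn_data,
   so a NULL live set reliably marks an insn whose per-insn data has never
   been initialized.  */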
2801 /* Hash an entry in a transformed_insns hashtable. */
2802 static hashval_t
2803 hash_transformed_insns (const void *p)
2805 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2808 /* Compare the entries in a transformed_insns hashtable. */
2809 static int
2810 eq_transformed_insns (const void *p, const void *q)
2812 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2813 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2815 if (INSN_UID (i1) == INSN_UID (i2))
2816 return 1;
2817 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2820 /* Free an entry in a transformed_insns hashtable. */
2821 static void
2822 free_transformed_insns (void *p)
2824 struct transformed_insns *pti = (struct transformed_insns *) p;
2826 vinsn_detach (pti->vinsn_old);
2827 vinsn_detach (pti->vinsn_new);
2828 free (pti);
2831 /* Init the s_i_d data for INSN which should be inited just once, when
2832 we first see the insn. */
2833 static void
2834 init_first_time_insn_data (insn_t insn)
2836 /* This should not be set if this is the first time we init data for
2837 insn. */
2838 gcc_assert (first_time_insn_init (insn));
2840 /* These are needed for nops too. */
2841 INSN_LIVE (insn) = get_regset_from_pool ();
2842 INSN_LIVE_VALID_P (insn) = false;
2844 if (!INSN_NOP_P (insn))
2846 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2847 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2848 INSN_TRANSFORMED_INSNS (insn)
2849 = htab_create (16, hash_transformed_insns,
2850 eq_transformed_insns, free_transformed_insns);
2851 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2855 /* Free almost all above data for INSN that is scheduled already.
2856 Used for extra-large basic blocks. */
2857 void
2858 free_data_for_scheduled_insn (insn_t insn)
2860 gcc_assert (! first_time_insn_init (insn));
2862 if (! INSN_ANALYZED_DEPS (insn))
2863 return;
2865 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2866 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2867 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2869 /* This is allocated only for bookkeeping insns. */
2870 if (INSN_ORIGINATORS (insn))
2871 BITMAP_FREE (INSN_ORIGINATORS (insn));
2872 free_deps (&INSN_DEPS_CONTEXT (insn));
2874 INSN_ANALYZED_DEPS (insn) = NULL;
2876 /* Clear the readonly flag so we would ICE when trying to recalculate
2877 the deps context (as we believe that it should not happen). */
2878 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2881 /* Free the same data as above for INSN. */
2882 static void
2883 free_first_time_insn_data (insn_t insn)
2885 gcc_assert (! first_time_insn_init (insn));
2887 free_data_for_scheduled_insn (insn);
2888 return_regset_to_pool (INSN_LIVE (insn));
2889 INSN_LIVE (insn) = NULL;
2890 INSN_LIVE_VALID_P (insn) = false;
2893 /* Initialize region-scope data structures for basic blocks. */
2894 static void
2895 init_global_and_expr_for_bb (basic_block bb)
2897 if (sel_bb_empty_p (bb))
2898 return;
2900 invalidate_av_set (bb);
2903 /* Data for global dependency analysis (to initialize CANT_MOVE and
2904 SCHED_GROUP_P). */
2905 static struct
2907 /* Previous insn. */
2908 insn_t prev_insn;
2909 } init_global_data;
2911 /* Determine if INSN is in the sched_group, is an asm or should not be
2912 cloned. After that initialize its expr. */
2913 static void
2914 init_global_and_expr_for_insn (insn_t insn)
2916 if (LABEL_P (insn))
2917 return;
2919 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2921 init_global_data.prev_insn = NULL_RTX;
2922 return;
2925 gcc_assert (INSN_P (insn));
2927 if (SCHED_GROUP_P (insn))
2928 /* Setup a sched_group. */
2930 insn_t prev_insn = init_global_data.prev_insn;
2932 if (prev_insn)
2933 INSN_SCHED_NEXT (prev_insn) = insn;
2935 init_global_data.prev_insn = insn;
2937 else
2938 init_global_data.prev_insn = NULL_RTX;
2940 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2941 || asm_noperands (PATTERN (insn)) >= 0)
2942 /* Mark INSN as an asm. */
2943 INSN_ASM_P (insn) = true;
2946 bool force_unique_p;
2947 ds_t spec_done_ds;
2949 /* Certain instructions cannot be cloned, and frame related insns and
2950 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2951 their block. */
2952 if (prologue_epilogue_contains (insn))
2954 if (RTX_FRAME_RELATED_P (insn))
2955 CANT_MOVE (insn) = 1;
2956 else
2958 rtx note;
2959 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2960 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2961 && ((enum insn_note) INTVAL (XEXP (note, 0))
2962 == NOTE_INSN_EPILOGUE_BEG))
2964 CANT_MOVE (insn) = 1;
2965 break;
2968 force_unique_p = true;
2970 else
2971 if (CANT_MOVE (insn)
2972 || INSN_ASM_P (insn)
2973 || SCHED_GROUP_P (insn)
2974 || CALL_P (insn)
2975 /* Exception handling insns are always unique. */
2976 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2977 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
2978 || control_flow_insn_p (insn)
2979 || volatile_insn_p (PATTERN (insn))
2980 || (targetm.cannot_copy_insn_p
2981 && targetm.cannot_copy_insn_p (insn)))
2982 force_unique_p = true;
2983 else
2984 force_unique_p = false;
2986 if (targetm.sched.get_insn_spec_ds)
2988 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
2989 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
2991 else
2992 spec_done_ds = 0;
2994 /* Initialize INSN's expr. */
2995 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
2996 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
2997 spec_done_ds, 0, 0, NULL, true, false, false, false,
2998 CANT_MOVE (insn));
3001 init_first_time_insn_data (insn);
3004 /* Scan the region and initialize instruction data for basic blocks BBS. */
3005 void
3006 sel_init_global_and_expr (bb_vec_t bbs)
3008 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
3009 const struct sched_scan_info_def ssi =
3011 NULL, /* extend_bb */
3012 init_global_and_expr_for_bb, /* init_bb */
3013 extend_insn_data, /* extend_insn */
3014 init_global_and_expr_for_insn /* init_insn */
3017 sched_scan (&ssi, bbs);
3020 /* Finalize region-scope data structures for basic blocks. */
3021 static void
3022 finish_global_and_expr_for_bb (basic_block bb)
3024 av_set_clear (&BB_AV_SET (bb));
3025 BB_AV_LEVEL (bb) = 0;
3028 /* Finalize INSN's data. */
3029 static void
3030 finish_global_and_expr_insn (insn_t insn)
3032 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3033 return;
3035 gcc_assert (INSN_P (insn));
3037 if (INSN_LUID (insn) > 0)
3039 free_first_time_insn_data (insn);
3040 INSN_WS_LEVEL (insn) = 0;
3041 CANT_MOVE (insn) = 0;
3043 /* We can no longer assert this, as vinsns of this insn could be
3044 easily live in other insn's caches. This should be changed to
3045 a counter-like approach among all vinsns. */
3046 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3047 clear_expr (INSN_EXPR (insn));
3051 /* Finalize per instruction data for the whole region. */
3052 void
3053 sel_finish_global_and_expr (void)
3056 bb_vec_t bbs;
3057 int i;
3059 bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
3061 for (i = 0; i < current_nr_blocks; i++)
3062 VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
3064 /* Clear AV_SETs and INSN_EXPRs. */
3066 const struct sched_scan_info_def ssi =
3068 NULL, /* extend_bb */
3069 finish_global_and_expr_for_bb, /* init_bb */
3070 NULL, /* extend_insn */
3071 finish_global_and_expr_insn /* init_insn */
3074 sched_scan (&ssi, bbs);
3077 VEC_free (basic_block, heap, bbs);
3080 finish_insns ();
3084 /* In the below hooks, we merely calculate whether or not a dependence
3085 exists, and in what part of the insn. However, we will need more data
3086 when we start caching dependence requests. */
3088 /* Container to hold information for dependency analysis. */
3089 static struct
3091 deps_t dc;
3093 /* A variable to track which part of rtx we are scanning in
3094 sched-deps.c: sched_analyze_insn (). */
3095 deps_where_t where;
3097 /* Current producer. */
3098 insn_t pro;
3100 /* Current consumer. */
3101 vinsn_t con;
3103 /* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3104 X is from { INSN, LHS, RHS }. */
3105 ds_t has_dep_p[DEPS_IN_NOWHERE];
3106 } has_dependence_data;
3108 /* Start analyzing dependencies of INSN. */
3109 static void
3110 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3112 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3114 has_dependence_data.where = DEPS_IN_INSN;
3117 /* Finish analyzing dependencies of an insn. */
3118 static void
3119 has_dependence_finish_insn (void)
3121 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3123 has_dependence_data.where = DEPS_IN_NOWHERE;
3126 /* Start analyzing dependencies of LHS. */
3127 static void
3128 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3130 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3132 if (VINSN_LHS (has_dependence_data.con) != NULL)
3133 has_dependence_data.where = DEPS_IN_LHS;
3136 /* Finish analyzing dependencies of an lhs. */
3137 static void
3138 has_dependence_finish_lhs (void)
3140 has_dependence_data.where = DEPS_IN_INSN;
3143 /* Start analyzing dependencies of RHS. */
3144 static void
3145 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3147 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3149 if (VINSN_RHS (has_dependence_data.con) != NULL)
3150 has_dependence_data.where = DEPS_IN_RHS;
3153 /* Finish analyzing dependencies of an rhs. */
3154 static void
3155 has_dependence_finish_rhs (void)
3157 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3158 || has_dependence_data.where == DEPS_IN_INSN);
3160 has_dependence_data.where = DEPS_IN_INSN;
3163 /* Note a set of REGNO. */
3164 static void
3165 has_dependence_note_reg_set (int regno)
3167 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3169 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3170 VINSN_INSN_RTX
3171 (has_dependence_data.con)))
3173 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3175 if (reg_last->sets != NULL
3176 || reg_last->clobbers != NULL)
3177 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3179 if (reg_last->uses)
3180 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3184 /* Note a clobber of REGNO. */
3185 static void
3186 has_dependence_note_reg_clobber (int regno)
3188 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3190 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3191 VINSN_INSN_RTX
3192 (has_dependence_data.con)))
3194 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3196 if (reg_last->sets)
3197 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3199 if (reg_last->uses)
3200 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3204 /* Note a use of REGNO. */
3205 static void
3206 has_dependence_note_reg_use (int regno)
3208 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3210 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3211 VINSN_INSN_RTX
3212 (has_dependence_data.con)))
3214 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3216 if (reg_last->sets)
3217 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3219 if (reg_last->clobbers)
3220 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3222 /* Handle BE_IN_SPEC. */
3223 if (reg_last->uses)
3225 ds_t pro_spec_checked_ds;
3227 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3228 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3230 if (pro_spec_checked_ds != 0
3231 && bitmap_bit_p (INSN_REG_SETS (has_dependence_data.pro), regno))
3232 /* Merge BE_IN_SPEC bits into *DSP. */
3233 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3234 NULL_RTX, NULL_RTX);
3239 /* Note a memory dependence. */
3240 static void
3241 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3242 rtx pending_mem ATTRIBUTE_UNUSED,
3243 insn_t pending_insn ATTRIBUTE_UNUSED,
3244 ds_t ds ATTRIBUTE_UNUSED)
3246 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3247 VINSN_INSN_RTX (has_dependence_data.con)))
3249 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3251 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3255 /* Note a dependence. */
3256 static void
3257 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3258 ds_t ds ATTRIBUTE_UNUSED)
3260 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3261 VINSN_INSN_RTX (has_dependence_data.con)))
3263 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3265 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3269 /* Mark the insn as having a hard dependence that prevents speculation. */
3270 void
3271 sel_mark_hard_insn (rtx insn)
3273 int i;
3275 /* Only work when we're in has_dependence_p mode.
3276 ??? This is a hack, this should actually be a hook. */
3277 if (!has_dependence_data.dc || !has_dependence_data.pro)
3278 return;
3280 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3281 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3283 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3284 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3287 /* This structure holds the hooks for the dependency analysis used when
3288 actually processing dependencies in the scheduler. */
3289 static struct sched_deps_info_def has_dependence_sched_deps_info;
3291 /* This initializes most of the fields of the above structure. */
3292 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3294 NULL,
3296 has_dependence_start_insn,
3297 has_dependence_finish_insn,
3298 has_dependence_start_lhs,
3299 has_dependence_finish_lhs,
3300 has_dependence_start_rhs,
3301 has_dependence_finish_rhs,
3302 has_dependence_note_reg_set,
3303 has_dependence_note_reg_clobber,
3304 has_dependence_note_reg_use,
3305 has_dependence_note_mem_dep,
3306 has_dependence_note_dep,
3308 0, /* use_cselib */
3309 0, /* use_deps_list */
3310 0 /* generate_spec_deps */
3313 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3314 static void
3315 setup_has_dependence_sched_deps_info (void)
3317 memcpy (&has_dependence_sched_deps_info,
3318 &const_has_dependence_sched_deps_info,
3319 sizeof (has_dependence_sched_deps_info));
3321 if (spec_info != NULL)
3322 has_dependence_sched_deps_info.generate_spec_deps = 1;
3324 sched_deps_info = &has_dependence_sched_deps_info;
3327 /* Remove all dependences found and recorded in has_dependence_data array. */
3328 void
3329 sel_clear_has_dependence (void)
3331 int i;
3333 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3334 has_dependence_data.has_dep_p[i] = 0;
3337 /* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3338 to the dependence information array in HAS_DEP_PP. */
3339 ds_t
3340 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3342 int i;
3343 ds_t ds;
3344 struct deps_desc *dc;
3346 if (INSN_SIMPLEJUMP_P (pred))
3347 /* Unconditional jump is just a transfer of control flow.
3348 Ignore it. */
3349 return false;
3351 dc = &INSN_DEPS_CONTEXT (pred);
3353 /* We init this field lazily. */
3354 if (dc->reg_last == NULL)
3355 init_deps_reg_last (dc);
3357 if (!dc->readonly)
3359 has_dependence_data.pro = NULL;
3360 /* Initialize empty dep context with information about PRED. */
3361 advance_deps_context (dc, pred);
3362 dc->readonly = 1;
3365 has_dependence_data.where = DEPS_IN_NOWHERE;
3366 has_dependence_data.pro = pred;
3367 has_dependence_data.con = EXPR_VINSN (expr);
3368 has_dependence_data.dc = dc;
3370 sel_clear_has_dependence ();
3372 /* Now catch all dependencies that would be generated between PRED and
3373 INSN. */
3374 setup_has_dependence_sched_deps_info ();
3375 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3376 has_dependence_data.dc = NULL;
3378 /* When a barrier was found, set DEPS_IN_INSN bits. */
3379 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3380 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3381 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3382 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3384 /* Do not allow stores to memory to move through checks. Currently
3385 we don't move this to sched-deps.c as the check doesn't have
3386 obvious places to which this dependence can be attached.
3387 FIXME: this should go to a hook. */
3388 if (EXPR_LHS (expr)
3389 && MEM_P (EXPR_LHS (expr))
3390 && sel_insn_is_speculation_check (pred))
3391 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3393 *has_dep_pp = has_dependence_data.has_dep_p;
3394 ds = 0;
3395 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3396 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3397 NULL_RTX, NULL_RTX);
3399 return ds;
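/* Usage sketch (simplified and illustrative, not the exact code used
   elsewhere in the scheduler):

       ds_t *has_dep_p;
       ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

       if (full_ds == 0)
         ;  nothing in PRED blocks moving EXPR up through it
       else if (has_dep_p[DEPS_IN_RHS] != 0 && has_dep_p[DEPS_IN_LHS] == 0)
         ;  only the rhs conflicts, so substitution may still be possible

   HAS_DEP_P is indexed by the deps_where_t values below DEPS_IN_NOWHERE.  */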
3403 /* Dependence hooks implementation that checks dependence latency constraints
3404 on the insns being scheduled. The entry point for these routines is
3405 tick_check_p predicate. */
3407 static struct
3409 /* An expr we are currently checking. */
3410 expr_t expr;
3412 /* A minimal cycle for its scheduling. */
3413 int cycle;
3415 /* Whether we have seen a true dependence while checking. */
3416 bool seen_true_dep_p;
3417 } tick_check_data;
3419 /* Update minimal scheduling cycle for tick_check_insn given that it depends
3420 on PRO with status DS and weight DW. */
3421 static void
3422 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3424 expr_t con_expr = tick_check_data.expr;
3425 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3427 if (con_insn != pro_insn)
3429 enum reg_note dt;
3430 int tick;
3432 if (/* PROducer was removed from above due to pipelining. */
3433 !INSN_IN_STREAM_P (pro_insn)
3434 /* Or PROducer was originally on the next iteration regarding the
3435 CONsumer. */
3436 || (INSN_SCHED_TIMES (pro_insn)
3437 - EXPR_SCHED_TIMES (con_expr)) > 1)
3438 /* Don't count this dependence. */
3439 return;
3441 dt = ds_to_dt (ds);
3442 if (dt == REG_DEP_TRUE)
3443 tick_check_data.seen_true_dep_p = true;
3445 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3448 dep_def _dep, *dep = &_dep;
3450 init_dep (dep, pro_insn, con_insn, dt);
3452 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3455 /* When there are several kinds of dependencies between pro and con,
3456 only REG_DEP_TRUE should be taken into account. */
3457 if (tick > tick_check_data.cycle
3458 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3459 tick_check_data.cycle = tick;
3463 /* An implementation of note_dep hook. */
3464 static void
3465 tick_check_note_dep (insn_t pro, ds_t ds)
3467 tick_check_dep_with_dw (pro, ds, 0);
3470 /* An implementation of note_mem_dep hook. */
3471 static void
3472 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3474 dw_t dw;
3476 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3477 ? estimate_dep_weak (mem1, mem2)
3478 : 0);
3480 tick_check_dep_with_dw (pro, ds, dw);
3483 /* This structure contains hooks for dependence analysis used when determining
3484 whether an insn is ready for scheduling. */
3485 static struct sched_deps_info_def tick_check_sched_deps_info =
3487 NULL,
3489 NULL,
3490 NULL,
3491 NULL,
3492 NULL,
3493 NULL,
3494 NULL,
3495 haifa_note_reg_set,
3496 haifa_note_reg_clobber,
3497 haifa_note_reg_use,
3498 tick_check_note_mem_dep,
3499 tick_check_note_dep,
3501 0, 0, 0
3504 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3505 scheduled. Return 0 if all data from producers in DC is ready. */
3507 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3509 int cycles_left;
3510 /* Initialize variables. */
3511 tick_check_data.expr = expr;
3512 tick_check_data.cycle = 0;
3513 tick_check_data.seen_true_dep_p = false;
3514 sched_deps_info = &tick_check_sched_deps_info;
3516 gcc_assert (!dc->readonly);
3517 dc->readonly = 1;
3518 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3519 dc->readonly = 0;
3521 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3523 return cycles_left >= 0 ? cycles_left : 0;
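/* Numerical example (hypothetical values): if the latest producer of EXPR
   was scheduled on cycle 3 and the true dependence between them costs 2
   cycles, tick_check_data.cycle becomes 5; with FENCE_CYCLE (fence) == 4
   the function returns 1, i.e. EXPR must wait one more cycle before it is
   ready on this fence.  */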
3527 /* Functions to work with insns. */
3529 /* Returns true if LHS of INSN is the same as DEST of an insn
3530 being moved. */
3531 bool
3532 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3534 rtx lhs = INSN_LHS (insn);
3536 if (lhs == NULL || dest == NULL)
3537 return false;
3539 return rtx_equal_p (lhs, dest);
3542 /* Return s_i_d entry of INSN. Callable from debugger. */
3543 sel_insn_data_def
3544 insn_sid (insn_t insn)
3546 return *SID (insn);
3549 /* True when INSN is a speculative check. We can tell this by looking
3550 at the data structures of the selective scheduler, not by examining
3551 the pattern. */
3552 bool
3553 sel_insn_is_speculation_check (rtx insn)
3555 return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
3558 /* Extracts machine mode MODE and destination location DST_LOC
3559 for given INSN. */
3560 void
3561 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3563 rtx pat = PATTERN (insn);
3565 gcc_assert (dst_loc);
3566 gcc_assert (GET_CODE (pat) == SET);
3568 *dst_loc = SET_DEST (pat);
3570 gcc_assert (*dst_loc);
3571 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3573 if (mode)
3574 *mode = GET_MODE (*dst_loc);
3577 /* Returns true when moving through JUMP will result in bookkeeping
3578 creation. */
3579 bool
3580 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3582 insn_t succ;
3583 succ_iterator si;
3585 FOR_EACH_SUCC (succ, si, jump)
3586 if (sel_num_cfg_preds_gt_1 (succ))
3587 return true;
3589 return false;
3592 /* Return 'true' if INSN is the only one in its basic block. */
3593 static bool
3594 insn_is_the_only_one_in_bb_p (insn_t insn)
3596 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3599 #ifdef ENABLE_CHECKING
3600 /* Check that the region we're scheduling still has at most one
3601 backedge. */
3602 static void
3603 verify_backedges (void)
3605 if (pipelining_p)
3607 int i, n = 0;
3608 edge e;
3609 edge_iterator ei;
3611 for (i = 0; i < current_nr_blocks; i++)
3612 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3613 if (in_current_region_p (e->dest)
3614 && BLOCK_TO_BB (e->dest->index) < i)
3615 n++;
3617 gcc_assert (n <= 1);
3620 #endif
3623 /* Functions to work with control flow. */
3625 /* Recompute BLOCK_TO_BB and BB_FOR_BLOCK for current region so that blocks
3626 are sorted in topological order (it might have been invalidated by
3627 redirecting an edge). */
3628 static void
3629 sel_recompute_toporder (void)
3631 int i, n, rgn;
3632 int *postorder, n_blocks;
3634 postorder = XALLOCAVEC (int, n_basic_blocks);
3635 n_blocks = post_order_compute (postorder, false, false);
3637 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3638 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3639 if (CONTAINING_RGN (postorder[i]) == rgn)
3641 BLOCK_TO_BB (postorder[i]) = n;
3642 BB_TO_BLOCK (n) = postorder[i];
3643 n++;
3646 /* Assert that we updated info for all blocks. We may miss some blocks if
3647 this function is called when redirecting an edge made a block
3648 unreachable, but that block is not deleted yet. */
3649 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3652 /* Tidy the possibly empty block BB. Return true if BB was merged away or removed. */
3653 static bool
3654 maybe_tidy_empty_bb (basic_block bb)
3656 basic_block succ_bb, pred_bb;
3657 VEC (basic_block, heap) *dom_bbs;
3658 edge e;
3659 edge_iterator ei;
3660 bool rescan_p;
3662 /* Keep empty bb only if this block immediately precedes EXIT and
3663 has incoming non-fallthrough edge, or it has no predecessors or
3664 successors. Otherwise remove it. */
3665 if (!sel_bb_empty_p (bb)
3666 || (single_succ_p (bb)
3667 && single_succ (bb) == EXIT_BLOCK_PTR
3668 && (!single_pred_p (bb)
3669 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3670 || EDGE_COUNT (bb->preds) == 0
3671 || EDGE_COUNT (bb->succs) == 0)
3672 return false;
3674 /* Do not attempt to redirect complex edges. */
3675 FOR_EACH_EDGE (e, ei, bb->preds)
3676 if (e->flags & EDGE_COMPLEX)
3677 return false;
3679 free_data_sets (bb);
3681 /* Do not delete BB if it has more than one successor.
3682 That can occur when we are moving a jump. */
3683 if (!single_succ_p (bb))
3685 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3686 sel_merge_blocks (bb->prev_bb, bb);
3687 return true;
3690 succ_bb = single_succ (bb);
3691 rescan_p = true;
3692 pred_bb = NULL;
3693 dom_bbs = NULL;
3695 /* Redirect all non-fallthru edges to the next bb. */
3696 while (rescan_p)
3698 rescan_p = false;
3700 FOR_EACH_EDGE (e, ei, bb->preds)
3702 pred_bb = e->src;
3704 if (!(e->flags & EDGE_FALLTHRU))
3706 /* We can not invalidate computed topological order by moving
3707 the edge destination block (E->SUCC) along a fallthru edge.
3709 We will update dominators here only when redirecting produces
3710 an unreachable block; otherwise
3711 sel_redirect_edge_and_branch will take care of it. */
3712 if (e->dest != bb
3713 && single_pred_p (e->dest))
3714 VEC_safe_push (basic_block, heap, dom_bbs, e->dest);
3715 sel_redirect_edge_and_branch (e, succ_bb);
3716 rescan_p = true;
3717 break;
3719 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3720 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3721 still have to adjust it. */
3722 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3724 /* If possible, try to remove the unneeded conditional jump. */
3725 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3726 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3728 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3729 tidy_fallthru_edge (e);
3731 else
3732 sel_redirect_edge_and_branch (e, succ_bb);
3733 rescan_p = true;
3734 break;
3739 if (can_merge_blocks_p (bb->prev_bb, bb))
3740 sel_merge_blocks (bb->prev_bb, bb);
3741 else
3743 /* This is a block without fallthru predecessor. Just delete it. */
3744 gcc_assert (pred_bb != NULL);
3746 if (in_current_region_p (pred_bb))
3747 move_bb_info (pred_bb, bb);
3748 remove_empty_bb (bb, true);
3751 if (!VEC_empty (basic_block, dom_bbs))
3753 VEC_safe_push (basic_block, heap, dom_bbs, succ_bb);
3754 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
3755 VEC_free (basic_block, heap, dom_bbs);
3758 return true;
3761 /* Tidy the control flow after we have removed the original insn from
3762 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3763 is true, also try to optimize control flow on non-empty blocks. */
3764 bool
3765 tidy_control_flow (basic_block xbb, bool full_tidying)
3767 bool changed = true;
3768 insn_t first, last;
3770 /* First check whether XBB is empty. */
3771 changed = maybe_tidy_empty_bb (xbb);
3772 if (changed || !full_tidying)
3773 return changed;
3775 /* Check if there is an unnecessary jump left after the insn. */
3776 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
3777 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3778 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3780 if (sel_remove_insn (BB_END (xbb), false, false))
3781 return true;
3782 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3785 first = sel_bb_head (xbb);
3786 last = sel_bb_end (xbb);
3787 if (MAY_HAVE_DEBUG_INSNS)
3789 if (first != last && DEBUG_INSN_P (first))
3791 first = NEXT_INSN (first);
3792 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3794 if (first != last && DEBUG_INSN_P (last))
3796 last = PREV_INSN (last);
3797 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3799 /* Check if the previous basic block ends with an unnecessary jump to the
3800 next basic block, left over after removing INSN from the stream.
3801 If so, remove that jump and redirect the edge to the current
3802 basic block (where INSN was before deletion). This way, when the NOP
3803 is deleted several instructions later together with its
3804 basic block, we will not be left with a jump to the next instruction,
3805 which can be harmful. */
3806 if (first == last
3807 && !sel_bb_empty_p (xbb)
3808 && INSN_NOP_P (last)
3809 /* Flow goes fallthru from current block to the next. */
3810 && EDGE_COUNT (xbb->succs) == 1
3811 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3812 /* When successor is an EXIT block, it may not be the next block. */
3813 && single_succ (xbb) != EXIT_BLOCK_PTR
3814 /* And unconditional jump in previous basic block leads to
3815 next basic block of XBB and this jump can be safely removed. */
3816 && in_current_region_p (xbb->prev_bb)
3817 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
3818 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3819 /* Also this jump is not at the scheduling boundary. */
3820 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3822 bool recompute_toporder_p;
3823 /* Clear data structures of jump - jump itself will be removed
3824 by sel_redirect_edge_and_branch. */
3825 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3826 recompute_toporder_p
3827 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3829 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3831 /* It can turn out that, after removing the unused jump, the basic
3832 block that contained that jump becomes empty too. In such a case
3833 remove it as well. */
3834 if (sel_bb_empty_p (xbb->prev_bb))
3835 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3836 if (recompute_toporder_p)
3837 sel_recompute_toporder ();
3840 #ifdef ENABLE_CHECKING
3841 verify_backedges ();
3842 verify_dominators (CDI_DOMINATORS);
3843 #endif
3845 return changed;
3848 /* Purge meaningless empty blocks in the middle of a region. */
3849 void
3850 purge_empty_blocks (void)
3852 int i;
3854 /* Do not attempt to delete the first basic block in the region. */
3855 for (i = 1; i < current_nr_blocks; )
3857 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3859 if (maybe_tidy_empty_bb (b))
3860 continue;
3862 i++;
3866 /* Rip-off INSN from the insn stream. When ONLY_DISCONNECT is true,
3867 do not delete insn's data, because it will be later re-emitted.
3868 Return true if we have removed some blocks afterwards. */
3869 bool
3870 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3872 basic_block bb = BLOCK_FOR_INSN (insn);
3874 gcc_assert (INSN_IN_STREAM_P (insn));
3876 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3878 expr_t expr;
3879 av_set_iterator i;
3881 /* When we remove a debug insn that is head of a BB, it remains
3882 in the AV_SET of the block, but it shouldn't. */
3883 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3884 if (EXPR_INSN_RTX (expr) == insn)
3886 av_set_iter_remove (&i);
3887 break;
3891 if (only_disconnect)
3893 insn_t prev = PREV_INSN (insn);
3894 insn_t next = NEXT_INSN (insn);
3895 basic_block bb = BLOCK_FOR_INSN (insn);
3897 NEXT_INSN (prev) = next;
3898 PREV_INSN (next) = prev;
3900 if (BB_HEAD (bb) == insn)
3902 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3903 BB_HEAD (bb) = prev;
3905 if (BB_END (bb) == insn)
3906 BB_END (bb) = prev;
3908 else
3910 remove_insn (insn);
3911 clear_expr (INSN_EXPR (insn));
3914 /* It is necessary to null these fields before calling add_insn (). */
3915 PREV_INSN (insn) = NULL_RTX;
3916 NEXT_INSN (insn) = NULL_RTX;
3918 return tidy_control_flow (bb, full_tidying);
3921 /* Estimate the number of insns in BB. */
3922 static int
3923 sel_estimate_number_of_insns (basic_block bb)
3925 int res = 0;
3926 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3928 for (; insn != next_tail; insn = NEXT_INSN (insn))
3929 if (NONDEBUG_INSN_P (insn))
3930 res++;
3932 return res;
3935 /* We don't need separate luids for notes or labels. */
3936 static int
3937 sel_luid_for_non_insn (rtx x)
3939 gcc_assert (NOTE_P (x) || LABEL_P (x));
3941 return -1;
3944 /* Find the proper seqno for inserting at INSN by successors.
3945 Return -1 if no successors with positive seqno exist. */
3946 static int
3947 get_seqno_by_succs (rtx insn)
3949 basic_block bb = BLOCK_FOR_INSN (insn);
3950 rtx tmp = insn, end = BB_END (bb);
3951 int seqno;
3952 insn_t succ = NULL;
3953 succ_iterator si;
3955 while (tmp != end)
3957 tmp = NEXT_INSN (tmp);
3958 if (INSN_P (tmp))
3959 return INSN_SEQNO (tmp);
3962 seqno = INT_MAX;
3964 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
3965 if (INSN_SEQNO (succ) > 0)
3966 seqno = MIN (seqno, INSN_SEQNO (succ));
3968 if (seqno == INT_MAX)
3969 return -1;
3971 return seqno;
3974 /* Compute seqno for INSN by its preds or succs. */
3975 static int
3976 get_seqno_for_a_jump (insn_t insn)
3978 int seqno;
3980 gcc_assert (INSN_SIMPLEJUMP_P (insn));
3982 if (!sel_bb_head_p (insn))
3983 seqno = INSN_SEQNO (PREV_INSN (insn));
3984 else
3986 basic_block bb = BLOCK_FOR_INSN (insn);
3988 if (single_pred_p (bb)
3989 && !in_current_region_p (single_pred (bb)))
3991 /* We can have preds outside a region when splitting edges
3992 for pipelining of an outer loop. Use succ instead.
3993 There should be only one of them. */
3994 insn_t succ = NULL;
3995 succ_iterator si;
3996 bool first = true;
3998 gcc_assert (flag_sel_sched_pipelining_outer_loops
3999 && current_loop_nest);
4000 FOR_EACH_SUCC_1 (succ, si, insn,
4001 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4003 gcc_assert (first);
4004 first = false;
4007 gcc_assert (succ != NULL);
4008 seqno = INSN_SEQNO (succ);
4010 else
4012 insn_t *preds;
4013 int n;
4015 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
4017 gcc_assert (n > 0);
4018 /* For one predecessor, use simple method. */
4019 if (n == 1)
4020 seqno = INSN_SEQNO (preds[0]);
4021 else
4022 seqno = get_seqno_by_preds (insn);
4024 free (preds);
4028 /* We were unable to find a good seqno among preds. */
4029 if (seqno < 0)
4030 seqno = get_seqno_by_succs (insn);
4032 gcc_assert (seqno >= 0);
4034 return seqno;
4037 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
4038 with positive seqno exist. */
4040 get_seqno_by_preds (rtx insn)
4042 basic_block bb = BLOCK_FOR_INSN (insn);
4043 rtx tmp = insn, head = BB_HEAD (bb);
4044 insn_t *preds;
4045 int n, i, seqno;
4047 while (tmp != head)
4049 tmp = PREV_INSN (tmp);
4050 if (INSN_P (tmp))
4051 return INSN_SEQNO (tmp);
4054 cfg_preds (bb, &preds, &n);
4055 for (i = 0, seqno = -1; i < n; i++)
4056 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
4058 return seqno;
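/* Example (hypothetical seqnos): if INSN is preceded in its block by insns
   with seqnos 7 and then 9, the backward walk above returns 9, the seqno of
   the closest preceding insn; only when INSN is the first insn of its block
   do we fall back to the maximum seqno among the CFG predecessors.  */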
4063 /* Extend pass-scope data structures for basic blocks. */
4064 void
4065 sel_extend_global_bb_info (void)
4067 VEC_safe_grow_cleared (sel_global_bb_info_def, heap, sel_global_bb_info,
4068 last_basic_block);
4071 /* Extend region-scope data structures for basic blocks. */
4072 static void
4073 extend_region_bb_info (void)
4075 VEC_safe_grow_cleared (sel_region_bb_info_def, heap, sel_region_bb_info,
4076 last_basic_block);
4079 /* Extend all data structures to fit for all basic blocks. */
4080 static void
4081 extend_bb_info (void)
4083 sel_extend_global_bb_info ();
4084 extend_region_bb_info ();
4087 /* Finalize pass-scope data structures for basic blocks. */
4088 void
4089 sel_finish_global_bb_info (void)
4091 VEC_free (sel_global_bb_info_def, heap, sel_global_bb_info);
4094 /* Finalize region-scope data structures for basic blocks. */
4095 static void
4096 finish_region_bb_info (void)
4098 VEC_free (sel_region_bb_info_def, heap, sel_region_bb_info);
4102 /* Data for each insn in current region. */
4103 VEC (sel_insn_data_def, heap) *s_i_d = NULL;
4105 /* Extend data structures for insns from current region. */
4106 static void
4107 extend_insn_data (void)
4109 int reserve;
4111 sched_extend_target ();
4112 sched_deps_init (false);
4114 /* Extend data structures for insns from current region. */
4115 reserve = (sched_max_luid + 1
4116 - VEC_length (sel_insn_data_def, s_i_d));
4117 if (reserve > 0
4118 && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
4120 int size;
4122 if (sched_max_luid / 2 > 1024)
4123 size = sched_max_luid + 1024;
4124 else
4125 size = 3 * sched_max_luid / 2;
4128 VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
4132 /* Finalize data structures for insns from current region. */
4133 static void
4134 finish_insns (void)
4136 unsigned i;
4138 /* Clear here all dependence contexts that may have been left over from
4139 insns removed during scheduling. */
4140 for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
4142 sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
4144 if (sid_entry->live)
4145 return_regset_to_pool (sid_entry->live);
4146 if (sid_entry->analyzed_deps)
4148 BITMAP_FREE (sid_entry->analyzed_deps);
4149 BITMAP_FREE (sid_entry->found_deps);
4150 htab_delete (sid_entry->transformed_insns);
4151 free_deps (&sid_entry->deps_context);
4153 if (EXPR_VINSN (&sid_entry->expr))
4155 clear_expr (&sid_entry->expr);
4157 /* Also, clear CANT_MOVE bit here, because we really don't want it
4158 to be passed to the next region. */
4159 CANT_MOVE_BY_LUID (i) = 0;
4163 VEC_free (sel_insn_data_def, heap, s_i_d);
4166 /* A proxy to pass initialization data to init_insn (). */
4167 static sel_insn_data_def _insn_init_ssid;
4168 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4170 /* If true create a new vinsn. Otherwise use the one from EXPR. */
4171 static bool insn_init_create_new_vinsn_p;
4173 /* Set all necessary data for initialization of the new insn[s]. */
4174 static expr_t
4175 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4177 expr_t x = &insn_init_ssid->expr;
4179 copy_expr_onside (x, expr);
4180 if (vi != NULL)
4182 insn_init_create_new_vinsn_p = false;
4183 change_vinsn_in_expr (x, vi);
4185 else
4186 insn_init_create_new_vinsn_p = true;
4188 insn_init_ssid->seqno = seqno;
4189 return x;
4192 /* Init data for INSN. */
4193 static void
4194 init_insn_data (insn_t insn)
4196 expr_t expr;
4197 sel_insn_data_t ssid = insn_init_ssid;
4199 /* The fields mentioned below are special and hence are not being
4200 propagated to the new insns. */
4201 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4202 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4203 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4205 expr = INSN_EXPR (insn);
4206 copy_expr (expr, &ssid->expr);
4207 prepare_insn_expr (insn, ssid->seqno);
4209 if (insn_init_create_new_vinsn_p)
4210 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4212 if (first_time_insn_init (insn))
4213 init_first_time_insn_data (insn);
4216 /* This is used to initialize spurious jumps generated by
4217 sel_redirect_edge (). */
4218 static void
4219 init_simplejump_data (insn_t insn)
4221 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4222 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
4223 false, true);
4224 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn);
4225 init_first_time_insn_data (insn);
4228 /* Perform deferred initialization of insns. This is used to process
4229 a new jump that may be created by redirect_edge. */
4230 void
4231 sel_init_new_insn (insn_t insn, int flags)
4233 /* We create data structures for bb when the first insn is emitted in it. */
4234 if (INSN_P (insn)
4235 && INSN_IN_STREAM_P (insn)
4236 && insn_is_the_only_one_in_bb_p (insn))
4238 extend_bb_info ();
4239 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4242 if (flags & INSN_INIT_TODO_LUID)
4244 sched_extend_luids ();
4245 sched_init_insn_luid (insn);
4248 if (flags & INSN_INIT_TODO_SSID)
4250 extend_insn_data ();
4251 init_insn_data (insn);
4252 clear_expr (&insn_init_ssid->expr);
4255 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4257 extend_insn_data ();
4258 init_simplejump_data (insn);
4261 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4262 == CONTAINING_RGN (BB_TO_BLOCK (0)));
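/* Illustrative sketch (not part of the original code) of the deferred
   initialization protocol above: a caller first stashes the initialization
   data with set_insn_init, then emits the new insn into the stream, and
   finally asks for the deferred work to be done via sel_init_new_insn.
   NEW_INSN below is a placeholder for the emitted insn.

     set_insn_init (expr, vi, seqno);
     ... emit NEW_INSN into the insn stream ...
     sel_init_new_insn (new_insn,
                        INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);  */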
4266 /* Functions to init/finish work with lv sets. */
4268 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4269 static void
4270 init_lv_set (basic_block bb)
4272 gcc_assert (!BB_LV_SET_VALID_P (bb));
4274 BB_LV_SET (bb) = get_regset_from_pool ();
4275 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4276 BB_LV_SET_VALID_P (bb) = true;
4279 /* Copy liveness information to BB from FROM_BB. */
4280 static void
4281 copy_lv_set_from (basic_block bb, basic_block from_bb)
4283 gcc_assert (!BB_LV_SET_VALID_P (bb));
4285 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4286 BB_LV_SET_VALID_P (bb) = true;
4289 /* Initialize lv set of all bb headers. */
4290 void
4291 init_lv_sets (void)
4293 basic_block bb;
4295 /* Initialize LV sets. */
4296 FOR_EACH_BB (bb)
4297 init_lv_set (bb);
4299 /* Don't forget EXIT_BLOCK. */
4300 init_lv_set (EXIT_BLOCK_PTR);
4303 /* Release lv set of BB. */
4304 static void
4305 free_lv_set (basic_block bb)
4307 gcc_assert (BB_LV_SET (bb) != NULL);
4309 return_regset_to_pool (BB_LV_SET (bb));
4310 BB_LV_SET (bb) = NULL;
4311 BB_LV_SET_VALID_P (bb) = false;
4314 /* Finalize lv sets of all bb headers. */
4315 void
4316 free_lv_sets (void)
4318 basic_block bb;
4320 /* Don't forget EXIT_BLOCK. */
4321 free_lv_set (EXIT_BLOCK_PTR);
4323 /* Free LV sets. */
4324 FOR_EACH_BB (bb)
4325 if (BB_LV_SET (bb))
4326 free_lv_set (bb);
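/* Illustrative sketch (not part of the original code) of the regset pool
   discipline used by the lv-set helpers above: every set obtained with
   get_regset_from_pool must eventually go back via return_regset_to_pool.

     regset tmp = get_regset_from_pool ();

     COPY_REG_SET (tmp, BB_LV_SET (bb));
     ... use TMP ...
     return_regset_to_pool (tmp);  */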
4329 /* Mark AV_SET for BB as invalid, so this set will be updated the next time
4330 compute_av() processes BB. This function is called when creating new basic
4331 blocks, as well as for blocks (either new or existing) where new jumps are
4332 created when the control flow is being updated. */
4333 static void
4334 invalidate_av_set (basic_block bb)
4336 BB_AV_LEVEL (bb) = -1;
4339 /* Create initial data sets for BB (they will be invalid). */
4340 static void
4341 create_initial_data_sets (basic_block bb)
4343 if (BB_LV_SET (bb))
4344 BB_LV_SET_VALID_P (bb) = false;
4345 else
4346 BB_LV_SET (bb) = get_regset_from_pool ();
4347 invalidate_av_set (bb);
4350 /* Free av set of BB. */
4351 static void
4352 free_av_set (basic_block bb)
4354 av_set_clear (&BB_AV_SET (bb));
4355 BB_AV_LEVEL (bb) = 0;
4358 /* Free data sets of BB. */
4359 void
4360 free_data_sets (basic_block bb)
4362 free_lv_set (bb);
4363 free_av_set (bb);
4366 /* Exchange lv sets of TO and FROM. */
4367 static void
4368 exchange_lv_sets (basic_block to, basic_block from)
4371 regset to_lv_set = BB_LV_SET (to);
4373 BB_LV_SET (to) = BB_LV_SET (from);
4374 BB_LV_SET (from) = to_lv_set;
4378 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4380 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4381 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4386 /* Exchange av sets of TO and FROM. */
4387 static void
4388 exchange_av_sets (basic_block to, basic_block from)
4391 av_set_t to_av_set = BB_AV_SET (to);
4393 BB_AV_SET (to) = BB_AV_SET (from);
4394 BB_AV_SET (from) = to_av_set;
4398 int to_av_level = BB_AV_LEVEL (to);
4400 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4401 BB_AV_LEVEL (from) = to_av_level;
4405 /* Exchange data sets of TO and FROM. */
4406 void
4407 exchange_data_sets (basic_block to, basic_block from)
4409 exchange_lv_sets (to, from);
4410 exchange_av_sets (to, from);
4413 /* Copy data sets of FROM to TO. */
4414 void
4415 copy_data_sets (basic_block to, basic_block from)
4417 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4418 gcc_assert (BB_AV_SET (to) == NULL);
4420 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4421 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4423 if (BB_AV_SET_VALID_P (from))
4425 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4427 if (BB_LV_SET_VALID_P (from))
4429 gcc_assert (BB_LV_SET (to) != NULL);
4430 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4434 /* Return an av set for INSN, if any. */
4435 av_set_t
4436 get_av_set (insn_t insn)
4438 av_set_t av_set;
4440 gcc_assert (AV_SET_VALID_P (insn));
4442 if (sel_bb_head_p (insn))
4443 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4444 else
4445 av_set = NULL;
4447 return av_set;
4450 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4451 int
4452 get_av_level (insn_t insn)
4454 int av_level;
4456 gcc_assert (INSN_P (insn));
4458 if (sel_bb_head_p (insn))
4459 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4460 else
4461 av_level = INSN_WS_LEVEL (insn);
4463 return av_level;
4468 /* Variables to work with control-flow graph. */
4470 /* The basic blocks that have already been processed by sched_data_update (),
4471 but haven't been passed to sel_add_bb () yet. */
4472 static VEC (basic_block, heap) *last_added_blocks = NULL;
4474 /* A pool for allocating successor infos. */
4475 static struct
4477 /* A stack for saving succs_info structures. */
4478 struct succs_info *stack;
4480 /* Its size. */
4481 int size;
4483 /* Top of the stack. */
4484 int top;
4486 /* Maximal value of the top. */
4487 int max_top;
4488 } succs_info_pool;
4490 /* Functions to work with control-flow graph. */
4492 /* Return the head insn of BB: its first real insn, exit_insn for the
exit block, or NULL if BB is empty. */
4493 insn_t
4494 sel_bb_head (basic_block bb)
4496 insn_t head;
4498 if (bb == EXIT_BLOCK_PTR)
4500 gcc_assert (exit_insn != NULL_RTX);
4501 head = exit_insn;
4503 else
4505 insn_t note;
4507 note = bb_note (bb);
4508 head = next_nonnote_insn (note);
4510 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4511 head = NULL_RTX;
4514 return head;
4517 /* Return true if INSN is a basic block header. */
4518 bool
4519 sel_bb_head_p (insn_t insn)
4521 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4524 /* Return last insn of BB. */
4525 insn_t
4526 sel_bb_end (basic_block bb)
4528 if (sel_bb_empty_p (bb))
4529 return NULL_RTX;
4531 gcc_assert (bb != EXIT_BLOCK_PTR);
4533 return BB_END (bb);
4536 /* Return true if INSN is the last insn in its basic block. */
4537 bool
4538 sel_bb_end_p (insn_t insn)
4540 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4543 /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4544 bool
4545 sel_bb_empty_p (basic_block bb)
4547 return sel_bb_head (bb) == NULL;
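/* Illustrative sketch (not part of the original code): walking the real
   insns of a non-empty basic block BB from the current region with the
   helpers above, assuming NEXT_INSN from rtl.h.  PROCESS is a placeholder.

     insn_t insn = sel_bb_head (bb);

     if (insn != NULL_RTX)
       for (;; insn = NEXT_INSN (insn))
         {
           process (insn);
           if (sel_bb_end_p (insn))
             break;
         }  */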
4550 /* True when BB belongs to the current scheduling region. */
4551 bool
4552 in_current_region_p (basic_block bb)
4554 if (bb->index < NUM_FIXED_BLOCKS)
4555 return false;
4557 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4560 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4561 basic_block
4562 fallthru_bb_of_jump (rtx jump)
4564 if (!JUMP_P (jump))
4565 return NULL;
4567 if (!any_condjump_p (jump))
4568 return NULL;
4570 /* A basic block that ends with a conditional jump may still have one successor
4571 (and be followed by a barrier); such blocks are of no interest here. */
4572 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4573 return NULL;
4575 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4578 /* Remove all notes from BB. */
4579 static void
4580 init_bb (basic_block bb)
4582 remove_notes (bb_note (bb), BB_END (bb));
4583 BB_NOTE_LIST (bb) = note_list;
4586 void
4587 sel_init_bbs (bb_vec_t bbs)
4589 const struct sched_scan_info_def ssi =
4591 extend_bb_info, /* extend_bb */
4592 init_bb, /* init_bb */
4593 NULL, /* extend_insn */
4594 NULL /* init_insn */
4597 sched_scan (&ssi, bbs);
4600 /* Restore notes for the whole region. */
4601 static void
4602 sel_restore_notes (void)
4604 int bb;
4605 insn_t insn;
4607 for (bb = 0; bb < current_nr_blocks; bb++)
4609 basic_block first, last;
4611 first = EBB_FIRST_BB (bb);
4612 last = EBB_LAST_BB (bb)->next_bb;
4616 note_list = BB_NOTE_LIST (first);
4617 restore_other_notes (NULL, first);
4618 BB_NOTE_LIST (first) = NULL_RTX;
4620 FOR_BB_INSNS (first, insn)
4621 if (NONDEBUG_INSN_P (insn))
4622 reemit_notes (insn);
4624 first = first->next_bb;
4626 while (first != last);
4630 /* Free per-bb data structures. */
4631 void
4632 sel_finish_bbs (void)
4634 sel_restore_notes ();
4636 /* Remove current loop preheader from this loop. */
4637 if (current_loop_nest)
4638 sel_remove_loop_preheader ();
4640 finish_region_bb_info ();
4643 /* Return true if INSN has a single successor of type FLAGS. */
4644 bool
4645 sel_insn_has_single_succ_p (insn_t insn, int flags)
4647 insn_t succ;
4648 succ_iterator si;
4649 bool first_p = true;
4651 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4653 if (first_p)
4654 first_p = false;
4655 else
4656 return false;
4659 return true;
4662 /* Allocate successor's info. */
4663 static struct succs_info *
4664 alloc_succs_info (void)
4666 if (succs_info_pool.top == succs_info_pool.max_top)
4668 int i;
4670 if (++succs_info_pool.max_top >= succs_info_pool.size)
4671 gcc_unreachable ();
4673 i = ++succs_info_pool.top;
4674 succs_info_pool.stack[i].succs_ok = VEC_alloc (rtx, heap, 10);
4675 succs_info_pool.stack[i].succs_other = VEC_alloc (rtx, heap, 10);
4676 succs_info_pool.stack[i].probs_ok = VEC_alloc (int, heap, 10);
4678 else
4679 succs_info_pool.top++;
4681 return &succs_info_pool.stack[succs_info_pool.top];
4684 /* Free successor's info. */
4685 void
4686 free_succs_info (struct succs_info * sinfo)
4688 gcc_assert (succs_info_pool.top >= 0
4689 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4690 succs_info_pool.top--;
4692 /* Clear stale info. */
4693 VEC_block_remove (rtx, sinfo->succs_ok,
4694 0, VEC_length (rtx, sinfo->succs_ok));
4695 VEC_block_remove (rtx, sinfo->succs_other,
4696 0, VEC_length (rtx, sinfo->succs_other));
4697 VEC_block_remove (int, sinfo->probs_ok,
4698 0, VEC_length (int, sinfo->probs_ok));
4699 sinfo->all_prob = 0;
4700 sinfo->succs_ok_n = 0;
4701 sinfo->all_succs_n = 0;
4704 /* Compute successor info for INSN. FLAGS are the flags passed
4705 to the FOR_EACH_SUCC_1 iterator. */
4706 struct succs_info *
4707 compute_succs_info (insn_t insn, short flags)
4709 succ_iterator si;
4710 insn_t succ;
4711 struct succs_info *sinfo = alloc_succs_info ();
4713 /* Traverse *all* successors and decide what to do with each. */
4714 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4716 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4717 perform code motion through inner loops. */
4718 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4720 if (current_flags & flags)
4722 VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
4723 VEC_safe_push (int, heap, sinfo->probs_ok,
4724 /* FIXME: Improve calculation when skipping
4725 inner loop to exits. */
4726 (si.bb_end
4727 ? si.e1->probability
4728 : REG_BR_PROB_BASE));
4729 sinfo->succs_ok_n++;
4731 else
4732 VEC_safe_push (rtx, heap, sinfo->succs_other, succ);
4734 /* Compute all_prob. */
4735 if (!si.bb_end)
4736 sinfo->all_prob = REG_BR_PROB_BASE;
4737 else
4738 sinfo->all_prob += si.e1->probability;
4740 sinfo->all_succs_n++;
4743 return sinfo;
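/* Illustrative sketch (not part of the original code), assuming the
   functions and fields defined above: compute successor info for an insn,
   walk the successors that matched the flags together with their
   probabilities, and return the structure to the pool.  PROCESS is a
   placeholder.

     struct succs_info *sinfo = compute_succs_info (insn, SUCCS_ALL);
     int i;
     rtx succ;

     for (i = 0; VEC_iterate (rtx, sinfo->succs_ok, i, succ); i++)
       process (succ, VEC_index (int, sinfo->probs_ok, i));
     free_succs_info (sinfo);  */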
4746 /* Return the predecessors of BB in PREDS and their number in N.
4747 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4748 static void
4749 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4751 edge e;
4752 edge_iterator ei;
4754 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4756 FOR_EACH_EDGE (e, ei, bb->preds)
4758 basic_block pred_bb = e->src;
4759 insn_t bb_end = BB_END (pred_bb);
4761 if (!in_current_region_p (pred_bb))
4763 gcc_assert (flag_sel_sched_pipelining_outer_loops
4764 && current_loop_nest);
4765 continue;
4768 if (sel_bb_empty_p (pred_bb))
4769 cfg_preds_1 (pred_bb, preds, n, size);
4770 else
4772 if (*n == *size)
4773 *preds = XRESIZEVEC (insn_t, *preds,
4774 (*size = 2 * *size + 1));
4775 (*preds)[(*n)++] = bb_end;
4779 gcc_assert (*n != 0
4780 || (flag_sel_sched_pipelining_outer_loops
4781 && current_loop_nest));
4784 /* Find all predecessors of BB and record them in PREDS and their number
4785 in N. Empty blocks are skipped, and only normal (forward in-region)
4786 edges are processed. */
4787 static void
4788 cfg_preds (basic_block bb, insn_t **preds, int *n)
4790 int size = 0;
4792 *preds = NULL;
4793 *n = 0;
4794 cfg_preds_1 (bb, preds, n, &size);
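/* Illustrative sketch (not part of the original code): typical use of
   cfg_preds.  The PREDS array is heap-allocated with XRESIZEVEC, so the
   caller is responsible for freeing it.  PROCESS is a placeholder.

     insn_t *preds;
     int n, i;

     cfg_preds (bb, &preds, &n);
     for (i = 0; i < n; i++)
       process (preds[i]);
     free (preds);  */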
4797 /* Returns true if we are moving INSN through a join point. */
4798 bool
4799 sel_num_cfg_preds_gt_1 (insn_t insn)
4801 basic_block bb;
4803 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4804 return false;
4806 bb = BLOCK_FOR_INSN (insn);
4808 while (1)
4810 if (EDGE_COUNT (bb->preds) > 1)
4811 return true;
4813 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4814 bb = EDGE_PRED (bb, 0)->src;
4816 if (!sel_bb_empty_p (bb))
4817 break;
4820 return false;
4823 /* Returns true when BB should be the end of an ebb. Adapted from the
4824 code in sched-ebb.c. */
4825 bool
4826 bb_ends_ebb_p (basic_block bb)
4828 basic_block next_bb = bb_next_bb (bb);
4829 edge e;
4831 if (next_bb == EXIT_BLOCK_PTR
4832 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4833 || (LABEL_P (BB_HEAD (next_bb))
4834 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4835 Work around that. */
4836 && !single_pred_p (next_bb)))
4837 return true;
4839 if (!in_current_region_p (next_bb))
4840 return true;
4842 e = find_fallthru_edge (bb->succs);
4843 if (e)
4845 gcc_assert (e->dest == next_bb);
4847 return false;
4850 return true;
4853 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4854 successor of INSN. */
4855 bool
4856 in_same_ebb_p (insn_t insn, insn_t succ)
4858 basic_block ptr = BLOCK_FOR_INSN (insn);
4860 for(;;)
4862 if (ptr == BLOCK_FOR_INSN (succ))
4863 return true;
4865 if (bb_ends_ebb_p (ptr))
4866 return false;
4868 ptr = bb_next_bb (ptr);
4871 gcc_unreachable ();
4872 return false;
4875 /* Recomputes the reverse topological order for the function and
4876 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4877 modified appropriately. */
4878 static void
4879 recompute_rev_top_order (void)
4881 int *postorder;
4882 int n_blocks, i;
4884 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4886 rev_top_order_index_len = last_basic_block;
4887 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4888 rev_top_order_index_len);
4891 postorder = XNEWVEC (int, n_basic_blocks);
4893 n_blocks = post_order_compute (postorder, true, false);
4894 gcc_assert (n_basic_blocks == n_blocks);
4896 /* Build reverse function: for each basic block with BB->INDEX == K
4897 rev_top_order_index[K] is its reverse topological sort number. */
4898 for (i = 0; i < n_blocks; i++)
4900 gcc_assert (postorder[i] < rev_top_order_index_len);
4901 rev_top_order_index[postorder[i]] = i;
4904 free (postorder);
4907 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4908 void
4909 clear_outdated_rtx_info (basic_block bb)
4911 rtx insn;
4913 FOR_BB_INSNS (bb, insn)
4914 if (INSN_P (insn))
4916 SCHED_GROUP_P (insn) = 0;
4917 INSN_AFTER_STALL_P (insn) = 0;
4918 INSN_SCHED_TIMES (insn) = 0;
4919 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4921 /* We cannot use the changed caches, as previously we could ignore
4922 the LHS dependence due to enabled renaming and transform
4923 the expression, and currently we'll be unable to do this. */
4924 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4928 /* Add BB_NOTE to the pool of available basic block notes. */
4929 static void
4930 return_bb_to_pool (basic_block bb)
4932 rtx note = bb_note (bb);
4934 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4935 && bb->aux == NULL);
4937 /* It turns out that current cfg infrastructure does not support
4938 reuse of basic blocks. Don't bother for now. */
4939 /*VEC_safe_push (rtx, heap, bb_note_pool, note);*/
4942 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4943 static rtx
4944 get_bb_note_from_pool (void)
4946 if (VEC_empty (rtx, bb_note_pool))
4947 return NULL_RTX;
4948 else
4950 rtx note = VEC_pop (rtx, bb_note_pool);
4952 PREV_INSN (note) = NULL_RTX;
4953 NEXT_INSN (note) = NULL_RTX;
4955 return note;
4959 /* Free bb_note_pool. */
4960 void
4961 free_bb_note_pool (void)
4963 VEC_free (rtx, heap, bb_note_pool);
4966 /* Setup scheduler pool and successor structure. */
4967 void
4968 alloc_sched_pools (void)
4970 int succs_size;
4972 succs_size = MAX_WS + 1;
4973 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
4974 succs_info_pool.size = succs_size;
4975 succs_info_pool.top = -1;
4976 succs_info_pool.max_top = -1;
4978 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
4979 sizeof (struct _list_node), 500);
4982 /* Free the pools. */
4983 void
4984 free_sched_pools (void)
4986 int i;
4988 free_alloc_pool (sched_lists_pool);
4989 gcc_assert (succs_info_pool.top == -1);
4990 for (i = 0; i < succs_info_pool.max_top; i++)
4992 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_ok);
4993 VEC_free (rtx, heap, succs_info_pool.stack[i].succs_other);
4994 VEC_free (int, heap, succs_info_pool.stack[i].probs_ok);
4996 free (succs_info_pool.stack);
5000 /* Returns a position in RGN where BB can be inserted retaining
5001 topological order. */
5002 static int
5003 find_place_to_insert_bb (basic_block bb, int rgn)
5005 bool has_preds_outside_rgn = false;
5006 edge e;
5007 edge_iterator ei;
5009 /* Find whether we have preds outside the region. */
5010 FOR_EACH_EDGE (e, ei, bb->preds)
5011 if (!in_current_region_p (e->src))
5013 has_preds_outside_rgn = true;
5014 break;
5017 /* Recompute the top order -- needed when we have > 1 pred
5018 or when we have preds outside the region. */
5019 if (flag_sel_sched_pipelining_outer_loops
5020 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5022 int i, bbi = bb->index, cur_bbi;
5024 recompute_rev_top_order ();
5025 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5027 cur_bbi = BB_TO_BLOCK (i);
5028 if (rev_top_order_index[bbi]
5029 < rev_top_order_index[cur_bbi])
5030 break;
5033 /* We have skipped past the block we need, so increase I; the caller
5034 will increment the result by one, so decrease I back to compensate. */
5035 return (i + 1) - 1;
5037 else if (has_preds_outside_rgn)
5039 /* This is the case when we generate an extra empty block
5040 to serve as region head during pipelining. */
5041 e = EDGE_SUCC (bb, 0);
5042 gcc_assert (EDGE_COUNT (bb->succs) == 1
5043 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5044 && (BLOCK_TO_BB (e->dest->index) == 0));
5045 return -1;
5048 /* We don't have preds outside the region. We should have
5049 exactly one pred, because the multiple-preds case comes from
5050 pipelining of outer loops, and that is handled above.
5051 Just take the bbi of this single pred. */
5052 if (EDGE_COUNT (bb->succs) > 0)
5054 int pred_bbi;
5056 gcc_assert (EDGE_COUNT (bb->preds) == 1);
5058 pred_bbi = EDGE_PRED (bb, 0)->src->index;
5059 return BLOCK_TO_BB (pred_bbi);
5061 else
5062 /* BB has no successors. It is safe to put it in the end. */
5063 return current_nr_blocks - 1;
5066 /* Deletes an empty basic block freeing its data. */
5067 static void
5068 delete_and_free_basic_block (basic_block bb)
5070 gcc_assert (sel_bb_empty_p (bb));
5072 if (BB_LV_SET (bb))
5073 free_lv_set (bb);
5075 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5077 /* Can't assert av_set properties because we use sel_remove_bb
5078 when removing the loop preheader from the region. At the point of
5079 removing the preheader we have already deallocated sel_region_bb_info. */
5080 gcc_assert (BB_LV_SET (bb) == NULL
5081 && !BB_LV_SET_VALID_P (bb)
5082 && BB_AV_LEVEL (bb) == 0
5083 && BB_AV_SET (bb) == NULL);
5085 delete_basic_block (bb);
5088 /* Add BB to the current region and update the region data. */
5089 static void
5090 add_block_to_current_region (basic_block bb)
5092 int i, pos, bbi = -2, rgn;
5094 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5095 bbi = find_place_to_insert_bb (bb, rgn);
5096 bbi += 1;
5097 pos = RGN_BLOCKS (rgn) + bbi;
5099 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5100 && ebb_head[bbi] == pos);
5102 /* Make a place for the new block. */
5103 extend_regions ();
5105 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5106 BLOCK_TO_BB (rgn_bb_table[i])++;
5108 memmove (rgn_bb_table + pos + 1,
5109 rgn_bb_table + pos,
5110 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5112 /* Initialize data for BB. */
5113 rgn_bb_table[pos] = bb->index;
5114 BLOCK_TO_BB (bb->index) = bbi;
5115 CONTAINING_RGN (bb->index) = rgn;
5117 RGN_NR_BLOCKS (rgn)++;
5119 for (i = rgn + 1; i <= nr_regions; i++)
5120 RGN_BLOCKS (i)++;
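/* Illustrative standalone sketch (not part of the original code) of the
   memmove-based insertion used above: open a slot at position POS in a
   table with N used entries (and capacity of at least N + 1) and store a
   new element there.  Only the C standard library is assumed.

     #include <string.h>

     static void
     insert_at (int *table, int n, int pos, int value)
     {
       memmove (table + pos + 1, table + pos,
                (n - pos) * sizeof (*table));
       table[pos] = value;
     }  */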
5123 /* Remove BB from the current region and update the region data. */
5124 static void
5125 remove_bb_from_region (basic_block bb)
5127 int i, pos, bbi = -2, rgn;
5129 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5130 bbi = BLOCK_TO_BB (bb->index);
5131 pos = RGN_BLOCKS (rgn) + bbi;
5133 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5134 && ebb_head[bbi] == pos);
5136 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5137 BLOCK_TO_BB (rgn_bb_table[i])--;
5139 memmove (rgn_bb_table + pos,
5140 rgn_bb_table + pos + 1,
5141 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5143 RGN_NR_BLOCKS (rgn)--;
5144 for (i = rgn + 1; i <= nr_regions; i++)
5145 RGN_BLOCKS (i)--;
5148 /* Add BB to the current region and update all data. If BB is NULL, add all
5149 blocks from last_added_blocks vector. */
5150 static void
5151 sel_add_bb (basic_block bb)
5153 /* Extend luids so that new notes will receive zero luids. */
5154 sched_extend_luids ();
5155 sched_init_bbs ();
5156 sel_init_bbs (last_added_blocks);
5158 /* When BB is passed explicitly, the vector should contain
5159 a single element equal to BB; otherwise, the vector
5160 should not be NULL. */
5161 gcc_assert (last_added_blocks != NULL);
5163 if (bb != NULL)
5165 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5166 && VEC_index (basic_block,
5167 last_added_blocks, 0) == bb);
5168 add_block_to_current_region (bb);
5170 /* We associate creating/deleting data sets with the first insn
5171 appearing / disappearing in the bb. */
5172 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5173 create_initial_data_sets (bb);
5175 VEC_free (basic_block, heap, last_added_blocks);
5177 else
5178 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5180 int i;
5181 basic_block temp_bb = NULL;
5183 for (i = 0;
5184 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5186 add_block_to_current_region (bb);
5187 temp_bb = bb;
5190 /* We need to fetch at least one bb so we know the region
5191 to update. */
5192 gcc_assert (temp_bb != NULL);
5193 bb = temp_bb;
5195 VEC_free (basic_block, heap, last_added_blocks);
5198 rgn_setup_region (CONTAINING_RGN (bb->index));
5201 /* Remove BB from the current region and update all data.
5202 If REMOVE_FROM_CFG_P is true, also remove the block from the cfg. */
5203 static void
5204 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5206 unsigned idx = bb->index;
5208 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5210 remove_bb_from_region (bb);
5211 return_bb_to_pool (bb);
5212 bitmap_clear_bit (blocks_to_reschedule, idx);
5214 if (remove_from_cfg_p)
5216 basic_block succ = single_succ (bb);
5217 delete_and_free_basic_block (bb);
5218 set_immediate_dominator (CDI_DOMINATORS, succ,
5219 recompute_dominator (CDI_DOMINATORS, succ));
5222 rgn_setup_region (CONTAINING_RGN (idx));
5225 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5226 static void
5227 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5229 gcc_assert (in_current_region_p (merge_bb));
5231 concat_note_lists (BB_NOTE_LIST (empty_bb),
5232 &BB_NOTE_LIST (merge_bb));
5233 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5237 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5238 region, but keep it in CFG. */
5239 static void
5240 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5242 /* The block should contain just a note or a label.
5243 We try to check whether it is unused below. */
5244 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5245 || LABEL_P (BB_HEAD (empty_bb)));
5247 /* If basic block has predecessors or successors, redirect them. */
5248 if (remove_from_cfg_p
5249 && (EDGE_COUNT (empty_bb->preds) > 0
5250 || EDGE_COUNT (empty_bb->succs) > 0))
5252 basic_block pred;
5253 basic_block succ;
5255 /* We need to init PRED and SUCC before redirecting edges. */
5256 if (EDGE_COUNT (empty_bb->preds) > 0)
5258 edge e;
5260 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5262 e = EDGE_PRED (empty_bb, 0);
5263 gcc_assert (e->src == empty_bb->prev_bb
5264 && (e->flags & EDGE_FALLTHRU));
5266 pred = empty_bb->prev_bb;
5268 else
5269 pred = NULL;
5271 if (EDGE_COUNT (empty_bb->succs) > 0)
5273 /* We do not check fallthruness here as above, because
5274 after removing a jump the edge may no longer be fallthru. */
5275 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5276 succ = EDGE_SUCC (empty_bb, 0)->dest;
5278 else
5279 succ = NULL;
5281 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5283 edge e = EDGE_PRED (empty_bb, 0);
5285 if (e->flags & EDGE_FALLTHRU)
5286 redirect_edge_succ_nodup (e, succ);
5287 else
5288 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5291 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5293 edge e = EDGE_SUCC (empty_bb, 0);
5295 if (find_edge (pred, e->dest) == NULL)
5296 redirect_edge_pred (e, pred);
5300 /* Finish removing. */
5301 sel_remove_bb (empty_bb, remove_from_cfg_p);
5304 /* An implementation of create_basic_block hook, which additionally updates
5305 per-bb data structures. */
5306 static basic_block
5307 sel_create_basic_block (void *headp, void *endp, basic_block after)
5309 basic_block new_bb;
5310 insn_t new_bb_note;
5312 gcc_assert (flag_sel_sched_pipelining_outer_loops
5313 || last_added_blocks == NULL);
5315 new_bb_note = get_bb_note_from_pool ();
5317 if (new_bb_note == NULL_RTX)
5318 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5319 else
5321 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5322 new_bb_note, after);
5323 new_bb->aux = NULL;
5326 VEC_safe_push (basic_block, heap, last_added_blocks, new_bb);
5328 return new_bb;
5331 /* Implement sched_init_only_bb (). */
5332 static void
5333 sel_init_only_bb (basic_block bb, basic_block after)
5335 gcc_assert (after == NULL);
5337 extend_regions ();
5338 rgn_make_new_region_out_of_new_block (bb);
5341 /* Update the latch when we've split or merged it from block FROM to TO.
5342 This should be checked for all outer loops, too. */
5343 static void
5344 change_loops_latches (basic_block from, basic_block to)
5346 gcc_assert (from != to);
5348 if (current_loop_nest)
5350 struct loop *loop;
5352 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5353 if (considered_for_pipelining_p (loop) && loop->latch == from)
5355 gcc_assert (loop == current_loop_nest);
5356 loop->latch = to;
5357 gcc_assert (loop_latch_edge (loop));
5362 /* Splits BB into two basic blocks, adding the new block to the region and extending
5363 per-bb data structures. Returns the newly created bb. */
5364 static basic_block
5365 sel_split_block (basic_block bb, rtx after)
5367 basic_block new_bb;
5368 insn_t insn;
5370 new_bb = sched_split_block_1 (bb, after);
5371 sel_add_bb (new_bb);
5373 /* This should be called after sel_add_bb, because this uses
5374 CONTAINING_RGN for the new block, which is not initialized until then.
5375 FIXME: this function may be a no-op now. */
5376 change_loops_latches (bb, new_bb);
5378 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5379 FOR_BB_INSNS (new_bb, insn)
5380 if (INSN_P (insn))
5381 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5383 if (sel_bb_empty_p (bb))
5385 gcc_assert (!sel_bb_empty_p (new_bb));
5387 /* NEW_BB has data sets that need to be updated and BB holds
5388 data sets that should be removed. Exchange these data sets
5389 so that we won't lose BB's valid data sets. */
5390 exchange_data_sets (new_bb, bb);
5391 free_data_sets (bb);
5394 if (!sel_bb_empty_p (new_bb)
5395 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5396 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5398 return new_bb;
5401 /* If BB ends with a jump insn whose UID is bigger than PREV_MAX_UID, return it.
5402 Otherwise return NULL. */
5403 static rtx
5404 check_for_new_jump (basic_block bb, int prev_max_uid)
5406 rtx end;
5408 end = sel_bb_end (bb);
5409 if (end && INSN_UID (end) >= prev_max_uid)
5410 return end;
5411 return NULL;
5414 /* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5415 New means having UID at least equal to PREV_MAX_UID. */
5416 static rtx
5417 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5419 rtx jump;
5421 /* Return immediately if no new insns were emitted. */
5422 if (get_max_uid () == prev_max_uid)
5423 return NULL;
5425 /* Now check both blocks for new jumps. There can be at most one. */
5426 if ((jump = check_for_new_jump (from, prev_max_uid)))
5427 return jump;
5429 if (jump_bb != NULL
5430 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5431 return jump;
5432 return NULL;
5435 /* Splits E and adds the newly created basic block to the current region.
5436 Returns this basic block. */
5437 basic_block
5438 sel_split_edge (edge e)
5440 basic_block new_bb, src, other_bb = NULL;
5441 int prev_max_uid;
5442 rtx jump;
5444 src = e->src;
5445 prev_max_uid = get_max_uid ();
5446 new_bb = split_edge (e);
5448 if (flag_sel_sched_pipelining_outer_loops
5449 && current_loop_nest)
5451 int i;
5452 basic_block bb;
5454 /* Some of the basic blocks might not have been added to the loop.
5455 Add them here, until this is fixed in force_fallthru. */
5456 for (i = 0;
5457 VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
5458 if (!bb->loop_father)
5460 add_bb_to_loop (bb, e->dest->loop_father);
5462 gcc_assert (!other_bb && (new_bb->index != bb->index));
5463 other_bb = bb;
5467 /* Add all last_added_blocks to the region. */
5468 sel_add_bb (NULL);
5470 jump = find_new_jump (src, new_bb, prev_max_uid);
5471 if (jump)
5472 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5474 /* Put the correct lv set on this block. */
5475 if (other_bb && !sel_bb_empty_p (other_bb))
5476 compute_live (sel_bb_head (other_bb));
5478 return new_bb;
5481 /* Implement sched_create_empty_bb (). */
5482 static basic_block
5483 sel_create_empty_bb (basic_block after)
5485 basic_block new_bb;
5487 new_bb = sched_create_empty_bb_1 (after);
5489 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5490 later. */
5491 gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
5492 && VEC_index (basic_block, last_added_blocks, 0) == new_bb);
5494 VEC_free (basic_block, heap, last_added_blocks);
5495 return new_bb;
5498 /* Implement sched_create_recovery_block. ORIG_INSN is where the block
5499 will be split to insert a check. */
5500 basic_block
5501 sel_create_recovery_block (insn_t orig_insn)
5503 basic_block first_bb, second_bb, recovery_block;
5504 basic_block before_recovery = NULL;
5505 rtx jump;
5507 first_bb = BLOCK_FOR_INSN (orig_insn);
5508 if (sel_bb_end_p (orig_insn))
5510 /* Avoid introducing an empty block while splitting. */
5511 gcc_assert (single_succ_p (first_bb));
5512 second_bb = single_succ (first_bb);
5514 else
5515 second_bb = sched_split_block (first_bb, orig_insn);
5517 recovery_block = sched_create_recovery_block (&before_recovery);
5518 if (before_recovery)
5519 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5521 gcc_assert (sel_bb_empty_p (recovery_block));
5522 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5523 if (current_loops != NULL)
5524 add_bb_to_loop (recovery_block, first_bb->loop_father);
5526 sel_add_bb (recovery_block);
5528 jump = BB_END (recovery_block);
5529 gcc_assert (sel_bb_head (recovery_block) == jump);
5530 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5532 return recovery_block;
5535 /* Merge basic block B into basic block A. */
5536 static void
5537 sel_merge_blocks (basic_block a, basic_block b)
5539 gcc_assert (sel_bb_empty_p (b)
5540 && EDGE_COUNT (b->preds) == 1
5541 && EDGE_PRED (b, 0)->src == b->prev_bb);
5543 move_bb_info (b->prev_bb, b);
5544 remove_empty_bb (b, false);
5545 merge_blocks (a, b);
5546 change_loops_latches (b, a);
5549 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5550 data structures for the possibly created bb and insns. The newly
5551 created bb, if any, is added to the current region. */
5552 void
5553 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5555 basic_block jump_bb, src, orig_dest = e->dest;
5556 int prev_max_uid;
5557 rtx jump;
5559 /* This function is now used only for bookkeeping code creation, where
5560 we'll never get the single pred of orig_dest block and thus will not
5561 hit unreachable blocks when updating dominator info. */
5562 gcc_assert (!sel_bb_empty_p (e->src)
5563 && !single_pred_p (orig_dest));
5564 src = e->src;
5565 prev_max_uid = get_max_uid ();
5566 jump_bb = redirect_edge_and_branch_force (e, to);
5568 if (jump_bb != NULL)
5569 sel_add_bb (jump_bb);
5571 /* As currently used, this function cannot spoil the loop structure,
5572 so we don't need to update anything here. But check it to be sure. */
5573 if (current_loop_nest
5574 && pipelining_p)
5575 gcc_assert (loop_latch_edge (current_loop_nest));
5577 jump = find_new_jump (src, jump_bb, prev_max_uid);
5578 if (jump)
5579 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5580 set_immediate_dominator (CDI_DOMINATORS, to,
5581 recompute_dominator (CDI_DOMINATORS, to));
5582 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5583 recompute_dominator (CDI_DOMINATORS, orig_dest));
5586 /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5587 redirected edge are in reverse topological order. */
5588 bool
5589 sel_redirect_edge_and_branch (edge e, basic_block to)
5591 bool latch_edge_p;
5592 basic_block src, orig_dest = e->dest;
5593 int prev_max_uid;
5594 rtx jump;
5595 edge redirected;
5596 bool recompute_toporder_p = false;
5597 bool maybe_unreachable = single_pred_p (orig_dest);
5599 latch_edge_p = (pipelining_p
5600 && current_loop_nest
5601 && e == loop_latch_edge (current_loop_nest));
5603 src = e->src;
5604 prev_max_uid = get_max_uid ();
5606 redirected = redirect_edge_and_branch (e, to);
5608 gcc_assert (redirected && last_added_blocks == NULL);
5610 /* When we've redirected a latch edge, update the header. */
5611 if (latch_edge_p)
5613 current_loop_nest->header = to;
5614 gcc_assert (loop_latch_edge (current_loop_nest));
5617 /* In rare situations, the topological relation between the blocks connected
5618 by the redirected edge can change (see PR42245 for an example). Update
5619 block_to_bb/bb_to_block. */
5620 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5621 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5622 recompute_toporder_p = true;
5624 jump = find_new_jump (src, NULL, prev_max_uid);
5625 if (jump)
5626 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5628 /* Only update dominator info when we don't have unreachable blocks.
5629 Otherwise we'll update in maybe_tidy_empty_bb. */
5630 if (!maybe_unreachable)
5632 set_immediate_dominator (CDI_DOMINATORS, to,
5633 recompute_dominator (CDI_DOMINATORS, to));
5634 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5635 recompute_dominator (CDI_DOMINATORS, orig_dest));
5637 return recompute_toporder_p;
5640 /* This variable holds the cfg hooks used by the selective scheduler. */
5641 static struct cfg_hooks sel_cfg_hooks;
5643 /* Register sel-sched cfg hooks. */
5644 void
5645 sel_register_cfg_hooks (void)
5647 sched_split_block = sel_split_block;
5649 orig_cfg_hooks = get_cfg_hooks ();
5650 sel_cfg_hooks = orig_cfg_hooks;
5652 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5654 set_cfg_hooks (sel_cfg_hooks);
5656 sched_init_only_bb = sel_init_only_bb;
5657 sched_split_block = sel_split_block;
5658 sched_create_empty_bb = sel_create_empty_bb;
5661 /* Unregister sel-sched cfg hooks. */
5662 void
5663 sel_unregister_cfg_hooks (void)
5665 sched_create_empty_bb = NULL;
5666 sched_split_block = NULL;
5667 sched_init_only_bb = NULL;
5669 set_cfg_hooks (orig_cfg_hooks);
5673 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5674 LABEL is where this jump should be directed. */
5675 rtx
5676 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5678 rtx insn_rtx;
5680 gcc_assert (!INSN_P (pattern));
5682 start_sequence ();
5684 if (label == NULL_RTX)
5685 insn_rtx = emit_insn (pattern);
5686 else if (DEBUG_INSN_P (label))
5687 insn_rtx = emit_debug_insn (pattern);
5688 else
5690 insn_rtx = emit_jump_insn (pattern);
5691 JUMP_LABEL (insn_rtx) = label;
5692 ++LABEL_NUSES (label);
5695 end_sequence ();
5697 sched_extend_luids ();
5698 sched_extend_target ();
5699 sched_deps_init (false);
5701 /* Initialize INSN_CODE now. */
5702 recog_memoized (insn_rtx);
5703 return insn_rtx;
5706 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5707 must not be clonable. */
5708 vinsn_t
5709 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5711 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5713 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5714 return vinsn_create (insn_rtx, force_unique_p);
5717 /* Create a copy of INSN_RTX. */
5718 rtx
5719 create_copy_of_insn_rtx (rtx insn_rtx)
5721 rtx res;
5723 if (DEBUG_INSN_P (insn_rtx))
5724 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5725 insn_rtx);
5727 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5729 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5730 NULL_RTX);
5731 return res;
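/* Illustrative sketch (not part of the original code), assuming only the
   functions defined in this file: clone an existing insn rtx and wrap the
   copy into a new, clonable vinsn.

     rtx copy = create_copy_of_insn_rtx (insn_rtx);
     vinsn_t vi = create_vinsn_from_insn_rtx (copy, false);  */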
5734 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5735 void
5736 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5738 vinsn_detach (EXPR_VINSN (expr));
5740 EXPR_VINSN (expr) = new_vinsn;
5741 vinsn_attach (new_vinsn);
5744 /* Helpers for global init. */
5745 /* This structure is used so that we can call the existing bundling mechanism
5746 and calculate insn priorities. */
5747 static struct haifa_sched_info sched_sel_haifa_sched_info =
5749 NULL, /* init_ready_list */
5750 NULL, /* can_schedule_ready_p */
5751 NULL, /* schedule_more_p */
5752 NULL, /* new_ready */
5753 NULL, /* rgn_rank */
5754 sel_print_insn, /* rgn_print_insn */
5755 contributes_to_priority,
5756 NULL, /* insn_finishes_block_p */
5758 NULL, NULL,
5759 NULL, NULL,
5760 0, 0,
5762 NULL, /* add_remove_insn */
5763 NULL, /* begin_schedule_ready */
5764 NULL, /* begin_move_insn */
5765 NULL, /* advance_target_bb */
5767 NULL,
5768 NULL,
5770 SEL_SCHED | NEW_BBS
5773 /* Setup special insns used in the scheduler. */
5774 void
5775 setup_nop_and_exit_insns (void)
5777 gcc_assert (nop_pattern == NULL_RTX
5778 && exit_insn == NULL_RTX);
5780 nop_pattern = constm1_rtx;
5782 start_sequence ();
5783 emit_insn (nop_pattern);
5784 exit_insn = get_insns ();
5785 end_sequence ();
5786 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5789 /* Free special insns used in the scheduler. */
5790 void
5791 free_nop_and_exit_insns (void)
5793 exit_insn = NULL_RTX;
5794 nop_pattern = NULL_RTX;
5797 /* Setup a special vinsn used in new insns initialization. */
5798 void
5799 setup_nop_vinsn (void)
5801 nop_vinsn = vinsn_create (exit_insn, false);
5802 vinsn_attach (nop_vinsn);
5805 /* Free a special vinsn used in new insns initialization. */
5806 void
5807 free_nop_vinsn (void)
5809 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5810 vinsn_detach (nop_vinsn);
5811 nop_vinsn = NULL;
5814 /* Call a set_sched_flags hook. */
5815 void
5816 sel_set_sched_flags (void)
5818 /* ??? This means that set_sched_flags was called, and we decided to
5819 support speculation. However, set_sched_flags also modifies flags
5820 on current_sched_info, doing this only at global init. And we
5821 sometimes change c_s_i later. So put the correct flags again. */
5822 if (spec_info && targetm.sched.set_sched_flags)
5823 targetm.sched.set_sched_flags (spec_info);
5826 /* Setup pointers to global sched info structures. */
5827 void
5828 sel_setup_sched_infos (void)
5830 rgn_setup_common_sched_info ();
5832 memcpy (&sel_common_sched_info, common_sched_info,
5833 sizeof (sel_common_sched_info));
5835 sel_common_sched_info.fix_recovery_cfg = NULL;
5836 sel_common_sched_info.add_block = NULL;
5837 sel_common_sched_info.estimate_number_of_insns
5838 = sel_estimate_number_of_insns;
5839 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5840 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5842 common_sched_info = &sel_common_sched_info;
5844 current_sched_info = &sched_sel_haifa_sched_info;
5845 current_sched_info->sched_max_insns_priority =
5846 get_rgn_sched_max_insns_priority ();
5848 sel_set_sched_flags ();
5852 /* Adds basic block BB to region RGN at the position *BB_ORD_INDEX;
5853 *BB_ORD_INDEX is then incremented. */
5854 static void
5855 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5857 RGN_NR_BLOCKS (rgn) += 1;
5858 RGN_DONT_CALC_DEPS (rgn) = 0;
5859 RGN_HAS_REAL_EBB (rgn) = 0;
5860 CONTAINING_RGN (bb->index) = rgn;
5861 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5862 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5863 (*bb_ord_index)++;
5865 /* FIXME: it is true only when not scheduling ebbs. */
5866 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5869 /* Functions to support pipelining of outer loops. */
5871 /* Creates a new empty region and returns its number. */
5872 static int
5873 sel_create_new_region (void)
5875 int new_rgn_number = nr_regions;
5877 RGN_NR_BLOCKS (new_rgn_number) = 0;
5879 /* FIXME: This will work only when EBBs are not created. */
5880 if (new_rgn_number != 0)
5881 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5882 RGN_NR_BLOCKS (new_rgn_number - 1);
5883 else
5884 RGN_BLOCKS (new_rgn_number) = 0;
5886 /* Set the blocks of the next region so the other functions may
5887 calculate the number of blocks in the region. */
5888 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5889 RGN_NR_BLOCKS (new_rgn_number);
5891 nr_regions++;
5893 return new_rgn_number;
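/* Illustrative sketch (not part of the original code): building a fresh
   region from a sequence of blocks with the helpers above.  BB1 and BB2
   are placeholders; the real pattern appears in make_region_from_loop and
   make_region_from_loop_preheader below.

     int rgn = sel_create_new_region ();
     int bb_ord_index = 0;

     sel_add_block_to_region (bb1, &bb_ord_index, rgn);
     sel_add_block_to_region (bb2, &bb_ord_index, rgn);  */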
5896 /* If X has a smaller topological sort number than Y, returns -1;
5897 if greater, returns 1. */
5898 static int
5899 bb_top_order_comparator (const void *x, const void *y)
5901 basic_block bb1 = *(const basic_block *) x;
5902 basic_block bb2 = *(const basic_block *) y;
5904 gcc_assert (bb1 == bb2
5905 || rev_top_order_index[bb1->index]
5906 != rev_top_order_index[bb2->index]);
5908 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5909 bbs with greater number should go earlier. */
5910 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5911 return -1;
5912 else
5913 return 1;
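/* Illustrative standalone sketch (not part of the original code) of the
   reverse-index comparator technique above: sort item ids so that the one
   with the greater reverse-topological index comes first, i.e. the ids end
   up in topological order.  Only the C standard library is assumed.

     #include <stdlib.h>

     static int rev_index[4] = { 2, 0, 3, 1 };

     static int
     cmp (const void *x, const void *y)
     {
       int a = *(const int *) x, b = *(const int *) y;
       return rev_index[a] > rev_index[b] ? -1 : 1;
     }

     ...
     int ids[4] = { 0, 1, 2, 3 };
     qsort (ids, 4, sizeof (int), cmp);
     ... ids is now { 2, 0, 3, 1 } ...  */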
5916 /* Create a region for LOOP and return its number. If we don't want
5917 to pipeline LOOP, return -1. */
5918 static int
5919 make_region_from_loop (struct loop *loop)
5921 unsigned int i;
5922 int new_rgn_number = -1;
5923 struct loop *inner;
5925 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5926 int bb_ord_index = 0;
5927 basic_block *loop_blocks;
5928 basic_block preheader_block;
5930 if (loop->num_nodes
5931 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5932 return -1;
5934 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5935 for (inner = loop->inner; inner; inner = inner->inner)
5936 if (flow_bb_inside_loop_p (inner, loop->latch))
5937 return -1;
5939 loop->ninsns = num_loop_insns (loop);
5940 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5941 return -1;
5943 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5945 for (i = 0; i < loop->num_nodes; i++)
5946 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5948 free (loop_blocks);
5949 return -1;
5952 preheader_block = loop_preheader_edge (loop)->src;
5953 gcc_assert (preheader_block);
5954 gcc_assert (loop_blocks[0] == loop->header);
5956 new_rgn_number = sel_create_new_region ();
5958 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
5959 SET_BIT (bbs_in_loop_rgns, preheader_block->index);
5961 for (i = 0; i < loop->num_nodes; i++)
5963 /* Add only those blocks that haven't been scheduled in the inner loop.
5964 The exception is the basic blocks with bookkeeping code - they should
5965 be added to the region (and they actually don't belong to the loop
5966 body, but to the region containing that loop body). */
5968 gcc_assert (new_rgn_number >= 0);
5970 if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
5972 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
5973 new_rgn_number);
5974 SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
5978 free (loop_blocks);
5979 MARK_LOOP_FOR_PIPELINING (loop);
5981 return new_rgn_number;
5984 /* Create a new region from preheader blocks LOOP_BLOCKS. */
5985 void
5986 make_region_from_loop_preheader (VEC(basic_block, heap) **loop_blocks)
5988 unsigned int i;
5989 int new_rgn_number = -1;
5990 basic_block bb;
5992 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5993 int bb_ord_index = 0;
5995 new_rgn_number = sel_create_new_region ();
5997 FOR_EACH_VEC_ELT (basic_block, *loop_blocks, i, bb)
5999 gcc_assert (new_rgn_number >= 0);
6001 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6004 VEC_free (basic_block, heap, *loop_blocks);
6005 gcc_assert (*loop_blocks == NULL);
6009 /* Create region(s) from loop nest LOOP, such that inner loops will be
6010 pipelined before outer loops. Returns true when a region for LOOP
6011 is created. */
6012 static bool
6013 make_regions_from_loop_nest (struct loop *loop)
6015 struct loop *cur_loop;
6016 int rgn_number;
6018 /* Traverse all inner nodes of the loop. */
6019 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6020 if (! TEST_BIT (bbs_in_loop_rgns, cur_loop->header->index))
6021 return false;
6023 /* At this moment all regular inner loops should have been pipelined.
6024 Try to create a region from this loop. */
6025 rgn_number = make_region_from_loop (loop);
6027 if (rgn_number < 0)
6028 return false;
6030 VEC_safe_push (loop_p, heap, loop_nests, loop);
6031 return true;
6034 /* Initialize the data structures needed for pipelining. */
6035 void
6036 sel_init_pipelining (void)
6038 /* Collect loop information to be used in outer loops pipelining. */
6039 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6040 | LOOPS_HAVE_FALLTHRU_PREHEADERS
6041 | LOOPS_HAVE_RECORDED_EXITS
6042 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6043 current_loop_nest = NULL;
6045 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
6046 sbitmap_zero (bbs_in_loop_rgns);
6048 recompute_rev_top_order ();
6051 /* Returns a struct loop for region RGN. */
6052 loop_p
6053 get_loop_nest_for_rgn (unsigned int rgn)
6055 /* Regions created with extend_rgns don't have corresponding loop nests,
6056 because they don't represent loops. */
6057 if (rgn < VEC_length (loop_p, loop_nests))
6058 return VEC_index (loop_p, loop_nests, rgn);
6059 else
6060 return NULL;
6063 /* True when LOOP was included into pipelining regions. */
6064 bool
6065 considered_for_pipelining_p (struct loop *loop)
6067 if (loop_depth (loop) == 0)
6068 return false;
6070 /* Now, the loop could be too large or irreducible. Check whether its
6071 region is in LOOP_NESTS.
6072 We determine the region number of LOOP as the region number of its
6073 latch. We can't use the header here, because that header could be
6074 a just-removed preheader, which would give us the wrong region number.
6075 Latch can't be used because it could be in the inner loop too. */
6076 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
6078 int rgn = CONTAINING_RGN (loop->latch->index);
6080 gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
6081 return true;
6084 return false;
6087 /* Makes regions from the rest of the blocks, after loops are chosen
6088 for pipelining. */
6089 static void
6090 make_regions_from_the_rest (void)
6092 int cur_rgn_blocks;
6093 int *loop_hdr;
6094 int i;
6096 basic_block bb;
6097 edge e;
6098 edge_iterator ei;
6099 int *degree;
6101 /* Index in rgn_bb_table where to start allocating new regions. */
6102 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
6104 /* Make regions from all the remaining basic blocks - those that don't belong to
6105 any loop or belong to irreducible loops. Prepare the data structures
6106 for extend_rgns. */
6108 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6109 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6110 loop. */
6111 loop_hdr = XNEWVEC (int, last_basic_block);
6112 degree = XCNEWVEC (int, last_basic_block);
6115 /* For each basic block that belongs to some loop assign the number
6116 of innermost loop it belongs to. */
6117 for (i = 0; i < last_basic_block; i++)
6118 loop_hdr[i] = -1;
6120 FOR_EACH_BB (bb)
6122 if (bb->loop_father && bb->loop_father->num != 0
6123 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6124 loop_hdr[bb->index] = bb->loop_father->num;
6127 /* For each basic block, degree is calculated as the number of incoming
6128 edges that come from bbs that are not yet scheduled.
6129 The basic blocks that are scheduled have degree value of zero. */
6130 FOR_EACH_BB (bb)
6132 degree[bb->index] = 0;
6134 if (!TEST_BIT (bbs_in_loop_rgns, bb->index))
6136 FOR_EACH_EDGE (e, ei, bb->preds)
6137 if (!TEST_BIT (bbs_in_loop_rgns, e->src->index))
6138 degree[bb->index]++;
6140 else
6141 degree[bb->index] = -1;
6144 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6146 /* Any block that did not end up in a region is placed into a region
6147 by itself. */
6148 FOR_EACH_BB (bb)
6149 if (degree[bb->index] >= 0)
6151 rgn_bb_table[cur_rgn_blocks] = bb->index;
6152 RGN_NR_BLOCKS (nr_regions) = 1;
6153 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6154 RGN_DONT_CALC_DEPS (nr_regions) = 0;
6155 RGN_HAS_REAL_EBB (nr_regions) = 0;
6156 CONTAINING_RGN (bb->index) = nr_regions++;
6157 BLOCK_TO_BB (bb->index) = 0;
6160 free (degree);
6161 free (loop_hdr);
6164 /* Free data structures used in pipelining of loops. */
6165 void sel_finish_pipelining (void)
6167 loop_iterator li;
6168 struct loop *loop;
6170 /* Release aux fields so we don't free them later by mistake. */
6171 FOR_EACH_LOOP (li, loop, 0)
6172 loop->aux = NULL;
6174 loop_optimizer_finalize ();
6176 VEC_free (loop_p, heap, loop_nests);
6178 free (rev_top_order_index);
6179 rev_top_order_index = NULL;
6182 /* This function replaces the find_rgns when
6183 FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
6184 void
6185 sel_find_rgns (void)
6187 sel_init_pipelining ();
6188 extend_regions ();
6190 if (current_loops)
6192 loop_p loop;
6193 loop_iterator li;
6195 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6196 ? LI_FROM_INNERMOST
6197 : LI_ONLY_INNERMOST))
6198 make_regions_from_loop_nest (loop);
6201 /* Make regions from all the remaining basic blocks and schedule them.
6202 These blocks include blocks that don't belong to any loop or belong
6203 to irreducible loops. */
6204 make_regions_from_the_rest ();
6206 /* We don't need bbs_in_loop_rgns anymore. */
6207 sbitmap_free (bbs_in_loop_rgns);
6208 bbs_in_loop_rgns = NULL;
6211 /* Add the preheader blocks from the previous loop to the current region, taking
6212 them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record them in *BBS.
6213 This function is only used with -fsel-sched-pipelining-outer-loops. */
6214 void
6215 sel_add_loop_preheaders (bb_vec_t *bbs)
6217 int i;
6218 basic_block bb;
6219 VEC(basic_block, heap) *preheader_blocks
6220 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6222 for (i = 0;
6223 VEC_iterate (basic_block, preheader_blocks, i, bb);
6224 i++)
6226 VEC_safe_push (basic_block, heap, *bbs, bb);
6227 VEC_safe_push (basic_block, heap, last_added_blocks, bb);
6228 sel_add_bb (bb);
6231 VEC_free (basic_block, heap, preheader_blocks);
6234 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6235 Please note that the function should also work when pipelining_p is
6236 false, because it is used when deciding whether we should or should
6237 not reschedule pipelined code. */
6238 bool
6239 sel_is_loop_preheader_p (basic_block bb)
6241 if (current_loop_nest)
6243 struct loop *outer;
6245 if (preheader_removed)
6246 return false;
6248 /* Preheader is the first block in the region. */
6249 if (BLOCK_TO_BB (bb->index) == 0)
6250 return true;
6252 /* We used to find a preheader with the topological information.
6253 Check that the above code is equivalent to what we did before. */
6255 if (in_current_region_p (current_loop_nest->header))
6256 gcc_assert (!(BLOCK_TO_BB (bb->index)
6257 < BLOCK_TO_BB (current_loop_nest->header->index)));
6259 /* Support the situation when the latch block of outer loop
6260 could be from here. */
6261 for (outer = loop_outer (current_loop_nest);
6262 outer;
6263 outer = loop_outer (outer))
6264 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6265 gcc_unreachable ();
6268 return false;
6271 /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6272 can be removed, making the corresponding edge fallthrough (assuming that
6273 all basic blocks between JUMP_BB and DEST_BB are empty). */
6274 static bool
6275 bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
6277 if (!onlyjump_p (BB_END (jump_bb))
6278 || tablejump_p (BB_END (jump_bb), NULL, NULL))
6279 return false;
6281 /* More than one outgoing edge, an abnormal/crossing edge, or the destination
6282 of the jump is not DEST_BB. */
6283 if (EDGE_COUNT (jump_bb->succs) != 1
6284 || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
6285 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6286 return false;
6288 /* None of the above applies, so the jump can be removed. */
6289 return true;
6292 /* Removes the loop preheader from the current region and saves its blocks in
6293 PREHEADER_BLOCKS of the father loop, so they will be added later to the
6294 region that represents an outer loop. */
6295 static void
6296 sel_remove_loop_preheader (void)
6298 int i, old_len;
6299 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6300 basic_block bb;
6301 bool all_empty_p = true;
6302 VEC(basic_block, heap) *preheader_blocks
6303 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6305 gcc_assert (current_loop_nest);
6306 old_len = VEC_length (basic_block, preheader_blocks);
6308 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6309 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6311 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6313 /* If the basic block belongs to the region, but doesn't belong to the
6314 corresponding loop, then it should be a preheader. */
6315 if (sel_is_loop_preheader_p (bb))
6317 VEC_safe_push (basic_block, heap, preheader_blocks, bb);
6318 if (BB_END (bb) != bb_note (bb))
6319 all_empty_p = false;
6323 /* Remove these blocks only after iterating over the whole region. */
6324 for (i = VEC_length (basic_block, preheader_blocks) - 1;
6325 i >= old_len;
6326 i--)
6328 bb = VEC_index (basic_block, preheader_blocks, i);
6329 sel_remove_bb (bb, false);
6332 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6334 if (!all_empty_p)
6335 /* Immediately create new region from preheader. */
6336 make_region_from_loop_preheader (&preheader_blocks);
6337 else
6339 /* If all preheader blocks are empty, don't create a new empty region.
6340 Instead, remove them completely. */
6341 FOR_EACH_VEC_ELT (basic_block, preheader_blocks, i, bb)
6343 edge e;
6344 edge_iterator ei;
6345 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6347 /* Redirect all incoming edges to next basic block. */
6348 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6350 if (! (e->flags & EDGE_FALLTHRU))
6351 redirect_edge_and_branch (e, bb->next_bb);
6352 else
6353 redirect_edge_succ (e, bb->next_bb);
6355 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6356 delete_and_free_basic_block (bb);
6358 /* Check whether, after deleting the preheader, there is an unconditional
6359 jump in PREV_BB that leads to the next basic block NEXT_BB.
6360 If so, delete this jump and clear the data sets of its
6361 basic block if it becomes empty. */
6362 if (next_bb->prev_bb == prev_bb
6363 && prev_bb != ENTRY_BLOCK_PTR
6364 && bb_has_removable_jump_to_p (prev_bb, next_bb))
6366 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6367 if (BB_END (prev_bb) == bb_note (prev_bb))
6368 free_data_sets (prev_bb);
6371 set_immediate_dominator (CDI_DOMINATORS, next_bb,
6372 recompute_dominator (CDI_DOMINATORS,
6373 next_bb));
6376 VEC_free (basic_block, heap, preheader_blocks);
6378 else
6379 /* Store preheader within the father's loop structure. */
6380 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6381 preheader_blocks);
6383 #endif