/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
/* We don't have to use it except for sel_print_insn.  */
#include "sel-sched-dump.h"

/* A vector holding bb info for whole scheduling pass.  */
vec<sel_global_bb_info_def>
    sel_global_bb_info = vNULL;

/* A vector holding bb info.  */
vec<sel_region_bb_info_def>
    sel_region_bb_info = vNULL;

/* A pool for allocating all lists.  */
alloc_pool sched_lists_pool;

/* This contains information about successors for compute_av_set.  */
struct succs_info current_succs;

/* Data structure to describe interaction with the generic scheduler utils.  */
static struct common_sched_info_def sel_common_sched_info;

/* The loop nest being pipelined.  */
struct loop *current_loop_nest;

/* LOOP_NESTS is a vector containing the corresponding loop nest for
   each region.  */
static vec<loop_p> loop_nests = vNULL;

/* Saves blocks already in loop regions, indexed by bb->index.  */
static sbitmap bbs_in_loop_rgns = NULL;

/* CFG hooks that are saved before changing create_basic_block hook.  */
static struct cfg_hooks orig_cfg_hooks;


/* Array containing reverse topological index of function basic blocks,
   indexed by BB->INDEX.  */
static int *rev_top_order_index = NULL;

/* Length of the above array.  */
static int rev_top_order_index_len = -1;

/* A regset pool structure.  */
static struct
{
  /* The stack to which regsets are returned.  */
  regset *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;

  /* In VV we save all generated regsets so that, when destructing the
     pool, we can compare it with V and check that every regset was returned
     back to pool.  */
  regset *vv;

  /* The pointer of VV stack.  */
  int nn;

  /* Its size.  */
  int ss;

  /* The difference between allocated and returned regsets.  */
  int diff;
} regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 };
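
/* Editorial note (not part of the pass): the pool above is a LIFO stack
   with leak checking.  V holds regsets available for reuse, VV remembers
   every regset ever created, and DIFF counts allocations minus returns,
   so free_regset_pool () can assert that nothing leaked.  */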

/* This represents the nop pool.  */
static struct
{
  /* The vector which holds previously emitted nops.  */
  insn_t *v;

  /* Its pointer.  */
  int n;

  /* Its size.  */
  int s;
} nop_pool = { NULL, 0, 0 };

/* The pool for basic block notes.  */
static rtx_vec_t bb_note_pool;

/* A NOP pattern used to emit placeholder insns.  */
rtx nop_pattern = NULL_RTX;
/* A special instruction that resides in EXIT_BLOCK.
   EXIT_INSN is successor of the insns that lead to EXIT_BLOCK.  */
rtx exit_insn = NULL_RTX;

/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed.  */
bool preheader_removed = false;


/* Forward static declarations.  */
static void fence_clear (fence_t);

static void deps_init_id (idata_t, insn_t, bool);
static void init_id_from_df (idata_t, insn_t, bool);
static expr_t set_insn_init (expr_t, vinsn_t, int);

static void cfg_preds (basic_block, insn_t **, int *);
static void prepare_insn_expr (insn_t, int);
static void free_history_vect (vec<expr_history_def> &);

static void move_bb_info (basic_block, basic_block);
static void remove_empty_bb (basic_block, bool);
static void sel_merge_blocks (basic_block, basic_block);
static void sel_remove_loop_preheader (void);
static bool bb_has_removable_jump_to_p (basic_block, basic_block);

static bool insn_is_the_only_one_in_bb_p (insn_t);
static void create_initial_data_sets (basic_block);

static void free_av_set (basic_block);
static void invalidate_av_set (basic_block);
static void extend_insn_data (void);
static void sel_init_new_insn (insn_t, int);
static void finish_insns (void);

/* Various list functions.  */

/* Copy an instruction list L.  */
ilist_t
ilist_copy (ilist_t l)
{
  ilist_t head = NULL, *tailp = &head;

  while (l)
    {
      ilist_add (tailp, ILIST_INSN (l));
      tailp = &ILIST_NEXT (*tailp);
      l = ILIST_NEXT (l);
    }

  return head;
}

/* Invert an instruction list L.  */
ilist_t
ilist_invert (ilist_t l)
{
  ilist_t res = NULL;

  while (l)
    {
      ilist_add (&res, ILIST_INSN (l));
      l = ILIST_NEXT (l);
    }

  return res;
}

/* Add a new boundary to the LP list with parameters TO, PTR, and DC.  */
void
blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc)
{
  bnd_t bnd;

  _list_add (lp);
  bnd = BLIST_BND (*lp);

  BND_TO (bnd) = to;
  BND_PTR (bnd) = ptr;
  BND_AV (bnd) = NULL;
  BND_AV1 (bnd) = NULL;
  BND_DC (bnd) = dc;
}

/* Remove the list note pointed to by LP.  */
void
blist_remove (blist_t *lp)
{
  bnd_t b = BLIST_BND (*lp);

  av_set_clear (&BND_AV (b));
  av_set_clear (&BND_AV1 (b));
  ilist_clear (&BND_PTR (b));

  _list_remove (lp);
}

/* Init a fence tail L.  */
void
flist_tail_init (flist_tail_t l)
{
  FLIST_TAIL_HEAD (l) = NULL;
  FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l);
}

/* Try to find fence corresponding to INSN in L.  */
fence_t
flist_lookup (flist_t l, insn_t insn)
{
  while (l)
    {
      if (FENCE_INSN (FLIST_FENCE (l)) == insn)
        return FLIST_FENCE (l);

      l = FLIST_NEXT (l);
    }

  return NULL;
}

/* Init the fields of F before running fill_insns.  */
static void
init_fence_for_scheduling (fence_t f)
{
  FENCE_BNDS (f) = NULL;
  FENCE_PROCESSED_P (f) = false;
  FENCE_SCHEDULED_P (f) = false;
}

/* Add new fence consisting of INSN and STATE to the list pointed to by LP.  */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
           insn_t last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
           int cycle, int cycle_issued_insns, int issue_more,
           bool starts_cycle_p, bool after_stall_p)
{
  fence_t f;

  _list_add (lp);
  f = FLIST_FENCE (*lp);

  FENCE_INSN (f) = insn;

  gcc_assert (state != NULL);
  FENCE_STATE (f) = state;

  FENCE_CYCLE (f) = cycle;
  FENCE_ISSUED_INSNS (f) = cycle_issued_insns;
  FENCE_STARTS_CYCLE_P (f) = starts_cycle_p;
  FENCE_AFTER_STALL_P (f) = after_stall_p;

  gcc_assert (dc != NULL);
  FENCE_DC (f) = dc;

  gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL);
  FENCE_TC (f) = tc;

  FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
  FENCE_ISSUE_MORE (f) = issue_more;
  FENCE_EXECUTING_INSNS (f) = executing_insns;
  FENCE_READY_TICKS (f) = ready_ticks;
  FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
  FENCE_SCHED_NEXT (f) = sched_next;

  init_fence_for_scheduling (f);
}

/* Remove the head node of the list pointed to by LP.  */
static void
flist_remove (flist_t *lp)
{
  if (FENCE_INSN (FLIST_FENCE (*lp)))
    fence_clear (FLIST_FENCE (*lp));
  _list_remove (lp);
}

/* Clear the fence list pointed to by LP.  */
void
flist_clear (flist_t *lp)
{
  while (*lp)
    flist_remove (lp);
}

/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL.  */
void
def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
{
  def_t d;

  _list_add (dl);
  d = DEF_LIST_DEF (*dl);

  d->orig_insn = original_insn;
  d->crosses_call = crosses_call;
}


/* Functions to work with target contexts.  */

/* Bulk target context.  It is convenient for debugging purposes to ensure
   that there are no uninitialized (null) target contexts.  */
static tc_t bulk_tc = (tc_t) 1;

/* Target hooks wrappers.  In the future we can provide some default
   implementations for them.  */

/* Allocate a store for the target context.  */
static tc_t
alloc_target_context (void)
{
  return (targetm.sched.alloc_sched_context
          ? targetm.sched.alloc_sched_context () : bulk_tc);
}

/* Init target context TC.
   If CLEAN_P is true, then make TC as it is at the beginning of the
   scheduler.  Otherwise, copy the current backend context to TC.  */
static void
init_target_context (tc_t tc, bool clean_p)
{
  if (targetm.sched.init_sched_context)
    targetm.sched.init_sched_context (tc, clean_p);
}

/* Allocate and initialize a target context.  Meaning of CLEAN_P is the same
   as in init_target_context ().  */
tc_t
create_target_context (bool clean_p)
{
  tc_t tc = alloc_target_context ();

  init_target_context (tc, clean_p);
  return tc;
}

/* Copy TC to the current backend context.  */
void
set_target_context (tc_t tc)
{
  if (targetm.sched.set_sched_context)
    targetm.sched.set_sched_context (tc);
}

/* TC is about to be destroyed.  Free any internal data.  */
static void
clear_target_context (tc_t tc)
{
  if (targetm.sched.clear_sched_context)
    targetm.sched.clear_sched_context (tc);
}

/* Clear and free it.  */
static void
delete_target_context (tc_t tc)
{
  clear_target_context (tc);

  if (targetm.sched.free_sched_context)
    targetm.sched.free_sched_context (tc);
}

/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook.  */
static void
copy_target_context (tc_t to, tc_t from)
{
  tc_t tmp = create_target_context (false);

  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}

/* Create a copy of TC.  */
static tc_t
create_copy_of_target_context (tc_t tc)
{
  tc_t copy = alloc_target_context ();

  copy_target_context (copy, tc);

  return copy;
}

/* Clear TC and initialize it according to CLEAN_P.  The meaning of CLEAN_P
   is the same as in init_target_context ().  */
void
reset_target_context (tc_t tc, bool clean_p)
{
  clear_target_context (tc);
  init_target_context (tc, clean_p);
}
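
/* Editorial usage sketch (assumes a target implementing the sched context
   hooks; with no hooks every call degenerates to the shared BULK_TC
   placeholder and becomes a no-op):

     tc_t tc = create_target_context (true);   allocate + init clean
     ...
     set_target_context (tc);                  make TC the current context
     ...
     delete_target_context (tc);               clear + free  */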

/* Functions to work with dependence contexts.
   Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context.  It accumulates information about processed insns to
   decide if the current insn is dependent on the processed ones.  */

/* Make a copy of FROM in TO.  */
static void
copy_deps_context (deps_t to, deps_t from)
{
  init_deps (to, false);
  deps_join (to, from);
}

/* Allocate store for dep context.  */
static deps_t
alloc_deps_context (void)
{
  return XNEW (struct deps_desc);
}

/* Allocate and initialize dep context.  */
static deps_t
create_deps_context (void)
{
  deps_t dc = alloc_deps_context ();

  init_deps (dc, false);
  return dc;
}

/* Create a copy of FROM.  */
static deps_t
create_copy_of_deps_context (deps_t from)
{
  deps_t to = alloc_deps_context ();

  copy_deps_context (to, from);
  return to;
}

/* Clean up internal data of DC.  */
static void
clear_deps_context (deps_t dc)
{
  free_deps (dc);
}

/* Clear and free DC.  */
static void
delete_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  free (dc);
}

/* Clear and init DC.  */
static void
reset_deps_context (deps_t dc)
{
  clear_deps_context (dc);
  init_deps (dc, false);
}

/* This structure describes the dependence analysis hooks for advancing
   dependence context.  */
static struct sched_deps_info_def advance_deps_context_sched_deps_info =
  {
    NULL,

    NULL, /* start_insn */
    NULL, /* finish_insn */
    NULL, /* start_lhs */
    NULL, /* finish_lhs */
    NULL, /* start_rhs */
    NULL, /* finish_rhs */
    haifa_note_reg_set,
    haifa_note_reg_clobber,
    haifa_note_reg_use,
    NULL, /* note_mem_dep */
    NULL, /* note_dep */

    0, 0, 0
  };

/* Process INSN and add its impact on DC.  */
void
advance_deps_context (deps_t dc, insn_t insn)
{
  sched_deps_info = &advance_deps_context_sched_deps_info;
  deps_analyze_insn (dc, insn);
}
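
/* Editorial sketch of the intended lifecycle (not part of the pass):

     deps_t dc = create_deps_context ();
     for (insn = first; insn != after_last; insn = NEXT_INSN (insn))
       advance_deps_context (dc, insn);
     ...query DC while scheduling...
     delete_deps_context (dc);

   Note that advance_deps_context installs a hooks structure that only
   records register sets/uses/clobbers, so no dependence lists are
   created while the context is advanced.  */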


/* Functions to work with DFA states.  */

/* Allocate store for a DFA state.  */
static state_t
state_alloc (void)
{
  return xmalloc (dfa_state_size);
}

/* Allocate and initialize DFA state.  */
static state_t
state_create (void)
{
  state_t state = state_alloc ();

  state_reset (state);
  advance_state (state);
  return state;
}

/* Free DFA state.  */
static void
state_free (state_t state)
{
  free (state);
}

/* Make a copy of FROM in TO.  */
static void
state_copy (state_t to, state_t from)
{
  memcpy (to, from, dfa_state_size);
}

/* Create a copy of FROM.  */
static state_t
state_create_copy (state_t from)
{
  state_t to = state_alloc ();

  state_copy (to, from);
  return to;
}
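
/* Editorial note: a DFA state is an opaque blob of DFA_STATE_SIZE bytes,
   so copying is a plain memcpy.  E.g.

     state_t s = state_create ();            reset, then one cycle advance
     state_t twin = state_create_copy (s);   independent snapshot
     state_free (twin);
     state_free (s);  */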


/* Functions to work with fences.  */

/* Clear the fence.  */
static void
fence_clear (fence_t f)
{
  state_t s = FENCE_STATE (f);
  deps_t dc = FENCE_DC (f);
  void *tc = FENCE_TC (f);

  ilist_clear (&FENCE_BNDS (f));

  gcc_assert ((s != NULL && dc != NULL && tc != NULL)
              || (s == NULL && dc == NULL && tc == NULL));

  free (s);

  if (dc != NULL)
    delete_deps_context (dc);

  if (tc != NULL)
    delete_target_context (tc);
  vec_free (FENCE_EXECUTING_INSNS (f));
  free (FENCE_READY_TICKS (f));
  FENCE_READY_TICKS (f) = NULL;
}

/* Init a list of fences with successors of OLD_FENCE.  */
void
init_fences (insn_t old_fence)
{
  insn_t succ;
  succ_iterator si;
  bool first = true;
  int ready_ticks_size = get_max_uid () + 1;

  FOR_EACH_SUCC_1 (succ, si, old_fence,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      if (first)
        first = false;
      else
        gcc_assert (flag_sel_sched_pipelining_outer_loops);

      flist_add (&fences, succ,
                 state_create (),
                 create_deps_context () /* dc */,
                 create_target_context (true) /* tc */,
                 NULL_RTX /* last_scheduled_insn */,
                 NULL, /* executing_insns */
                 XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                 ready_ticks_size,
                 NULL_RTX /* sched_next */,
                 1 /* cycle */, 0 /* cycle_issued_insns */,
                 issue_rate, /* issue_more */
                 1 /* starts_cycle_p */, 0 /* after_stall_p */);
    }
}

/* Merges two fences (filling fields of fence F with resulting values) by
   the following rules: 1) state, target context and last scheduled insn are
   propagated from the fallthrough edge if it is available;
   2) deps context and cycle are propagated from the more probable edge;
   3) all other fields are set to the corresponding constant values.

   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
   READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE
   and AFTER_STALL_P are the corresponding fields of the second fence.  */
static void
merge_fences (fence_t f, insn_t insn,
              state_t state, deps_t dc, void *tc,
              rtx last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
              int *ready_ticks, int ready_ticks_size,
              rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
  insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f);

  gcc_assert (sel_bb_head_p (FENCE_INSN (f))
              && !sched_next && !FENCE_SCHED_NEXT (f));

  /* Check if we can decide which path the fences came from.
     If we can't (or don't want to) - reset all.  */
  if (last_scheduled_insn == NULL
      || last_scheduled_insn_old == NULL
      /* This is a case when INSN is reachable on several paths from
         one insn (this can happen when pipelining of outer loops is on and
         there are two edges: one going around the inner loop and the other
         right through it; in such a case just reset everything).  */
      || last_scheduled_insn == last_scheduled_insn_old)
    {
      state_reset (FENCE_STATE (f));
      state_free (state);

      reset_deps_context (FENCE_DC (f));
      delete_deps_context (dc);

      reset_target_context (FENCE_TC (f), true);
      delete_target_context (tc);

      if (cycle > FENCE_CYCLE (f))
        FENCE_CYCLE (f) = cycle;

      FENCE_LAST_SCHEDULED_INSN (f) = NULL;
      FENCE_ISSUE_MORE (f) = issue_rate;
      vec_free (executing_insns);
      free (ready_ticks);
      if (FENCE_EXECUTING_INSNS (f))
        FENCE_EXECUTING_INSNS (f)->block_remove (0,
                                  FENCE_EXECUTING_INSNS (f)->length ());
      if (FENCE_READY_TICKS (f))
        memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
    }
  else
    {
      edge edge_old = NULL, edge_new = NULL;
      edge candidate;
      succ_iterator si;
      insn_t succ;

      /* Find fallthrough edge.  */
      gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
      candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb);

      if (!candidate
          || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn)
              && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old)))
        {
          /* No fallthrough edge leading to basic block of INSN.  */
          state_reset (FENCE_STATE (f));
          state_free (state);

          reset_target_context (FENCE_TC (f), true);
          delete_target_context (tc);

          FENCE_LAST_SCHEDULED_INSN (f) = NULL;
          FENCE_ISSUE_MORE (f) = issue_rate;
        }
      else
        if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
          {
            /* Would be weird if same insn is successor of several fallthrough
               edges.  */
            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn_old));

            state_free (FENCE_STATE (f));
            FENCE_STATE (f) = state;

            delete_target_context (FENCE_TC (f));
            FENCE_TC (f) = tc;

            FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn;
            FENCE_ISSUE_MORE (f) = issue_more;
          }
        else
          {
            /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched.  */
            state_free (state);
            delete_target_context (tc);

            gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                        != BLOCK_FOR_INSN (last_scheduled_insn));
          }

      /* Find edge of first predecessor (last_scheduled_insn_old->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_old);
              edge_old = si.e1;
            }
        }
      /* Find edge of second predecessor (last_scheduled_insn->insn).  */
      FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn,
                       SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
        {
          if (succ == insn)
            {
              /* No same successor allowed from several edges.  */
              gcc_assert (!edge_new);
              edge_new = si.e1;
            }
        }

      /* Check if we can choose most probable predecessor.  */
      if (edge_old == NULL || edge_new == NULL)
        {
          reset_deps_context (FENCE_DC (f));
          delete_deps_context (dc);
          vec_free (executing_insns);
          free (ready_ticks);

          FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
          if (FENCE_EXECUTING_INSNS (f))
            FENCE_EXECUTING_INSNS (f)->block_remove (0,
                                      FENCE_EXECUTING_INSNS (f)->length ());
          if (FENCE_READY_TICKS (f))
            memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
        }
      else
        if (edge_new->probability > edge_old->probability)
          {
            delete_deps_context (FENCE_DC (f));
            FENCE_DC (f) = dc;
            vec_free (FENCE_EXECUTING_INSNS (f));
            FENCE_EXECUTING_INSNS (f) = executing_insns;
            free (FENCE_READY_TICKS (f));
            FENCE_READY_TICKS (f) = ready_ticks;
            FENCE_READY_TICKS_SIZE (f) = ready_ticks_size;
            FENCE_CYCLE (f) = cycle;
          }
        else
          {
            /* Leave DC and CYCLE untouched.  */
            delete_deps_context (dc);
            vec_free (executing_insns);
            free (ready_ticks);
          }
    }

  /* Fill remaining invariant fields.  */
  if (after_stall_p)
    FENCE_AFTER_STALL_P (f) = 1;

  FENCE_ISSUED_INSNS (f) = 0;
  FENCE_STARTS_CYCLE_P (f) = 1;
  FENCE_SCHED_NEXT (f) = NULL;
}

/* Add a new fence to NEW_FENCES list, initializing it from all
   other parameters.  */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
               vec<rtx, va_gc> *executing_insns, int *ready_ticks,
               int ready_ticks_size, rtx sched_next, int cycle,
               int cycle_issued_insns, int issue_rate,
               bool starts_cycle_p, bool after_stall_p)
{
  fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);

  if (! f)
    {
      flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
                 last_scheduled_insn, executing_insns, ready_ticks,
                 ready_ticks_size, sched_next, cycle, cycle_issued_insns,
                 issue_rate, starts_cycle_p, after_stall_p);

      FLIST_TAIL_TAILP (new_fences)
        = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences));
    }
  else
    {
      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
                    executing_insns, ready_ticks, ready_ticks_size,
                    sched_next, cycle, issue_rate, after_stall_p);
    }
}

/* Move the first fence in the OLD_FENCES list to NEW_FENCES.  */
void
move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences)
{
  fence_t f, old;
  flist_t *tailp = FLIST_TAIL_TAILP (new_fences);

  old = FLIST_FENCE (old_fences);
  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                    FENCE_INSN (FLIST_FENCE (old_fences)));
  if (f)
    {
      merge_fences (f, old->insn, old->state, old->dc, old->tc,
                    old->last_scheduled_insn, old->executing_insns,
                    old->ready_ticks, old->ready_ticks_size,
                    old->sched_next, old->cycle, old->issue_more,
                    old->after_stall_p);
    }
  else
    {
      _list_add (tailp);
      FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp);
      *FLIST_FENCE (*tailp) = *old;
      init_fence_for_scheduling (FLIST_FENCE (*tailp));
    }
  FENCE_INSN (old) = NULL;
}

/* Add a new fence to NEW_FENCES list and initialize most of its data
   as a clean one.  */
void
add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int ready_ticks_size = get_max_uid () + 1;

  add_to_fences (new_fences,
                 succ, state_create (), create_deps_context (),
                 create_target_context (true),
                 NULL_RTX, NULL,
                 XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                 NULL_RTX, FENCE_CYCLE (fence) + 1,
                 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}

/* Add a new fence to NEW_FENCES list and initialize all of its data
   from FENCE and SUCC.  */
void
add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
{
  int * new_ready_ticks
    = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));

  memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
          FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
  add_to_fences (new_fences,
                 succ, state_create_copy (FENCE_STATE (fence)),
                 create_copy_of_deps_context (FENCE_DC (fence)),
                 create_copy_of_target_context (FENCE_TC (fence)),
                 FENCE_LAST_SCHEDULED_INSN (fence),
                 vec_safe_copy (FENCE_EXECUTING_INSNS (fence)),
                 new_ready_ticks,
                 FENCE_READY_TICKS_SIZE (fence),
                 FENCE_SCHED_NEXT (fence),
                 FENCE_CYCLE (fence),
                 FENCE_ISSUED_INSNS (fence),
                 FENCE_ISSUE_MORE (fence),
                 FENCE_STARTS_CYCLE_P (fence),
                 FENCE_AFTER_STALL_P (fence));
}
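
/* Editorial note: the two helpers above differ only in how much of FENCE
   survives.  add_clean_fence_to_fences () restarts the modeling with a
   fresh DFA state, deps context and target context at FENCE_CYCLE + 1,
   while add_dirty_fence_to_fences () clones every field, so scheduling
   of SUCC continues exactly where FENCE stopped.  */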


/* Functions to work with regset and nop pools.  */

/* Returns the new regset from pool.  It might have some of the bits set
   from the previous usage.  */
regset
get_regset_from_pool (void)
{
  regset rs;

  if (regset_pool.n != 0)
    rs = regset_pool.v[--regset_pool.n];
  else
    /* We need to create the regset.  */
    {
      rs = ALLOC_REG_SET (&reg_obstack);

      if (regset_pool.nn == regset_pool.ss)
        regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv,
                                     (regset_pool.ss = 2 * regset_pool.ss + 1));
      regset_pool.vv[regset_pool.nn++] = rs;
    }

  regset_pool.diff++;

  return rs;
}

/* Same as above, but returns the empty regset.  */
regset
get_clear_regset_from_pool (void)
{
  regset rs = get_regset_from_pool ();

  CLEAR_REG_SET (rs);
  return rs;
}

/* Return regset RS to the pool for future use.  */
void
return_regset_to_pool (regset rs)
{
  gcc_assert (rs);
  regset_pool.diff--;

  if (regset_pool.n == regset_pool.s)
    regset_pool.v = XRESIZEVEC (regset, regset_pool.v,
                                (regset_pool.s = 2 * regset_pool.s + 1));
  regset_pool.v[regset_pool.n++] = rs;
}
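
/* Editorial usage sketch: every regset taken from the pool must be
   returned, or free_regset_pool () will assert on the leak.  With
   SOME_LV_SET standing for any existing regset:

     regset live = get_clear_regset_from_pool ();
     COPY_REG_SET (live, some_lv_set);
     ...
     return_regset_to_pool (live);  */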

#ifdef ENABLE_CHECKING
/* This is used as a qsort callback for sorting regset pool stacks.
   X and XX are addresses of two regsets.  They are never equal.  */
static int
cmp_v_in_regset_pool (const void *x, const void *xx)
{
  uintptr_t r1 = (uintptr_t) *((const regset *) x);
  uintptr_t r2 = (uintptr_t) *((const regset *) xx);
  if (r1 > r2)
    return 1;
  else if (r1 < r2)
    return -1;
  gcc_unreachable ();
}
#endif

/* Free the regset pool possibly checking for memory leaks.  */
void
free_regset_pool (void)
{
#ifdef ENABLE_CHECKING
  {
    regset *v = regset_pool.v;
    int i = 0;
    int n = regset_pool.n;

    regset *vv = regset_pool.vv;
    int ii = 0;
    int nn = regset_pool.nn;

    int diff = 0;

    gcc_assert (n <= nn);

    /* Sort both vectors so it will be possible to compare them.  */
    qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
    qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);

    while (ii < nn)
      {
        if (v[i] == vv[ii])
          i++;
        else
          /* VV[II] was lost.  */
          diff++;

        ii++;
      }

    gcc_assert (diff == regset_pool.diff);
  }
#endif

  /* If not true - we have a memory leak.  */
  gcc_assert (regset_pool.diff == 0);

  while (regset_pool.n)
    {
      --regset_pool.n;
      FREE_REG_SET (regset_pool.v[regset_pool.n]);
    }

  free (regset_pool.v);
  regset_pool.v = NULL;
  regset_pool.s = 0;

  free (regset_pool.vv);
  regset_pool.vv = NULL;
  regset_pool.nn = 0;
  regset_pool.ss = 0;

  regset_pool.diff = 0;
}


/* Functions to work with nop pools.  NOP insns are used as temporary
   placeholders of the insns being scheduled to allow correct update of
   the data sets.  When update is finished, NOPs are deleted.  */

/* A vinsn that is used to represent a nop.  This vinsn is shared among all
   nops sel-sched generates.  */
static vinsn_t nop_vinsn = NULL;

/* Emit a nop before INSN, taking it from pool.  */
insn_t
get_nop_from_pool (insn_t insn)
{
  insn_t nop;
  bool old_p = nop_pool.n != 0;
  int flags;

  if (old_p)
    nop = nop_pool.v[--nop_pool.n];
  else
    nop = nop_pattern;

  nop = emit_insn_before (nop, insn);

  if (old_p)
    flags = INSN_INIT_TODO_SSID;
  else
    flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;

  set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn));
  sel_init_new_insn (nop, flags);

  return nop;
}

/* Remove NOP from the instruction stream and return it to the pool.  */
void
return_nop_to_pool (insn_t nop, bool full_tidying)
{
  gcc_assert (INSN_IN_STREAM_P (nop));
  sel_remove_insn (nop, false, full_tidying);

  if (nop_pool.n == nop_pool.s)
    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
                             (nop_pool.s = 2 * nop_pool.s + 1));
  nop_pool.v[nop_pool.n++] = nop;
}

/* Free the nop pool.  */
void
free_nop_pool (void)
{
  nop_pool.n = 0;
  nop_pool.s = 0;
  free (nop_pool.v);
  nop_pool.v = NULL;
}
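
/* Editorial usage sketch: a nop temporarily stands in for an insn being
   moved, so the data sets can be updated consistently:

     insn_t nop = get_nop_from_pool (insn);   emitted just before INSN
     ...recompute availability / liveness...
     return_nop_to_pool (nop, true);          unlink, keep rtx for reuse  */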


/* Skip unspec to support ia64 speculation.  Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case one of them needs to be skipped.  */
static int
skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny)
{
  const_rtx x = *xx;
  const_rtx y = *yy;

  if (GET_CODE (x) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (x)))
    {
      *nx = XVECEXP (x, 0, 0);
      *ny = CONST_CAST_RTX (y);
      return 1;
    }

  if (GET_CODE (y) == UNSPEC
      && (targetm.sched.skip_rtx_p == NULL
          || targetm.sched.skip_rtx_p (y)))
    {
      *nx = CONST_CAST_RTX (x);
      *ny = XVECEXP (y, 0, 0);
      return 1;
    }

  return 0;
}

/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct
   way to support ia64 speculation.  When changes are needed, new rtx X and new
   mode NMODE are written, and the callback returns true.  */
static int
hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
                           rtx *nx, enum machine_mode* nmode)
{
  if (GET_CODE (x) == UNSPEC
      && targetm.sched.skip_rtx_p
      && targetm.sched.skip_rtx_p (x))
    {
      *nx = XVECEXP (x, 0, 0);
      *nmode = VOIDmode;
      return 1;
    }

  return 0;
}

/* Return true if LHS and RHS are ok to be scheduled separately.  */
static bool
lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
{
  if (lhs == NULL || rhs == NULL)
    return false;

  /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
     to use reg, if const can be used.  Moreover, scheduling const as rhs may
     lead to mode mismatch because consts don't have modes but they could be
     merged from branches where the same const is used in different modes.  */
  if (CONSTANT_P (rhs))
    return false;

  /* ??? Do not rename predicate registers to avoid ICEs in bundling.  */
  if (COMPARISON_P (rhs))
    return false;

  /* Do not allow single REG to be an rhs.  */
  if (REG_P (rhs))
    return false;

  /* See comment at find_used_regs_1 (*1) for explanation of this
     restriction.  */
  /* FIXME: remove this later.  */
  if (MEM_P (lhs))
    return false;

  /* This will filter all tricky things like ZERO_EXTRACT etc.
     For now we don't handle it.  */
  if (!REG_P (lhs) && !MEM_P (lhs))
    return false;

  return true;
}
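
/* Editorial examples for the predicate above: an insn like
   (set (reg r1) (plus (reg r2) (reg r3))) is separable -- its rhs may be
   computed into another register and copied to r1 later.  By the checks
   above, (set (reg r1) (const_int 0)), (set (reg r1) (reg r2)), and insns
   with a MEM or ZERO_EXTRACT destination are not separable.  */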

/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
   used e.g. for insns from recovery blocks.  */
static void
vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
{
  hash_rtx_callback_function hrcf;
  int insn_class;

  VINSN_INSN_RTX (vi) = insn;
  VINSN_COUNT (vi) = 0;
  vi->cost = -1;

  if (INSN_NOP_P (insn))
    return;

  if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
    init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
  else
    deps_init_id (VINSN_ID (vi), insn, force_unique_p);

  /* Hash vinsn depending on whether it is separable or not.  */
  hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
  if (VINSN_SEPARABLE_P (vi))
    {
      rtx rhs = VINSN_RHS (vi);

      VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs),
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi),
                                         VOIDmode, NULL, NULL,
                                         false, hrcf);
    }
  else
    {
      VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode,
                                     NULL, NULL, false, hrcf);
      VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
    }

  insn_class = haifa_classify_insn (insn);
  if (insn_class >= 2
      && (!targetm.sched.get_insn_spec_ds
          || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL)
              == 0)))
    VINSN_MAY_TRAP_P (vi) = true;
  else
    VINSN_MAY_TRAP_P (vi) = false;
}

/* Indicate that VI has become the part of an rtx object.  */
void
vinsn_attach (vinsn_t vi)
{
  /* Assert that VI is not pending for deletion.  */
  gcc_assert (VINSN_INSN_RTX (vi));

  VINSN_COUNT (vi)++;
}

/* Create and init VI from the INSN.  Use UNIQUE_P for determining the correct
   VINSN_TYPE (VI).  */
static vinsn_t
vinsn_create (insn_t insn, bool force_unique_p)
{
  vinsn_t vi = XCNEW (struct vinsn_def);

  vinsn_init (vi, insn, force_unique_p);
  return vi;
}

/* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
   the copy.  */
vinsn_t
vinsn_copy (vinsn_t vi, bool reattach_p)
{
  rtx copy;
  bool unique = VINSN_UNIQUE_P (vi);
  vinsn_t new_vi;

  copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
  new_vi = create_vinsn_from_insn_rtx (copy, unique);
  if (reattach_p)
    {
      vinsn_detach (vi);
      vinsn_attach (new_vi);
    }

  return new_vi;
}

/* Delete the VI vinsn and free its data.  */
static void
vinsn_delete (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) == 0);

  if (!INSN_NOP_P (VINSN_INSN_RTX (vi)))
    {
      return_regset_to_pool (VINSN_REG_SETS (vi));
      return_regset_to_pool (VINSN_REG_USES (vi));
      return_regset_to_pool (VINSN_REG_CLOBBERS (vi));
    }

  free (vi);
}

/* Indicate that VI is no longer a part of some rtx object.
   Remove VI if it is no longer needed.  */
void
vinsn_detach (vinsn_t vi)
{
  gcc_assert (VINSN_COUNT (vi) > 0);

  if (--VINSN_COUNT (vi) == 0)
    vinsn_delete (vi);
}
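
/* Editorial sketch of the reference-counting protocol: a vinsn starts
   with VINSN_COUNT of zero, every holder attaches it, and the last
   detach deletes it:

     vinsn_t vi = create_vinsn_from_insn_rtx (insn_rtx, false);
     vinsn_attach (vi);    e.g. on behalf of an expr
     ...
     vinsn_detach (vi);    count drops to 0 -- VI is freed  */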

/* Returns TRUE if VI is a branch.  */
bool
vinsn_cond_branch_p (vinsn_t vi)
{
  insn_t insn;

  if (!VINSN_UNIQUE_P (vi))
    return false;

  insn = VINSN_INSN_RTX (vi);
  if (BB_END (BLOCK_FOR_INSN (insn)) != insn)
    return false;

  return control_flow_insn_p (insn);
}

/* Return latency of INSN.  */
static int
sel_insn_rtx_cost (rtx insn)
{
  int cost;

  /* A USE insn, or something else we don't need to
     understand.  We can't pass these directly to
     result_ready_cost or insn_default_latency because it will
     trigger a fatal error for unrecognizable insns.  */
  if (recog_memoized (insn) < 0)
    cost = 0;
  else
    {
      cost = insn_default_latency (insn);

      if (cost < 0)
        cost = 0;
    }

  return cost;
}

/* Return the cost of the VI.
   !!! FIXME: Unify with haifa-sched.c: insn_cost ().  */
int
sel_vinsn_cost (vinsn_t vi)
{
  int cost = vi->cost;

  if (cost < 0)
    {
      cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi));
      vi->cost = cost;
    }

  return cost;
}


/* Functions for insn emitting.  */

/* Emit new insn after AFTER based on PATTERN and initialize its data from
   EXPR and SEQNO.  */
insn_t
sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
  insn_t new_insn;

  gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);

  new_insn = emit_insn_after (pattern, after);
  set_insn_init (expr, NULL, seqno);
  sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID);

  return new_insn;
}

/* Force newly generated vinsns to be unique.  */
static bool init_insn_force_unique_p = false;

/* Emit new speculation recovery insn after AFTER based on PATTERN and
   initialize its data from EXPR and SEQNO.  */
insn_t
sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno,
                                      insn_t after)
{
  insn_t insn;

  gcc_assert (!init_insn_force_unique_p);

  init_insn_force_unique_p = true;
  insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after);
  CANT_MOVE (insn) = 1;
  init_insn_force_unique_p = false;

  return insn;
}

/* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
   take it as a new vinsn instead of EXPR's vinsn.
   We simplify insns later, after scheduling region in
   simplify_changed_insns.  */
insn_t
sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                              insn_t after)
{
  expr_t emit_expr;
  insn_t insn;
  int flags;

  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                             seqno);
  insn = EXPR_INSN_RTX (emit_expr);
  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));

  flags = INSN_INIT_TODO_SSID;
  if (INSN_LUID (insn) == 0)
    flags |= INSN_INIT_TODO_LUID;
  sel_init_new_insn (insn, flags);

  return insn;
}

/* Move insn from EXPR after AFTER.  */
insn_t
sel_move_insn (expr_t expr, int seqno, insn_t after)
{
  insn_t insn = EXPR_INSN_RTX (expr);
  basic_block bb = BLOCK_FOR_INSN (after);
  insn_t next = NEXT_INSN (after);

  /* Assert that in move_op we disconnected this insn properly.  */
  gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
  PREV_INSN (insn) = after;
  NEXT_INSN (insn) = next;

  NEXT_INSN (after) = insn;
  PREV_INSN (next) = insn;

  /* Update links from insn to bb and vice versa.  */
  df_insn_change_bb (insn, bb);
  if (BB_END (bb) == after)
    BB_END (bb) = insn;

  prepare_insn_expr (insn, seqno);
  return insn;
}


/* Functions to work with right-hand sides.  */

/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
   VECT and return true when found.  Use NEW_VINSN for comparison only when
   COMPARE_VINSNS is true.  Write to INDP the index on which
   the search has stopped, such that inserting the new element at INDP will
   retain VECT's sort order.  */
static bool
find_in_history_vect_1 (vec<expr_history_def> vect,
                        unsigned uid, vinsn_t new_vinsn,
                        bool compare_vinsns, int *indp)
{
  expr_history_def *arr;
  int i, j, len = vect.length ();

  if (len == 0)
    {
      *indp = 0;
      return false;
    }

  arr = vect.address ();
  i = 0, j = len - 1;

  while (i <= j)
    {
      unsigned auid = arr[i].uid;
      vinsn_t avinsn = arr[i].new_expr_vinsn;

      if (auid == uid
          /* When undoing transformation on a bookkeeping copy, the new vinsn
             may not be exactly equal to the one that is saved in the vector.
             This is because the insn whose copy we're checking was possibly
             substituted itself.  */
          && (! compare_vinsns
              || vinsn_equal_p (avinsn, new_vinsn)))
        {
          *indp = i;
          return true;
        }
      else if (auid > uid)
        break;
      i++;
    }

  *indp = i;
  return false;
}

/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
   the position found or -1, if no such value is in vector.
   Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
int
find_in_history_vect (vec<expr_history_def> vect, rtx insn,
                      vinsn_t new_vinsn, bool originators_p)
{
  int ind;

  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                              false, &ind))
    return ind;

  if (INSN_ORIGINATORS (insn) && originators_p)
    {
      unsigned uid;
      bitmap_iterator bi;

      EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi)
        if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
          return ind;
    }

  return -1;
}

/* Insert new element in a sorted history vector pointed to by PVECT,
   if it is not there already.  The element is searched using
   UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
   the history of a transformation.  */
void
insert_in_history_vect (vec<expr_history_def> *pvect,
                        unsigned uid, enum local_trans_type type,
                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                        ds_t spec_ds)
{
  vec<expr_history_def> vect = *pvect;
  expr_history_def temp;
  bool res;
  int ind;

  res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind);

  if (res)
    {
      expr_history_def *phist = &vect[ind];

      /* It is possible that speculation types of expressions that were
         propagated through different paths will be different here.  In this
         case, merge the status to get the correct check later.  */
      if (phist->spec_ds != spec_ds)
        phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
      return;
    }

  temp.uid = uid;
  temp.old_expr_vinsn = old_expr_vinsn;
  temp.new_expr_vinsn = new_expr_vinsn;
  temp.spec_ds = spec_ds;
  temp.type = type;

  vinsn_attach (old_expr_vinsn);
  vinsn_attach (new_expr_vinsn);
  vect.safe_insert (ind, temp);
  *pvect = vect;
}
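
/* Editorial note: the vector stays sorted by UID.  That is what lets
   find_in_history_vect_1 () stop scanning at the first larger UID and
   report an insertion index, and why safe_insert at that index above
   preserves the invariant.  */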

/* Free history vector PVECT.  */
static void
free_history_vect (vec<expr_history_def> &pvect)
{
  unsigned i;
  expr_history_def *phist;

  if (! pvect.exists ())
    return;

  for (i = 0; pvect.iterate (i, &phist); i++)
    {
      vinsn_detach (phist->old_expr_vinsn);
      vinsn_detach (phist->new_expr_vinsn);
    }

  pvect.release ();
}

/* Merge vector FROM to PVECT.  */
static void
merge_history_vect (vec<expr_history_def> *pvect,
                    vec<expr_history_def> from)
{
  expr_history_def *phist;
  int i;

  /* We keep this vector sorted.  */
  for (i = 0; from.iterate (i, &phist); i++)
    insert_in_history_vect (pvect, phist->uid, phist->type,
                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                            phist->spec_ds);
}

/* Compare two vinsns as rhses if possible and as vinsns otherwise.  */
bool
vinsn_equal_p (vinsn_t x, vinsn_t y)
{
  rtx_equal_p_callback_function repcf;

  if (x == y)
    return true;

  if (VINSN_TYPE (x) != VINSN_TYPE (y))
    return false;

  if (VINSN_HASH (x) != VINSN_HASH (y))
    return false;

  repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
  if (VINSN_SEPARABLE_P (x))
    {
      /* Compare RHSes of VINSNs.  */
      gcc_assert (VINSN_RHS (x));
      gcc_assert (VINSN_RHS (y));

      return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf);
    }

  return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf);
}
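
/* Editorial note: for separable vinsns only the right-hand sides are
   hashed and compared, so e.g. (set (reg r1) (plus (reg r2) (reg r3)))
   and (set (reg r4) (plus (reg r2) (reg r3))) are equal here -- exactly
   what is needed to recognize the same expression under renaming.  */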


/* Functions for working with expressions.  */

/* Initialize EXPR.  */
static void
init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
           int sched_times, int orig_bb_index, ds_t spec_done_ds,
           ds_t spec_to_check_ds, int orig_sched_cycle,
           vec<expr_history_def> history,
           signed char target_available,
           bool was_substituted, bool was_renamed, bool needs_spec_check_p,
           bool cant_move)
{
  vinsn_attach (vi);

  EXPR_VINSN (expr) = vi;
  EXPR_SPEC (expr) = spec;
  EXPR_USEFULNESS (expr) = use;
  EXPR_PRIORITY (expr) = priority;
  EXPR_PRIORITY_ADJ (expr) = 0;
  EXPR_SCHED_TIMES (expr) = sched_times;
  EXPR_ORIG_BB_INDEX (expr) = orig_bb_index;
  EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle;
  EXPR_SPEC_DONE_DS (expr) = spec_done_ds;
  EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds;

  if (history.exists ())
    EXPR_HISTORY_OF_CHANGES (expr) = history;
  else
    EXPR_HISTORY_OF_CHANGES (expr).create (0);

  EXPR_TARGET_AVAILABLE (expr) = target_available;
  EXPR_WAS_SUBSTITUTED (expr) = was_substituted;
  EXPR_WAS_RENAMED (expr) = was_renamed;
  EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p;
  EXPR_CANT_MOVE (expr) = cant_move;
}

/* Make a copy of the expr FROM into the expr TO.  */
void
copy_expr (expr_t to, expr_t from)
{
  vec<expr_history_def> temp = vNULL;

  if (EXPR_HISTORY_OF_CHANGES (from).exists ())
    {
      unsigned i;
      expr_history_def *phist;

      temp = EXPR_HISTORY_OF_CHANGES (from).copy ();
      for (i = 0;
           temp.iterate (i, &phist);
           i++)
        {
          vinsn_attach (phist->old_expr_vinsn);
          vinsn_attach (phist->new_expr_vinsn);
        }
    }

  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
             EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
             EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
             EXPR_ORIG_SCHED_CYCLE (from), temp,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Same, but the final expr will not ever be in av sets, so don't copy
   "uninteresting" data such as bitmap cache.  */
void
copy_expr_onside (expr_t to, expr_t from)
{
  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
             EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
             EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0,
             vNULL,
             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
             EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
             EXPR_CANT_MOVE (from));
}

/* Prepare the expr of INSN for scheduling.  Used when moving insn and when
   initializing new insns.  */
static void
prepare_insn_expr (insn_t insn, int seqno)
{
  expr_t expr = INSN_EXPR (insn);
  ds_t ds;

  INSN_SEQNO (insn) = seqno;
  EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
  EXPR_SPEC (expr) = 0;
  EXPR_ORIG_SCHED_CYCLE (expr) = 0;
  EXPR_WAS_SUBSTITUTED (expr) = 0;
  EXPR_WAS_RENAMED (expr) = 0;
  EXPR_TARGET_AVAILABLE (expr) = 1;
  INSN_LIVE_VALID_P (insn) = false;

  /* ??? If this expression is speculative, make its dependence
     as weak as possible.  We can filter this expression later
     in process_spec_exprs, because we do not distinguish
     between the status we got during compute_av_set and the
     existing status.  To be fixed.  */
  ds = EXPR_SPEC_DONE_DS (expr);
  if (ds)
    EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds);

  free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
}

/* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_target_availability (expr_t to, expr_t from, insn_t split_point)
{
  if (EXPR_TARGET_AVAILABLE (to) < 0
      || EXPR_TARGET_AVAILABLE (from) < 0)
    EXPR_TARGET_AVAILABLE (to) = -1;
  else
    {
      /* We try to detect the case when one of the expressions
         can only be reached through another one.  In this case,
         we can do better.  */
      if (split_point == NULL)
        {
          int toind, fromind;

          toind = EXPR_ORIG_BB_INDEX (to);
          fromind = EXPR_ORIG_BB_INDEX (from);

          if (toind && toind == fromind)
            /* Do nothing -- everything is done in
               merge_with_other_exprs.  */
            ;
          else
            EXPR_TARGET_AVAILABLE (to) = -1;
        }
      else if (EXPR_TARGET_AVAILABLE (from) == 0
               && EXPR_LHS (from)
               && REG_P (EXPR_LHS (from))
               && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from)))
        EXPR_TARGET_AVAILABLE (to) = -1;
      else
        EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from);
    }
}

/* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
   is non-null when expressions are merged from different successors at
   a split point.  */
static void
update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
{
  ds_t old_to_ds, old_from_ds;

  old_to_ds = EXPR_SPEC_DONE_DS (to);
  old_from_ds = EXPR_SPEC_DONE_DS (from);

  EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
  EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
  EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);

  /* When merging e.g. control & data speculative exprs, or a control
     speculative with a control&data speculative one, we really have
     to change vinsn too.  Also, when speculative status is changed,
     we also need to record this as a transformation in expr's history.  */
  if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
    {
      old_to_ds = ds_get_speculation_types (old_to_ds);
      old_from_ds = ds_get_speculation_types (old_from_ds);

      if (old_to_ds != old_from_ds)
        {
          ds_t record_ds;

          /* When both expressions are speculative, we need to change
             the vinsn first.  */
          if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
            {
              int res;

              res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
              gcc_assert (res >= 0);
            }

          if (split_point != NULL)
            {
              /* Record the change with proper status.  */
              record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
              record_ds &= ~(old_to_ds & SPECULATIVE);
              record_ds &= ~(old_from_ds & SPECULATIVE);

              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                                      INSN_UID (split_point), TRANS_SPECULATION,
                                      EXPR_VINSN (from), EXPR_VINSN (to),
                                      record_ds);
            }
        }
    }
}

/* Merge bits of FROM expr to TO expr.  When SPLIT_POINT is not NULL,
   this is done along different paths.  */
void
merge_expr_data (expr_t to, expr_t from, insn_t split_point)
{
  /* Choose the maximum of the specs of merged exprs.  This is required
     for correctness of bookkeeping.  */
  if (EXPR_SPEC (to) < EXPR_SPEC (from))
    EXPR_SPEC (to) = EXPR_SPEC (from);

  if (split_point)
    EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
  else
    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
                                EXPR_USEFULNESS (from));

  if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
    EXPR_PRIORITY (to) = EXPR_PRIORITY (from);

  if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from))
    EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from);

  if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
    EXPR_ORIG_BB_INDEX (to) = 0;

  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
                                    EXPR_ORIG_SCHED_CYCLE (from));

  EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
  EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
  EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);

  merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
                      EXPR_HISTORY_OF_CHANGES (from));
  update_target_availability (to, from, split_point);
  update_speculative_bits (to, from, split_point);
}
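
/* Editorial example of the usefulness rule above: merging at a split
   point adds the contributions of the paths, so usefulness 60 merged
   with 40 gives 100; without a split point the same merge keeps
   MAX (60, 40) = 60.  */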

/* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
   are merged from different successors at a split point.  */
void
merge_expr (expr_t to, expr_t from, insn_t split_point)
{
  vinsn_t to_vi = EXPR_VINSN (to);
  vinsn_t from_vi = EXPR_VINSN (from);

  gcc_assert (vinsn_equal_p (to_vi, from_vi));

  /* Make sure that speculative pattern is propagated into exprs that
     have non-speculative one.  This will provide us with consistent
     speculative bits and speculative patterns inside expr.  */
  if ((EXPR_SPEC_DONE_DS (from) != 0
       && EXPR_SPEC_DONE_DS (to) == 0)
      /* Do likewise for volatile insns, so that we always retain
         the may_trap_p bit on the resulting expression.  */
      || (VINSN_MAY_TRAP_P (EXPR_VINSN (from))
          && !VINSN_MAY_TRAP_P (EXPR_VINSN (to))))
    change_vinsn_in_expr (to, EXPR_VINSN (from));

  merge_expr_data (to, from, split_point);
  gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE);
}

/* Clear the information of this EXPR.  */
void
clear_expr (expr_t expr)
{
  vinsn_detach (EXPR_VINSN (expr));
  EXPR_VINSN (expr) = NULL;

  free_history_vect (EXPR_HISTORY_OF_CHANGES (expr));
}

/* For a given LV_SET, mark EXPR having unavailable target register.  */
static void
set_unavailable_target_for_expr (expr_t expr, regset lv_set)
{
  if (EXPR_SEPARABLE_P (expr))
    {
      if (REG_P (EXPR_LHS (expr))
          && register_unavailable_p (lv_set, EXPR_LHS (expr)))
        {
          /* If it's an insn like r1 = use (r1, ...), and it exists in
             different forms in each of the av_sets being merged, we can't say
             whether original destination register is available or not.
             However, this still works if destination register is not used
             in the original expression: if the branch at which LV_SET we're
             looking here is not actually 'other branch' in sense that same
             expression is available through it (but it can't be determined
             at computation stage because of transformations on one of the
             branches), it still won't affect the availability.
             Liveness of a register somewhere on a code motion path means
             it's either read somewhere on a codemotion path, live on
             'other' branch, live at the point immediately following
             the original operation, or is read by the original operation.
             The latter case is filtered out in the condition below.
             It still doesn't cover the case when register is defined and used
             somewhere within the code motion path, and in this case we could
             miss a unifying code motion along both branches using a renamed
             register, but it won't affect a code correctness since upon
             an actual code motion a bookkeeping code would be generated.  */
          if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                                      EXPR_LHS (expr)))
            EXPR_TARGET_AVAILABLE (expr) = -1;
          else
            EXPR_TARGET_AVAILABLE (expr) = false;
        }
    }
  else
    {
      unsigned regno;
      reg_set_iterator rsi;

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }

      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)),
                                 0, regno, rsi)
        if (bitmap_bit_p (lv_set, regno))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            break;
          }
    }
}

/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
   or dependence status have changed, 2 when also the target register
   became unavailable, 0 if nothing had to be changed.  */
int
speculate_expr (expr_t expr, ds_t ds)
{
  int res;
  rtx orig_insn_rtx;
  rtx spec_pat;
  ds_t target_ds, current_ds;

  /* Obtain the status we need to put on EXPR.  */
  target_ds = (ds & SPECULATIVE);
  current_ds = EXPR_SPEC_DONE_DS (expr);
  ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX);

  orig_insn_rtx = EXPR_INSN_RTX (expr);

  res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat);

  switch (res)
    {
    case 0:
      EXPR_SPEC_DONE_DS (expr) = ds;
      return current_ds != ds ? 1 : 0;

    case 1:
      {
        rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
        vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false);

        change_vinsn_in_expr (expr, spec_vinsn);
        EXPR_SPEC_DONE_DS (expr) = ds;
        EXPR_NEEDS_SPEC_CHECK_P (expr) = true;

        /* Do not allow clobbering the address register of speculative
           insns.  */
        if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                                    expr_dest_reg (expr)))
          {
            EXPR_TARGET_AVAILABLE (expr) = false;
            return 2;
          }

        return 1;
      }

    case -1:
      return -1;

    default:
      gcc_unreachable ();
      return -1;
    }
}

/* Return a destination register, if any, of EXPR.  */
rtx
expr_dest_reg (expr_t expr)
{
  rtx dest = VINSN_LHS (EXPR_VINSN (expr));

  if (dest != NULL_RTX && REG_P (dest))
    return dest;

  return NULL_RTX;
}

/* Return the REGNO of EXPR's destination.  */
unsigned
expr_dest_regno (expr_t expr)
{
  rtx dest = expr_dest_reg (expr);

  gcc_assert (dest != NULL_RTX);
  return REGNO (dest);
}

/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
   AV_SET having unavailable target register.  */
void
mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
{
  expr_t expr;
  av_set_iterator avi;

  FOR_EACH_EXPR (expr, avi, join_set)
    if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL)
      set_unavailable_target_for_expr (expr, lv_set);
}


/* Returns true if REG (at least partially) is present in REGS.  */
bool
register_unavailable_p (regset regs, rtx reg)
{
  unsigned regno, end_regno;

  regno = REGNO (reg);
  if (bitmap_bit_p (regs, regno))
    return true;

  end_regno = END_REGNO (reg);

  while (++regno < end_regno)
    if (bitmap_bit_p (regs, regno))
      return true;

  return false;
}
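
/* Editorial example: for a hard register REG occupying regnos 4 and 5,
   a set bit for either 4 or 5 in REGS makes the function return true --
   a partial overlap is enough for the register to be unavailable.  */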

/* Av set functions.  */

/* Add a new element to av set SETP.
   Return the element added.  */
static av_set_t
av_set_add_element (av_set_t *setp)
{
  /* Insert at the beginning of the list.  */
  _list_add (setp);
  return *setp;
}

/* Add EXPR to SETP.  */
void
av_set_add (av_set_t *setp, expr_t expr)
{
  av_set_t elem;

  gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
  elem = av_set_add_element (setp);
  copy_expr (_AV_SET_EXPR (elem), expr);
}

/* Same, but do not copy EXPR.  */
static void
av_set_add_nocopy (av_set_t *setp, expr_t expr)
{
  av_set_t elem;

  elem = av_set_add_element (setp);
  *_AV_SET_EXPR (elem) = *expr;
}
2094 /* Remove expr pointed to by IP from the av_set. */
2095 void
2096 av_set_iter_remove (av_set_iterator *ip)
2098 clear_expr (_AV_SET_EXPR (*ip->lp));
2099 _list_iter_remove (ip);
2102 /* Search for an expr in SET, such that it's equivalent to SOUGHT_VINSN in the
2103 sense of vinsn_equal_p function. Return NULL if no such expr is
2104 in SET was found. */
2105 expr_t
2106 av_set_lookup (av_set_t set, vinsn_t sought_vinsn)
2108 expr_t expr;
2109 av_set_iterator i;
2111 FOR_EACH_EXPR (expr, i, set)
2112 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2113 return expr;
2114 return NULL;
2117 /* Same, but also remove the EXPR found. */
2118 static expr_t
2119 av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn)
2121 expr_t expr;
2122 av_set_iterator i;
2124 FOR_EACH_EXPR_1 (expr, i, setp)
2125 if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn))
2127 _list_iter_remove_nofree (&i);
2128 return expr;
2130 return NULL;
2133 /* Search for an expr in SET that is equivalent to EXPR in the sense
2134 of vinsn_equal_p on their vinsns, but is not EXPR itself.
2135 Returns NULL if no such expr was found in SET. */
2136 static expr_t
2137 av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr)
2139 expr_t cur_expr;
2140 av_set_iterator i;
2142 FOR_EACH_EXPR (cur_expr, i, set)
2144 if (cur_expr == expr)
2145 continue;
2146 if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr)))
2147 return cur_expr;
2150 return NULL;
2153 /* If an expression equivalent to EXPR is already in AVP, merge them and remove EXPR. */
2154 expr_t
2155 merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
2157 expr_t expr2;
2159 expr2 = av_set_lookup_other_equiv_expr (*avp, expr);
2160 if (expr2 != NULL)
2162 /* Reset target availability on merge, since taking it only from one
2163 of the exprs would be wrong when they come from different code. */
2164 EXPR_TARGET_AVAILABLE (expr2) = -1;
2165 EXPR_USEFULNESS (expr2) = 0;
2167 merge_expr (expr2, expr, NULL);
2169 /* Fix usefulness: it should now be REG_BR_PROB_BASE. */
2170 EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
2172 av_set_iter_remove (ip);
2173 return expr2;
2176 return expr;
2179 /* Return true if there is an expr that correlates to VI in SET. */
2180 bool
2181 av_set_is_in_p (av_set_t set, vinsn_t vi)
2183 return av_set_lookup (set, vi) != NULL;
2186 /* Return a copy of SET. */
2187 av_set_t
2188 av_set_copy (av_set_t set)
2190 expr_t expr;
2191 av_set_iterator i;
2192 av_set_t res = NULL;
2194 FOR_EACH_EXPR (expr, i, set)
2195 av_set_add (&res, expr);
2197 return res;
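/* A minimal usage sketch of the av-set API above (a hypothetical caller;
   actual uses are spread across the selective scheduler):

     av_set_t av = NULL;
     av_set_add (&av, expr);
     if (av_set_lookup (av, EXPR_VINSN (expr)) != NULL)
       ...
     av_set_clear (&av);

   av_set_add copies EXPR, so the caller retains ownership of the
   original; lookups match by vinsn_equal_p, not by pointer identity.  */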
2200 /* Join two av sets that do not have common elements by attaching second set
2201 (pointed to by FROMP) to the end of first set (TO_TAILP must point to
2202 _AV_SET_NEXT of first set's last element). */
2203 static void
2204 join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp)
2206 gcc_assert (*to_tailp == NULL);
2207 *to_tailp = *fromp;
2208 *fromp = NULL;
2211 /* Make the set pointed to by TOP the union of TOP and FROMP. Clear the
2212 av_set pointed to by FROMP afterwards. */
2213 void
2214 av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn)
2216 expr_t expr1;
2217 av_set_iterator i;
2219 /* Delete from TOP all exprs that are present in FROMP. */
2220 FOR_EACH_EXPR_1 (expr1, i, top)
2222 expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1));
2224 if (expr2)
2226 merge_expr (expr2, expr1, insn);
2227 av_set_iter_remove (&i);
2231 join_distinct_sets (i.lp, fromp);
2234 /* Same as above, but also update availability of target register in
2235 TOP judging by TO_LV_SET and FROM_LV_SET. */
2236 void
2237 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
2238 regset from_lv_set, insn_t insn)
2240 expr_t expr1;
2241 av_set_iterator i;
2242 av_set_t *to_tailp, in_both_set = NULL;
2244 /* Delete from TOP all exprs that are present in FROMP. */
2245 FOR_EACH_EXPR_1 (expr1, i, top)
2247 expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1));
2249 if (expr2)
2251 /* It may be that the expressions have different destination
2252 registers, in which case we need to check liveness here. */
2253 if (EXPR_SEPARABLE_P (expr1))
2255 int regno1 = (REG_P (EXPR_LHS (expr1))
2256 ? (int) expr_dest_regno (expr1) : -1);
2257 int regno2 = (REG_P (EXPR_LHS (expr2))
2258 ? (int) expr_dest_regno (expr2) : -1);
2260 /* ??? We don't have a way to check restrictions for
2261 *other* register on the current path, we did it only
2262 for the current target register. Give up. */
2263 if (regno1 != regno2)
2264 EXPR_TARGET_AVAILABLE (expr2) = -1;
2266 else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2))
2267 EXPR_TARGET_AVAILABLE (expr2) = -1;
2269 merge_expr (expr2, expr1, insn);
2270 av_set_add_nocopy (&in_both_set, expr2);
2271 av_set_iter_remove (&i);
2273 else
2274 /* EXPR1 is present in TOP, but not in FROMP. Check it on
2275 FROM_LV_SET. */
2276 set_unavailable_target_for_expr (expr1, from_lv_set);
2278 to_tailp = i.lp;
2280 /* These expressions are not present in TOP. Check liveness
2281 restrictions on TO_LV_SET. */
2282 FOR_EACH_EXPR (expr1, i, *fromp)
2283 set_unavailable_target_for_expr (expr1, to_lv_set);
2285 join_distinct_sets (i.lp, &in_both_set);
2286 join_distinct_sets (to_tailp, fromp);
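/* A sketch of the union semantics shared by the two functions above,
   with hypothetical sets: if TOP holds { a, b } and FROMP holds
   { b, c }, the common element b is merged via merge_expr and appears
   once in the result, which becomes { a, b', c } with b' carrying the
   merged attributes; *FROMP is left NULL.  */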
2289 /* Clear av_set pointed to by SETP. */
2290 void
2291 av_set_clear (av_set_t *setp)
2293 expr_t expr;
2294 av_set_iterator i;
2296 FOR_EACH_EXPR_1 (expr, i, setp)
2297 av_set_iter_remove (&i);
2299 gcc_assert (*setp == NULL);
2302 /* Leave only one non-speculative element in SETP. */
2303 void
2304 av_set_leave_one_nonspec (av_set_t *setp)
2306 expr_t expr;
2307 av_set_iterator i;
2308 bool has_one_nonspec = false;
2310 /* Keep all speculative exprs, and leave one non-speculative
2311 (the first one). */
2312 FOR_EACH_EXPR_1 (expr, i, setp)
2314 if (!EXPR_SPEC_DONE_DS (expr))
2316 if (has_one_nonspec)
2317 av_set_iter_remove (&i);
2318 else
2319 has_one_nonspec = true;
2324 /* Return the N'th element of the SET. */
2325 expr_t
2326 av_set_element (av_set_t set, int n)
2328 expr_t expr;
2329 av_set_iterator i;
2331 FOR_EACH_EXPR (expr, i, set)
2332 if (n-- == 0)
2333 return expr;
2335 gcc_unreachable ();
2336 return NULL;
2339 /* Deletes all expressions from AVP that are conditional branches (IFs). */
2340 void
2341 av_set_substract_cond_branches (av_set_t *avp)
2343 av_set_iterator i;
2344 expr_t expr;
2346 FOR_EACH_EXPR_1 (expr, i, avp)
2347 if (vinsn_cond_branch_p (EXPR_VINSN (expr)))
2348 av_set_iter_remove (&i);
2351 /* Multiply the usefulness attribute of each member of av-set AV by
2352 PROB / ALL_PROB. */
2353 void
2354 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
2356 av_set_iterator i;
2357 expr_t expr;
2359 FOR_EACH_EXPR (expr, i, av)
2360 EXPR_USEFULNESS (expr) = (all_prob
2361 ? (EXPR_USEFULNESS (expr) * prob) / all_prob
2362 : 0);
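/* A worked example with hypothetical numbers: given EXPR_USEFULNESS of
   REG_BR_PROB_BASE (10000), PROB = 4000 and ALL_PROB = 10000, the new
   usefulness is 10000 * 4000 / 10000 = 4000, i.e. the expr gets weighted
   by the probability of the path it arrived along.  When ALL_PROB is 0,
   usefulness is simply zeroed.  */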
2365 /* Leave in AVP only those expressions that are also present in AV,
2366 merging the history of their transformations. */
2367 void
2368 av_set_code_motion_filter (av_set_t *avp, av_set_t av)
2370 av_set_iterator i;
2371 expr_t expr, expr2;
2373 FOR_EACH_EXPR_1 (expr, i, avp)
2374 if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL)
2375 av_set_iter_remove (&i);
2376 else
2377 /* When updating av sets in bookkeeping blocks, we can add more insns
2378 there which will be transformed but the upper av sets will not
2379 reflect those transformations. We then fail to undo those
2380 when searching for such insns. So merge the history saved
2381 in the av set of the block we are processing. */
2382 merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
2383 EXPR_HISTORY_OF_CHANGES (expr2));
2388 /* Dependence hooks to initialize insn data. */
2390 /* This is used in hooks callable from dependence analysis when initializing
2391 an instruction's data. */
2392 static struct
2394 /* Where the dependence was found (lhs/rhs). */
2395 deps_where_t where;
2397 /* The actual data object to initialize. */
2398 idata_t id;
2400 /* True when the insn should not be made clonable. */
2401 bool force_unique_p;
2403 /* True when insn should be treated as of type USE, i.e. never renamed. */
2404 bool force_use_p;
2405 } deps_init_id_data;
2408 /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be
2409 clonable. */
2410 static void
2411 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
2413 int type;
2415 /* Determine whether INSN could be cloned and return the appropriate vinsn
2416 type. Those clonable insns which can be separated into lhs and rhs have
2417 type SET; other clonable insns have type USE. */
2418 type = GET_CODE (insn);
2420 /* Only regular insns can be cloned. */
2421 if (type == INSN && !force_unique_p)
2422 type = SET;
2423 else if (type == JUMP_INSN && simplejump_p (insn))
2424 type = PC;
2425 else if (type == DEBUG_INSN)
2426 type = !force_unique_p ? USE : INSN;
2428 IDATA_TYPE (id) = type;
2429 IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
2430 IDATA_REG_USES (id) = get_clear_regset_from_pool ();
2431 IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool ();
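/* To illustrate the classification above: an ordinary arithmetic insn
   (GET_CODE of INSN) that is not forced unique gets type SET; a simple
   unconditional jump gets type PC; a debug insn gets type USE, or INSN
   when forced unique; everything else, e.g. a conditional jump, keeps
   its raw GET_CODE as its type.  */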
2434 /* Start initializing insn data. */
2435 static void
2436 deps_init_id_start_insn (insn_t insn)
2438 gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE);
2440 setup_id_for_insn (deps_init_id_data.id, insn,
2441 deps_init_id_data.force_unique_p);
2442 deps_init_id_data.where = DEPS_IN_INSN;
2445 /* Start initializing lhs data. */
2446 static void
2447 deps_init_id_start_lhs (rtx lhs)
2449 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2450 gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL);
2452 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2454 IDATA_LHS (deps_init_id_data.id) = lhs;
2455 deps_init_id_data.where = DEPS_IN_LHS;
2459 /* Finish initializing lhs data. */
2460 static void
2461 deps_init_id_finish_lhs (void)
2463 deps_init_id_data.where = DEPS_IN_INSN;
2466 /* Note a set of REGNO. */
2467 static void
2468 deps_init_id_note_reg_set (int regno)
2470 haifa_note_reg_set (regno);
2472 if (deps_init_id_data.where == DEPS_IN_RHS)
2473 deps_init_id_data.force_use_p = true;
2475 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2476 SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
2478 #ifdef STACK_REGS
2479 /* Make instructions that set stack registers ineligible for
2480 renaming to avoid issues with find_used_regs. */
2481 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2482 deps_init_id_data.force_use_p = true;
2483 #endif
2486 /* Note a clobber of REGNO. */
2487 static void
2488 deps_init_id_note_reg_clobber (int regno)
2490 haifa_note_reg_clobber (regno);
2492 if (deps_init_id_data.where == DEPS_IN_RHS)
2493 deps_init_id_data.force_use_p = true;
2495 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2496 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno);
2499 /* Note a use of REGNO. */
2500 static void
2501 deps_init_id_note_reg_use (int regno)
2503 haifa_note_reg_use (regno);
2505 if (IDATA_TYPE (deps_init_id_data.id) != PC)
2506 SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno);
2509 /* Start initializing rhs data. */
2510 static void
2511 deps_init_id_start_rhs (rtx rhs)
2513 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2515 /* And there was no sel_deps_reset_to_insn (). */
2516 if (IDATA_LHS (deps_init_id_data.id) != NULL)
2518 IDATA_RHS (deps_init_id_data.id) = rhs;
2519 deps_init_id_data.where = DEPS_IN_RHS;
2523 /* Finish initializing rhs data. */
2524 static void
2525 deps_init_id_finish_rhs (void)
2527 gcc_assert (deps_init_id_data.where == DEPS_IN_RHS
2528 || deps_init_id_data.where == DEPS_IN_INSN);
2529 deps_init_id_data.where = DEPS_IN_INSN;
2532 /* Finish initializing insn data. */
2533 static void
2534 deps_init_id_finish_insn (void)
2536 gcc_assert (deps_init_id_data.where == DEPS_IN_INSN);
2538 if (IDATA_TYPE (deps_init_id_data.id) == SET)
2540 rtx lhs = IDATA_LHS (deps_init_id_data.id);
2541 rtx rhs = IDATA_RHS (deps_init_id_data.id);
2543 if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
2544 || deps_init_id_data.force_use_p)
2546 /* This should be a USE, as we don't want to schedule its RHS
2547 separately. However, we still want to have them recorded
2548 for the purposes of substitution. That's why we don't
2549 simply call downgrade_to_use () here. */
2550 gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
2551 gcc_assert (!lhs == !rhs);
2553 IDATA_TYPE (deps_init_id_data.id) = USE;
2557 deps_init_id_data.where = DEPS_IN_NOWHERE;
2560 /* This is dependence info used for initializing insn's data. */
2561 static struct sched_deps_info_def deps_init_id_sched_deps_info;
2563 /* This initializes most of the static part of the above structure. */
2564 static const struct sched_deps_info_def const_deps_init_id_sched_deps_info =
2566 NULL,
2568 deps_init_id_start_insn,
2569 deps_init_id_finish_insn,
2570 deps_init_id_start_lhs,
2571 deps_init_id_finish_lhs,
2572 deps_init_id_start_rhs,
2573 deps_init_id_finish_rhs,
2574 deps_init_id_note_reg_set,
2575 deps_init_id_note_reg_clobber,
2576 deps_init_id_note_reg_use,
2577 NULL, /* note_mem_dep */
2578 NULL, /* note_dep */
2580 0, /* use_cselib */
2581 0, /* use_deps_list */
2582 0 /* generate_spec_deps */
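/* An illustration of how deps_analyze_insn drives the hooks above for a
   hypothetical insn (set (reg 0) (plus (reg 1) (reg 2))): start_insn
   fires first, then the destination is scanned between start_lhs and
   finish_lhs (noting a set of regno 0), then the source between
   start_rhs and finish_rhs (noting uses of regnos 1 and 2), and
   finish_insn decides whether the SET must be downgraded to USE.  */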
2585 /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true,
2586 we don't actually need information about lhs and rhs. */
2587 static void
2588 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
2590 rtx pat = PATTERN (insn);
2592 if (NONJUMP_INSN_P (insn)
2593 && GET_CODE (pat) == SET
2594 && !force_unique_p)
2596 IDATA_RHS (id) = SET_SRC (pat);
2597 IDATA_LHS (id) = SET_DEST (pat);
2599 else
2600 IDATA_LHS (id) = IDATA_RHS (id) = NULL;
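/* E.g. for a hypothetical insn with pattern (set (reg 0) (plus (reg 1)
   (const_int 8))), IDATA_LHS becomes (reg 0) and IDATA_RHS the PLUS
   expression; jumps, PARALLELs and forced-unique insns get NULL for
   both.  */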
2603 /* Possibly downgrade INSN to USE. */
2604 static void
2605 maybe_downgrade_id_to_use (idata_t id, insn_t insn)
2607 bool must_be_use = false;
2608 unsigned uid = INSN_UID (insn);
2609 df_ref *rec;
2610 rtx lhs = IDATA_LHS (id);
2611 rtx rhs = IDATA_RHS (id);
2613 /* We downgrade only SETs. */
2614 if (IDATA_TYPE (id) != SET)
2615 return;
2617 if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
2619 IDATA_TYPE (id) = USE;
2620 return;
2623 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2625 df_ref def = *rec;
2627 if (DF_REF_INSN (def)
2628 && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
2629 && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
2631 must_be_use = true;
2632 break;
2635 #ifdef STACK_REGS
2636 /* Make instructions that set stack registers ineligible for
2637 renaming to avoid issues with find_used_regs. */
2638 if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
2640 must_be_use = true;
2641 break;
2643 #endif
2646 if (must_be_use)
2647 IDATA_TYPE (id) = USE;
2650 /* Setup register sets describing INSN in ID. */
2651 static void
2652 setup_id_reg_sets (idata_t id, insn_t insn)
2654 unsigned uid = INSN_UID (insn);
2655 df_ref *rec;
2656 regset tmp = get_clear_regset_from_pool ();
2658 for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
2660 df_ref def = *rec;
2661 unsigned int regno = DF_REF_REGNO (def);
2663 /* Post modifies are treated like clobbers by sched-deps.c. */
2664 if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
2665 | DF_REF_PRE_POST_MODIFY)))
2666 SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
2667 else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
2669 SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
2671 #ifdef STACK_REGS
2672 /* For stack registers, treat writes to them as writes
2673 to the first one to be consistent with sched-deps.c. */
2674 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2675 SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
2676 #endif
2678 /* Mark special refs that generate read/write def pair. */
2679 if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
2680 || regno == STACK_POINTER_REGNUM)
2681 bitmap_set_bit (tmp, regno);
2684 for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
2686 df_ref use = *rec;
2687 unsigned int regno = DF_REF_REGNO (use);
2689 /* When these refs are met for the first time, skip them, as
2690 these uses are just counterparts of some defs. */
2691 if (bitmap_bit_p (tmp, regno))
2692 bitmap_clear_bit (tmp, regno);
2693 else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
2695 SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
2697 #ifdef STACK_REGS
2698 /* For stack registers, treat reads from them as reads from
2699 the first one to be consistent with sched-deps.c. */
2700 if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
2701 SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
2702 #endif
2706 return_regset_to_pool (tmp);
2709 /* Initialize instruction data for INSN in ID using DF's data. */
2710 static void
2711 init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
2713 gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL);
2715 setup_id_for_insn (id, insn, force_unique_p);
2716 setup_id_lhs_rhs (id, insn, force_unique_p);
2718 if (INSN_NOP_P (insn))
2719 return;
2721 maybe_downgrade_id_to_use (id, insn);
2722 setup_id_reg_sets (id, insn);
2725 /* Initialize instruction data for INSN in ID. */
2726 static void
2727 deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
2729 struct deps_desc _dc, *dc = &_dc;
2731 deps_init_id_data.where = DEPS_IN_NOWHERE;
2732 deps_init_id_data.id = id;
2733 deps_init_id_data.force_unique_p = force_unique_p;
2734 deps_init_id_data.force_use_p = false;
2736 init_deps (dc, false);
2738 memcpy (&deps_init_id_sched_deps_info,
2739 &const_deps_init_id_sched_deps_info,
2740 sizeof (deps_init_id_sched_deps_info));
2742 if (spec_info != NULL)
2743 deps_init_id_sched_deps_info.generate_spec_deps = 1;
2745 sched_deps_info = &deps_init_id_sched_deps_info;
2747 deps_analyze_insn (dc, insn);
2749 free_deps (dc);
2751 deps_init_id_data.id = NULL;
2755 struct sched_scan_info_def
2757 /* This hook notifies the scheduler frontend to extend its internal
2758 per-basic-block data structures. It should be called once before a
2759 series of calls to bb_init (). */
2760 void (*extend_bb) (void);
2762 /* This hook makes the scheduler frontend initialize its internal data
2763 structures for the passed basic block. */
2764 void (*init_bb) (basic_block);
2766 /* This hook notifies the scheduler frontend to extend its internal per-insn
2767 data structures. It should be called once before a series of calls to
2768 insn_init (). */
2769 void (*extend_insn) (void);
2771 /* This hook makes the scheduler frontend initialize its internal data
2772 structures for the passed insn. */
2773 void (*init_insn) (rtx);
2776 /* A driver function to add a set of basic blocks (BBS) to the
2777 scheduling region. */
2778 static void
2779 sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs)
2781 unsigned i;
2782 basic_block bb;
2784 if (ssi->extend_bb)
2785 ssi->extend_bb ();
2787 if (ssi->init_bb)
2788 FOR_EACH_VEC_ELT (bbs, i, bb)
2789 ssi->init_bb (bb);
2791 if (ssi->extend_insn)
2792 ssi->extend_insn ();
2794 if (ssi->init_insn)
2795 FOR_EACH_VEC_ELT (bbs, i, bb)
2797 rtx insn;
2799 FOR_BB_INSNS (bb, insn)
2800 ssi->init_insn (insn);
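/* A hypothetical client needing only per-insn initialization would be
   driven like this (hook order: extend_bb, init_bb, extend_insn,
   init_insn; NULL hooks are skipped):

     const struct sched_scan_info_def ssi
       = { NULL, NULL, my_extend_insn, my_init_insn };
     sched_scan (&ssi, bbs);

   where my_extend_insn and my_init_insn are placeholder names; see
   sel_init_global_and_expr below for a real instance.  */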
2804 /* Implement hooks for collecting fundamental insn properties, like whether
2805 an insn is an ASM or is within a SCHED_GROUP. */
2807 /* True when the "one-time init" data for INSN has already been initialized. */
2808 static bool
2809 first_time_insn_init (insn_t insn)
2811 return INSN_LIVE (insn) == NULL;
2814 /* Hash an entry in a transformed_insns hashtable. */
2815 static hashval_t
2816 hash_transformed_insns (const void *p)
2818 return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old);
2821 /* Compare the entries in a transformed_insns hashtable. */
2822 static int
2823 eq_transformed_insns (const void *p, const void *q)
2825 rtx i1 = VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old);
2826 rtx i2 = VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old);
2828 if (INSN_UID (i1) == INSN_UID (i2))
2829 return 1;
2830 return rtx_equal_p (PATTERN (i1), PATTERN (i2));
2833 /* Free an entry in a transformed_insns hashtable. */
2834 static void
2835 free_transformed_insns (void *p)
2837 struct transformed_insns *pti = (struct transformed_insns *) p;
2839 vinsn_detach (pti->vinsn_old);
2840 vinsn_detach (pti->vinsn_new);
2841 free (pti);
2844 /* Init the s_i_d data for INSN which should be inited just once, when
2845 we first see the insn. */
2846 static void
2847 init_first_time_insn_data (insn_t insn)
2849 /* This should not be set if this is the first time we init data for
2850 insn. */
2851 gcc_assert (first_time_insn_init (insn));
2853 /* These are needed for nops too. */
2854 INSN_LIVE (insn) = get_regset_from_pool ();
2855 INSN_LIVE_VALID_P (insn) = false;
2857 if (!INSN_NOP_P (insn))
2859 INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
2860 INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
2861 INSN_TRANSFORMED_INSNS (insn)
2862 = htab_create (16, hash_transformed_insns,
2863 eq_transformed_insns, free_transformed_insns);
2864 init_deps (&INSN_DEPS_CONTEXT (insn), true);
2868 /* Free almost all above data for INSN that is scheduled already.
2869 Used for extra-large basic blocks. */
2870 void
2871 free_data_for_scheduled_insn (insn_t insn)
2873 gcc_assert (! first_time_insn_init (insn));
2875 if (! INSN_ANALYZED_DEPS (insn))
2876 return;
2878 BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
2879 BITMAP_FREE (INSN_FOUND_DEPS (insn));
2880 htab_delete (INSN_TRANSFORMED_INSNS (insn));
2882 /* This is allocated only for bookkeeping insns. */
2883 if (INSN_ORIGINATORS (insn))
2884 BITMAP_FREE (INSN_ORIGINATORS (insn));
2885 free_deps (&INSN_DEPS_CONTEXT (insn));
2887 INSN_ANALYZED_DEPS (insn) = NULL;
2889 /* Clear the readonly flag so we would ICE when trying to recalculate
2890 the deps context (as we believe that it should not happen). */
2891 (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
2894 /* Free the same data as above for INSN. */
2895 static void
2896 free_first_time_insn_data (insn_t insn)
2898 gcc_assert (! first_time_insn_init (insn));
2900 free_data_for_scheduled_insn (insn);
2901 return_regset_to_pool (INSN_LIVE (insn));
2902 INSN_LIVE (insn) = NULL;
2903 INSN_LIVE_VALID_P (insn) = false;
2906 /* Initialize region-scope data structures for basic blocks. */
2907 static void
2908 init_global_and_expr_for_bb (basic_block bb)
2910 if (sel_bb_empty_p (bb))
2911 return;
2913 invalidate_av_set (bb);
2916 /* Data for global dependency analysis (to initialize CANT_MOVE and
2917 SCHED_GROUP_P). */
2918 static struct
2920 /* Previous insn. */
2921 insn_t prev_insn;
2922 } init_global_data;
2924 /* Determine if INSN is in the sched_group, is an asm or should not be
2925 cloned. After that initialize its expr. */
2926 static void
2927 init_global_and_expr_for_insn (insn_t insn)
2929 if (LABEL_P (insn))
2930 return;
2932 if (NOTE_INSN_BASIC_BLOCK_P (insn))
2934 init_global_data.prev_insn = NULL_RTX;
2935 return;
2938 gcc_assert (INSN_P (insn));
2940 if (SCHED_GROUP_P (insn))
2941 /* Setup a sched_group. */
2943 insn_t prev_insn = init_global_data.prev_insn;
2945 if (prev_insn)
2946 INSN_SCHED_NEXT (prev_insn) = insn;
2948 init_global_data.prev_insn = insn;
2950 else
2951 init_global_data.prev_insn = NULL_RTX;
2953 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
2954 || asm_noperands (PATTERN (insn)) >= 0)
2955 /* Mark INSN as an asm. */
2956 INSN_ASM_P (insn) = true;
2959 bool force_unique_p;
2960 ds_t spec_done_ds;
2962 /* Certain instructions cannot be cloned, and frame related insns and
2963 the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of
2964 their block. */
2965 if (prologue_epilogue_contains (insn))
2967 if (RTX_FRAME_RELATED_P (insn))
2968 CANT_MOVE (insn) = 1;
2969 else
2971 rtx note;
2972 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2973 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE
2974 && ((enum insn_note) INTVAL (XEXP (note, 0))
2975 == NOTE_INSN_EPILOGUE_BEG))
2977 CANT_MOVE (insn) = 1;
2978 break;
2981 force_unique_p = true;
2983 else
2984 if (CANT_MOVE (insn)
2985 || INSN_ASM_P (insn)
2986 || SCHED_GROUP_P (insn)
2987 || CALL_P (insn)
2988 /* Exception handling insns are always unique. */
2989 || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
2990 /* TRAP_IF, though it has an INSN code, is control_flow_insn_p (). */
2991 || control_flow_insn_p (insn)
2992 || volatile_insn_p (PATTERN (insn))
2993 || (targetm.cannot_copy_insn_p
2994 && targetm.cannot_copy_insn_p (insn)))
2995 force_unique_p = true;
2996 else
2997 force_unique_p = false;
2999 if (targetm.sched.get_insn_spec_ds)
3001 spec_done_ds = targetm.sched.get_insn_spec_ds (insn);
3002 spec_done_ds = ds_get_max_dep_weak (spec_done_ds);
3004 else
3005 spec_done_ds = 0;
3007 /* Initialize INSN's expr. */
3008 init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
3009 REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
3010 spec_done_ds, 0, 0, vNULL, true,
3011 false, false, false, CANT_MOVE (insn));
3014 init_first_time_insn_data (insn);
3017 /* Scan the region and initialize instruction data for basic blocks BBS. */
3018 void
3019 sel_init_global_and_expr (bb_vec_t bbs)
3021 /* ??? It would be nice to implement push / pop scheme for sched_infos. */
3022 const struct sched_scan_info_def ssi =
3024 NULL, /* extend_bb */
3025 init_global_and_expr_for_bb, /* init_bb */
3026 extend_insn_data, /* extend_insn */
3027 init_global_and_expr_for_insn /* init_insn */
3030 sched_scan (&ssi, bbs);
3033 /* Finalize region-scope data structures for basic blocks. */
3034 static void
3035 finish_global_and_expr_for_bb (basic_block bb)
3037 av_set_clear (&BB_AV_SET (bb));
3038 BB_AV_LEVEL (bb) = 0;
3041 /* Finalize INSN's data. */
3042 static void
3043 finish_global_and_expr_insn (insn_t insn)
3045 if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
3046 return;
3048 gcc_assert (INSN_P (insn));
3050 if (INSN_LUID (insn) > 0)
3052 free_first_time_insn_data (insn);
3053 INSN_WS_LEVEL (insn) = 0;
3054 CANT_MOVE (insn) = 0;
3056 /* We can no longer assert this, as vinsns of this insn could be
3057 easily live in other insn's caches. This should be changed to
3058 a counter-like approach among all vinsns. */
3059 gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
3060 clear_expr (INSN_EXPR (insn));
3064 /* Finalize per instruction data for the whole region. */
3065 void
3066 sel_finish_global_and_expr (void)
3069 bb_vec_t bbs;
3070 int i;
3072 bbs.create (current_nr_blocks);
3074 for (i = 0; i < current_nr_blocks; i++)
3075 bbs.quick_push (BASIC_BLOCK (BB_TO_BLOCK (i)));
3077 /* Clear AV_SETs and INSN_EXPRs. */
3079 const struct sched_scan_info_def ssi =
3081 NULL, /* extend_bb */
3082 finish_global_and_expr_for_bb, /* init_bb */
3083 NULL, /* extend_insn */
3084 finish_global_and_expr_insn /* init_insn */
3087 sched_scan (&ssi, bbs);
3090 bbs.release ();
3093 finish_insns ();
3097 /* In the hooks below, we merely calculate whether or not a dependence
3098 exists, and in what part of the insn. However, we will need more data
3099 when we start caching dependence requests. */
3101 /* Container to hold information for dependency analysis. */
3102 static struct
3104 deps_t dc;
3106 /* A variable to track which part of rtx we are scanning in
3107 sched-deps.c: sched_analyze_insn (). */
3108 deps_where_t where;
3110 /* Current producer. */
3111 insn_t pro;
3113 /* Current consumer. */
3114 vinsn_t con;
3116 /* If HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
3117 X is from { INSN, LHS, RHS }. */
3118 ds_t has_dep_p[DEPS_IN_NOWHERE];
3119 } has_dependence_data;
3121 /* Start analyzing dependencies of INSN. */
3122 static void
3123 has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED)
3125 gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE);
3127 has_dependence_data.where = DEPS_IN_INSN;
3130 /* Finish analyzing dependencies of an insn. */
3131 static void
3132 has_dependence_finish_insn (void)
3134 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3136 has_dependence_data.where = DEPS_IN_NOWHERE;
3139 /* Start analyzing dependencies of LHS. */
3140 static void
3141 has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED)
3143 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3145 if (VINSN_LHS (has_dependence_data.con) != NULL)
3146 has_dependence_data.where = DEPS_IN_LHS;
3149 /* Finish analyzing dependencies of an lhs. */
3150 static void
3151 has_dependence_finish_lhs (void)
3153 has_dependence_data.where = DEPS_IN_INSN;
3156 /* Start analyzing dependencies of RHS. */
3157 static void
3158 has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED)
3160 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3162 if (VINSN_RHS (has_dependence_data.con) != NULL)
3163 has_dependence_data.where = DEPS_IN_RHS;
3166 /* Finish analyzing dependencies of an rhs. */
3167 static void
3168 has_dependence_finish_rhs (void)
3170 gcc_assert (has_dependence_data.where == DEPS_IN_RHS
3171 || has_dependence_data.where == DEPS_IN_INSN);
3173 has_dependence_data.where = DEPS_IN_INSN;
3176 /* Note a set of REGNO. */
3177 static void
3178 has_dependence_note_reg_set (int regno)
3180 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3182 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3183 VINSN_INSN_RTX
3184 (has_dependence_data.con)))
3186 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3188 if (reg_last->sets != NULL
3189 || reg_last->clobbers != NULL)
3190 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3192 if (reg_last->uses || reg_last->implicit_sets)
3193 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3197 /* Note a clobber of REGNO. */
3198 static void
3199 has_dependence_note_reg_clobber (int regno)
3201 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3203 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3204 VINSN_INSN_RTX
3205 (has_dependence_data.con)))
3207 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3209 if (reg_last->sets)
3210 *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
3212 if (reg_last->uses || reg_last->implicit_sets)
3213 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3217 /* Note a use of REGNO. */
3218 static void
3219 has_dependence_note_reg_use (int regno)
3221 struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno];
3223 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3224 VINSN_INSN_RTX
3225 (has_dependence_data.con)))
3227 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3229 if (reg_last->sets)
3230 *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE;
3232 if (reg_last->clobbers || reg_last->implicit_sets)
3233 *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
3235 /* Merge BE_IN_SPEC bits into *DSP when the dependency producer
3236 is actually a check insn. We need to do this for any register
3237 read-read dependency with the check unless we track properly
3238 all registers written by BE_IN_SPEC-speculated insns, as
3239 we don't have explicit dependence lists. See PR 53975. */
3240 if (reg_last->uses)
3242 ds_t pro_spec_checked_ds;
3244 pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro);
3245 pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds);
3247 if (pro_spec_checked_ds != 0)
3248 *dsp = ds_full_merge (*dsp, pro_spec_checked_ds,
3249 NULL_RTX, NULL_RTX);
3254 /* Note a memory dependence. */
3255 static void
3256 has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED,
3257 rtx pending_mem ATTRIBUTE_UNUSED,
3258 insn_t pending_insn ATTRIBUTE_UNUSED,
3259 ds_t ds ATTRIBUTE_UNUSED)
3261 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3262 VINSN_INSN_RTX (has_dependence_data.con)))
3264 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3266 *dsp = ds_full_merge (ds, *dsp, pending_mem, mem);
3270 /* Note a dependence. */
3271 static void
3272 has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED,
3273 ds_t ds ATTRIBUTE_UNUSED)
3275 if (!sched_insns_conditions_mutex_p (has_dependence_data.pro,
3276 VINSN_INSN_RTX (has_dependence_data.con)))
3278 ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where];
3280 *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX);
3284 /* Mark the insn as having a hard dependence that prevents speculation. */
3285 void
3286 sel_mark_hard_insn (rtx insn)
3288 int i;
3290 /* Only work when we're in has_dependence_p mode.
3291 ??? This is a hack, this should actually be a hook. */
3292 if (!has_dependence_data.dc || !has_dependence_data.pro)
3293 return;
3295 gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con));
3296 gcc_assert (has_dependence_data.where == DEPS_IN_INSN);
3298 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3299 has_dependence_data.has_dep_p[i] &= ~SPECULATIVE;
3302 /* This structure holds the hooks for the dependency analysis used when
3303 actually processing dependencies in the scheduler. */
3304 static struct sched_deps_info_def has_dependence_sched_deps_info;
3306 /* This initializes most of the fields of the above structure. */
3307 static const struct sched_deps_info_def const_has_dependence_sched_deps_info =
3309 NULL,
3311 has_dependence_start_insn,
3312 has_dependence_finish_insn,
3313 has_dependence_start_lhs,
3314 has_dependence_finish_lhs,
3315 has_dependence_start_rhs,
3316 has_dependence_finish_rhs,
3317 has_dependence_note_reg_set,
3318 has_dependence_note_reg_clobber,
3319 has_dependence_note_reg_use,
3320 has_dependence_note_mem_dep,
3321 has_dependence_note_dep,
3323 0, /* use_cselib */
3324 0, /* use_deps_list */
3325 0 /* generate_spec_deps */
3328 /* Initialize has_dependence_sched_deps_info with extra spec field. */
3329 static void
3330 setup_has_dependence_sched_deps_info (void)
3332 memcpy (&has_dependence_sched_deps_info,
3333 &const_has_dependence_sched_deps_info,
3334 sizeof (has_dependence_sched_deps_info));
3336 if (spec_info != NULL)
3337 has_dependence_sched_deps_info.generate_spec_deps = 1;
3339 sched_deps_info = &has_dependence_sched_deps_info;
3342 /* Remove all dependences found and recorded in has_dependence_data array. */
3343 void
3344 sel_clear_has_dependence (void)
3346 int i;
3348 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3349 has_dependence_data.has_dep_p[i] = 0;
3352 /* Return nonzero if EXPR is dependent upon PRED. Return the pointer
3353 to the dependence information array in HAS_DEP_PP. */
3354 ds_t
3355 has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
3357 int i;
3358 ds_t ds;
3359 struct deps_desc *dc;
3361 if (INSN_SIMPLEJUMP_P (pred))
3362 /* Unconditional jump is just a transfer of control flow.
3363 Ignore it. */
3364 return false;
3366 dc = &INSN_DEPS_CONTEXT (pred);
3368 /* We init this field lazily. */
3369 if (dc->reg_last == NULL)
3370 init_deps_reg_last (dc);
3372 if (!dc->readonly)
3374 has_dependence_data.pro = NULL;
3375 /* Initialize empty dep context with information about PRED. */
3376 advance_deps_context (dc, pred);
3377 dc->readonly = 1;
3380 has_dependence_data.where = DEPS_IN_NOWHERE;
3381 has_dependence_data.pro = pred;
3382 has_dependence_data.con = EXPR_VINSN (expr);
3383 has_dependence_data.dc = dc;
3385 sel_clear_has_dependence ();
3387 /* Now catch all dependencies that would be generated between PRED and
3388 INSN. */
3389 setup_has_dependence_sched_deps_info ();
3390 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3391 has_dependence_data.dc = NULL;
3393 /* When a barrier was found, set DEPS_IN_INSN bits. */
3394 if (dc->last_reg_pending_barrier == TRUE_BARRIER)
3395 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
3396 else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
3397 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3399 /* Do not allow stores to memory to move through checks. Currently
3400 we don't move this to sched-deps.c as the check doesn't have
3401 obvious places to which this dependence can be attached.
3402 FIXME: this should go to a hook. */
3403 if (EXPR_LHS (expr)
3404 && MEM_P (EXPR_LHS (expr))
3405 && sel_insn_is_speculation_check (pred))
3406 has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
3408 *has_dep_pp = has_dependence_data.has_dep_p;
3409 ds = 0;
3410 for (i = 0; i < DEPS_IN_NOWHERE; i++)
3411 ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i],
3412 NULL_RTX, NULL_RTX);
3414 return ds;
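/* A minimal caller sketch (hypothetical): to ask whether EXPR may move
   up through PRED, one would do

     ds_t *has_dep_p;
     ds_t full_ds = has_dependence_p (expr, pred, &has_dep_p);

   and then inspect FULL_DS for hard vs. speculative dependence bits, or
   examine has_dep_p[DEPS_IN_LHS] and has_dep_p[DEPS_IN_RHS] separately
   as the code motion routines in sel-sched.c do.  */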
3418 /* Dependence hooks implementation that checks dependence latency constraints
3419 on the insns being scheduled. The entry point for these routines is
3420 tick_check_p predicate. */
3422 static struct
3424 /* An expr we are currently checking. */
3425 expr_t expr;
3427 /* A minimal cycle for its scheduling. */
3428 int cycle;
3430 /* Whether we have seen a true dependence while checking. */
3431 bool seen_true_dep_p;
3432 } tick_check_data;
3434 /* Update the minimal scheduling cycle for tick_check_insn given that it
3435 depends on PRO_INSN with status DS and weight DW. */
3436 static void
3437 tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw)
3439 expr_t con_expr = tick_check_data.expr;
3440 insn_t con_insn = EXPR_INSN_RTX (con_expr);
3442 if (con_insn != pro_insn)
3444 enum reg_note dt;
3445 int tick;
3447 if (/* PROducer was removed from above due to pipelining. */
3448 !INSN_IN_STREAM_P (pro_insn)
3449 /* Or PROducer was originally on the next iteration regarding the
3450 CONsumer. */
3451 || (INSN_SCHED_TIMES (pro_insn)
3452 - EXPR_SCHED_TIMES (con_expr)) > 1)
3453 /* Don't count this dependence. */
3454 return;
3456 dt = ds_to_dt (ds);
3457 if (dt == REG_DEP_TRUE)
3458 tick_check_data.seen_true_dep_p = true;
3460 gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0);
3463 dep_def _dep, *dep = &_dep;
3465 init_dep (dep, pro_insn, con_insn, dt);
3467 tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw);
3470 /* When there are several kinds of dependencies between pro and con,
3471 only REG_DEP_TRUE should be taken into account. */
3472 if (tick > tick_check_data.cycle
3473 && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p))
3474 tick_check_data.cycle = tick;
3478 /* An implementation of note_dep hook. */
3479 static void
3480 tick_check_note_dep (insn_t pro, ds_t ds)
3482 tick_check_dep_with_dw (pro, ds, 0);
3485 /* An implementation of note_mem_dep hook. */
3486 static void
3487 tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds)
3489 dw_t dw;
3491 dw = (ds_to_dt (ds) == REG_DEP_TRUE
3492 ? estimate_dep_weak (mem1, mem2)
3493 : 0);
3495 tick_check_dep_with_dw (pro, ds, dw);
3498 /* This structure contains hooks for dependence analysis used when determining
3499 whether an insn is ready for scheduling. */
3500 static struct sched_deps_info_def tick_check_sched_deps_info =
3502 NULL,
3504 NULL,
3505 NULL,
3506 NULL,
3507 NULL,
3508 NULL,
3509 NULL,
3510 haifa_note_reg_set,
3511 haifa_note_reg_clobber,
3512 haifa_note_reg_use,
3513 tick_check_note_mem_dep,
3514 tick_check_note_dep,
3516 0, 0, 0
3519 /* Estimate number of cycles from the current cycle of FENCE until EXPR can be
3520 scheduled. Return 0 if all data from producers in DC is ready. */
3522 tick_check_p (expr_t expr, deps_t dc, fence_t fence)
3524 int cycles_left;
3525 /* Initialize variables. */
3526 tick_check_data.expr = expr;
3527 tick_check_data.cycle = 0;
3528 tick_check_data.seen_true_dep_p = false;
3529 sched_deps_info = &tick_check_sched_deps_info;
3531 gcc_assert (!dc->readonly);
3532 dc->readonly = 1;
3533 deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
3534 dc->readonly = 0;
3536 cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence);
3538 return cycles_left >= 0 ? cycles_left : 0;
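/* E.g., with hypothetical numbers: if the latest true-dependence
   producer of EXPR was scheduled on cycle 3 with latency 2, then
   tick_check_data.cycle becomes 5; with the fence at cycle 4,
   tick_check_p returns 1, i.e. EXPR becomes ready one cycle from now,
   while a return of 0 means it is ready immediately.  */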
3542 /* Functions to work with insns. */
3544 /* Returns true if LHS of INSN is the same as DEST of an insn
3545 being moved. */
3546 bool
3547 lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest)
3549 rtx lhs = INSN_LHS (insn);
3551 if (lhs == NULL || dest == NULL)
3552 return false;
3554 return rtx_equal_p (lhs, dest);
3557 /* Return s_i_d entry of INSN. Callable from debugger. */
3558 sel_insn_data_def
3559 insn_sid (insn_t insn)
3561 return *SID (insn);
3564 /* True when INSN is a speculative check. We can tell this by looking
3565 at the data structures of the selective scheduler, not by examining
3566 the pattern. */
3567 bool
3568 sel_insn_is_speculation_check (rtx insn)
3570 return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn);
3573 /* Extracts machine mode MODE and destination location DST_LOC
3574 for given INSN. */
3575 void
3576 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
3578 rtx pat = PATTERN (insn);
3580 gcc_assert (dst_loc);
3581 gcc_assert (GET_CODE (pat) == SET);
3583 *dst_loc = SET_DEST (pat);
3585 gcc_assert (*dst_loc);
3586 gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc));
3588 if (mode)
3589 *mode = GET_MODE (*dst_loc);
3592 /* Returns true when moving through JUMP will result in bookkeeping
3593 creation. */
3594 bool
3595 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
3597 insn_t succ;
3598 succ_iterator si;
3600 FOR_EACH_SUCC (succ, si, jump)
3601 if (sel_num_cfg_preds_gt_1 (succ))
3602 return true;
3604 return false;
3607 /* Return 'true' if INSN is the only one in its basic block. */
3608 static bool
3609 insn_is_the_only_one_in_bb_p (insn_t insn)
3611 return sel_bb_head_p (insn) && sel_bb_end_p (insn);
3614 #ifdef ENABLE_CHECKING
3615 /* Check that the region we're scheduling still has at most one
3616 backedge. */
3617 static void
3618 verify_backedges (void)
3620 if (pipelining_p)
3622 int i, n = 0;
3623 edge e;
3624 edge_iterator ei;
3626 for (i = 0; i < current_nr_blocks; i++)
3627 FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
3628 if (in_current_region_p (e->dest)
3629 && BLOCK_TO_BB (e->dest->index) < i)
3630 n++;
3632 gcc_assert (n <= 1);
3635 #endif
3638 /* Functions to work with control flow. */
3640 /* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that
3641 blocks are sorted in topological order (the order might have been
3642 invalidated by redirecting an edge). */
3643 static void
3644 sel_recompute_toporder (void)
3646 int i, n, rgn;
3647 int *postorder, n_blocks;
3649 postorder = XALLOCAVEC (int, n_basic_blocks);
3650 n_blocks = post_order_compute (postorder, false, false);
3652 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
3653 for (n = 0, i = n_blocks - 1; i >= 0; i--)
3654 if (CONTAINING_RGN (postorder[i]) == rgn)
3656 BLOCK_TO_BB (postorder[i]) = n;
3657 BB_TO_BLOCK (n) = postorder[i];
3658 n++;
3661 /* Assert that we updated info for all blocks. We may miss some blocks if
3662 this function is called when redirecting an edge made a block
3663 unreachable, but that block is not deleted yet. */
3664 gcc_assert (n == RGN_NR_BLOCKS (rgn));
3667 /* Tidy the possibly empty block BB. */
3668 static bool
3669 maybe_tidy_empty_bb (basic_block bb)
3671 basic_block succ_bb, pred_bb, note_bb;
3672 vec<basic_block> dom_bbs;
3673 edge e;
3674 edge_iterator ei;
3675 bool rescan_p;
3677 /* Keep an empty bb only if this block immediately precedes EXIT and
3678 has an incoming non-fallthrough edge, or it has no predecessors or
3679 successors. Otherwise remove it. */
3680 if (!sel_bb_empty_p (bb)
3681 || (single_succ_p (bb)
3682 && single_succ (bb) == EXIT_BLOCK_PTR
3683 && (!single_pred_p (bb)
3684 || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU)))
3685 || EDGE_COUNT (bb->preds) == 0
3686 || EDGE_COUNT (bb->succs) == 0)
3687 return false;
3689 /* Do not attempt to redirect complex edges. */
3690 FOR_EACH_EDGE (e, ei, bb->preds)
3691 if (e->flags & EDGE_COMPLEX)
3692 return false;
3693 else if (e->flags & EDGE_FALLTHRU)
3695 rtx note;
3696 /* If prev bb ends with asm goto, see if any of the
3697 ASM_OPERANDS_LABELs don't point to the fallthru
3698 label. Do not attempt to redirect it in that case. */
3699 if (JUMP_P (BB_END (e->src))
3700 && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
3702 int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
3704 for (i = 0; i < n; ++i)
3705 if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb))
3706 return false;
3710 free_data_sets (bb);
3712 /* Do not delete BB if it has more than one successor.
3713 That can occur when we are moving a jump. */
3714 if (!single_succ_p (bb))
3716 gcc_assert (can_merge_blocks_p (bb->prev_bb, bb));
3717 sel_merge_blocks (bb->prev_bb, bb);
3718 return true;
3721 succ_bb = single_succ (bb);
3722 rescan_p = true;
3723 pred_bb = NULL;
3724 dom_bbs.create (0);
3726 /* Save a pred/succ from the current region to attach the notes to. */
3727 note_bb = NULL;
3728 FOR_EACH_EDGE (e, ei, bb->preds)
3729 if (in_current_region_p (e->src))
3731 note_bb = e->src;
3732 break;
3734 if (note_bb == NULL)
3735 note_bb = succ_bb;
3737 /* Redirect all non-fallthru edges to the next bb. */
3738 while (rescan_p)
3740 rescan_p = false;
3742 FOR_EACH_EDGE (e, ei, bb->preds)
3744 pred_bb = e->src;
3746 if (!(e->flags & EDGE_FALLTHRU))
3748 /* We cannot invalidate the computed topological order by moving
3749 the edge destination block (E->SUCC) along a fallthru edge.
3751 We will update dominators here only when redirecting makes
3752 a block unreachable; otherwise
3753 sel_redirect_edge_and_branch will take care of it. */
3754 if (e->dest != bb
3755 && single_pred_p (e->dest))
3756 dom_bbs.safe_push (e->dest);
3757 sel_redirect_edge_and_branch (e, succ_bb);
3758 rescan_p = true;
3759 break;
3761 /* If the edge is fallthru, but PRED_BB ends in a conditional jump
3762 to BB (so there is no non-fallthru edge from PRED_BB to BB), we
3763 still have to adjust it. */
3764 else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb)))
3766 /* If possible, try to remove the unneeded conditional jump. */
3767 if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0
3768 && !IN_CURRENT_FENCE_P (BB_END (pred_bb)))
3770 if (!sel_remove_insn (BB_END (pred_bb), false, false))
3771 tidy_fallthru_edge (e);
3773 else
3774 sel_redirect_edge_and_branch (e, succ_bb);
3775 rescan_p = true;
3776 break;
3781 if (can_merge_blocks_p (bb->prev_bb, bb))
3782 sel_merge_blocks (bb->prev_bb, bb);
3783 else
3785 /* This is a block without fallthru predecessor. Just delete it. */
3786 gcc_assert (note_bb);
3787 move_bb_info (note_bb, bb);
3788 remove_empty_bb (bb, true);
3791 if (!dom_bbs.is_empty ())
3793 dom_bbs.safe_push (succ_bb);
3794 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
3795 dom_bbs.release ();
3798 return true;
3801 /* Tidy the control flow after we have removed the original insn from
3802 XBB. Return true if we have removed some blocks. When FULL_TIDYING
3803 is true, also try to optimize control flow on non-empty blocks. */
3804 bool
3805 tidy_control_flow (basic_block xbb, bool full_tidying)
3807 bool changed = true;
3808 insn_t first, last;
3810 /* First check whether XBB is empty. */
3811 changed = maybe_tidy_empty_bb (xbb);
3812 if (changed || !full_tidying)
3813 return changed;
3815 /* Check if there is an unnecessary jump left after the insn. */
3816 if (bb_has_removable_jump_to_p (xbb, xbb->next_bb)
3817 && INSN_SCHED_TIMES (BB_END (xbb)) == 0
3818 && !IN_CURRENT_FENCE_P (BB_END (xbb)))
3820 if (sel_remove_insn (BB_END (xbb), false, false))
3821 return true;
3822 tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
3825 first = sel_bb_head (xbb);
3826 last = sel_bb_end (xbb);
3827 if (MAY_HAVE_DEBUG_INSNS)
3829 if (first != last && DEBUG_INSN_P (first))
3831 first = NEXT_INSN (first);
3832 while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
3834 if (first != last && DEBUG_INSN_P (last))
3836 last = PREV_INSN (last);
3837 while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
3839 /* Check if, after removing INSN from the stream, there is an unnecessary
3840 jump in the previous basic block leading to the next basic block.
3841 If so, remove that jump and redirect the edge to the current
3842 basic block (where INSN was before the deletion). This way,
3843 when the NOP is later deleted together with its
3844 basic block, we will not get a jump to the next instruction, which
3845 can be harmful. */
3846 if (first == last
3847 && !sel_bb_empty_p (xbb)
3848 && INSN_NOP_P (last)
3849 /* Flow goes fallthru from current block to the next. */
3850 && EDGE_COUNT (xbb->succs) == 1
3851 && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
3852 /* When successor is an EXIT block, it may not be the next block. */
3853 && single_succ (xbb) != EXIT_BLOCK_PTR
3854 /* And unconditional jump in previous basic block leads to
3855 next basic block of XBB and this jump can be safely removed. */
3856 && in_current_region_p (xbb->prev_bb)
3857 && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb)
3858 && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0
3859 /* Also this jump is not at the scheduling boundary. */
3860 && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
3862 bool recompute_toporder_p;
3863 /* Clear data structures of jump - jump itself will be removed
3864 by sel_redirect_edge_and_branch. */
3865 clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
3866 recompute_toporder_p
3867 = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
3869 gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
3871 /* It can turn out that, after removing the unused jump, the basic block
3872 that contained it becomes empty too. In such a case,
3873 remove it too. */
3874 if (sel_bb_empty_p (xbb->prev_bb))
3875 changed = maybe_tidy_empty_bb (xbb->prev_bb);
3876 if (recompute_toporder_p)
3877 sel_recompute_toporder ();
3880 #ifdef ENABLE_CHECKING
3881 verify_backedges ();
3882 verify_dominators (CDI_DOMINATORS);
3883 #endif
3885 return changed;
3888 /* Purge meaningless empty blocks in the middle of a region. */
3889 void
3890 purge_empty_blocks (void)
3892 int i;
3894 /* Do not attempt to delete the first basic block in the region. */
3895 for (i = 1; i < current_nr_blocks; )
3897 basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
3899 if (maybe_tidy_empty_bb (b))
3900 continue;
3902 i++;
3906 /* Rip INSN out of the insn stream. When ONLY_DISCONNECT is true,
3907 do not delete the insn's data, because it will be re-emitted later.
3908 Return true if we have removed some blocks afterwards. */
3909 bool
3910 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
3912 basic_block bb = BLOCK_FOR_INSN (insn);
3914 gcc_assert (INSN_IN_STREAM_P (insn));
3916 if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
3918 expr_t expr;
3919 av_set_iterator i;
3921 /* When we remove a debug insn that is the head of a BB, it remains
3922 in the AV_SET of the block, but it shouldn't. */
3923 FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
3924 if (EXPR_INSN_RTX (expr) == insn)
3926 av_set_iter_remove (&i);
3927 break;
3931 if (only_disconnect)
3933 insn_t prev = PREV_INSN (insn);
3934 insn_t next = NEXT_INSN (insn);
3935 basic_block bb = BLOCK_FOR_INSN (insn);
3937 NEXT_INSN (prev) = next;
3938 PREV_INSN (next) = prev;
3940 if (BB_HEAD (bb) == insn)
3942 gcc_assert (BLOCK_FOR_INSN (prev) == bb);
3943 BB_HEAD (bb) = prev;
3945 if (BB_END (bb) == insn)
3946 BB_END (bb) = prev;
3948 else
3950 remove_insn (insn);
3951 clear_expr (INSN_EXPR (insn));
3954 /* It is necessary to null these fields before calling add_insn (). */
3955 PREV_INSN (insn) = NULL_RTX;
3956 NEXT_INSN (insn) = NULL_RTX;
3958 return tidy_control_flow (bb, full_tidying);
3961 /* Estimate the number of insns in BB. */
3962 static int
3963 sel_estimate_number_of_insns (basic_block bb)
3965 int res = 0;
3966 insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
3968 for (; insn != next_tail; insn = NEXT_INSN (insn))
3969 if (NONDEBUG_INSN_P (insn))
3970 res++;
3972 return res;
3975 /* We don't need separate luids for notes or labels. */
3976 static int
3977 sel_luid_for_non_insn (rtx x)
3979 gcc_assert (NOTE_P (x) || LABEL_P (x));
3981 return -1;
3984 /* Find the proper seqno for inserting at INSN by successors.
3985 Return -1 if no successors with positive seqno exist. */
3986 static int
3987 get_seqno_by_succs (rtx insn)
3989 basic_block bb = BLOCK_FOR_INSN (insn);
3990 rtx tmp = insn, end = BB_END (bb);
3991 int seqno;
3992 insn_t succ = NULL;
3993 succ_iterator si;
3995 while (tmp != end)
3997 tmp = NEXT_INSN (tmp);
3998 if (INSN_P (tmp))
3999 return INSN_SEQNO (tmp);
4002 seqno = INT_MAX;
4004 FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL)
4005 if (INSN_SEQNO (succ) > 0)
4006 seqno = MIN (seqno, INSN_SEQNO (succ));
4008 if (seqno == INT_MAX)
4009 return -1;
4011 return seqno;
4014 /* Compute seqno for INSN by its preds or succs. */
4015 static int
4016 get_seqno_for_a_jump (insn_t insn)
4018 int seqno;
4020 gcc_assert (INSN_SIMPLEJUMP_P (insn));
4022 if (!sel_bb_head_p (insn))
4023 seqno = INSN_SEQNO (PREV_INSN (insn));
4024 else
4026 basic_block bb = BLOCK_FOR_INSN (insn);
4028 if (single_pred_p (bb)
4029 && !in_current_region_p (single_pred (bb)))
4031 /* We can have preds outside a region when splitting edges
4032 for pipelining of an outer loop. Use succ instead.
4033 There should be only one of them. */
4034 insn_t succ = NULL;
4035 succ_iterator si;
4036 bool first = true;
4038 gcc_assert (flag_sel_sched_pipelining_outer_loops
4039 && current_loop_nest);
4040 FOR_EACH_SUCC_1 (succ, si, insn,
4041 SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
4043 gcc_assert (first);
4044 first = false;
4047 gcc_assert (succ != NULL);
4048 seqno = INSN_SEQNO (succ);
4050 else
4052 insn_t *preds;
4053 int n;
4055 cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n);
4057 gcc_assert (n > 0);
4058 /* For one predecessor, use simple method. */
4059 if (n == 1)
4060 seqno = INSN_SEQNO (preds[0]);
4061 else
4062 seqno = get_seqno_by_preds (insn);
4064 free (preds);
4068 /* We were unable to find a good seqno among preds. */
4069 if (seqno < 0)
4070 seqno = get_seqno_by_succs (insn);
4072 gcc_assert (seqno >= 0);
4074 return seqno;
4077 /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
4078 with positive seqno exist. */
4080 get_seqno_by_preds (rtx insn)
4082 basic_block bb = BLOCK_FOR_INSN (insn);
4083 rtx tmp = insn, head = BB_HEAD (bb);
4084 insn_t *preds;
4085 int n, i, seqno;
4087 while (tmp != head)
4089 tmp = PREV_INSN (tmp);
4090 if (INSN_P (tmp))
4091 return INSN_SEQNO (tmp);
4094 cfg_preds (bb, &preds, &n);
4095 for (i = 0, seqno = -1; i < n; i++)
4096 seqno = MAX (seqno, INSN_SEQNO (preds[i]));
4098 return seqno;
4103 /* Extend pass-scope data structures for basic blocks. */
4104 void
4105 sel_extend_global_bb_info (void)
4107 sel_global_bb_info.safe_grow_cleared (last_basic_block);
4110 /* Extend region-scope data structures for basic blocks. */
4111 static void
4112 extend_region_bb_info (void)
4114 sel_region_bb_info.safe_grow_cleared (last_basic_block);
4117 /* Extend all data structures to fit for all basic blocks. */
4118 static void
4119 extend_bb_info (void)
4121 sel_extend_global_bb_info ();
4122 extend_region_bb_info ();
4125 /* Finalize pass-scope data structures for basic blocks. */
4126 void
4127 sel_finish_global_bb_info (void)
4129 sel_global_bb_info.release ();
4132 /* Finalize region-scope data structures for basic blocks. */
4133 static void
4134 finish_region_bb_info (void)
4136 sel_region_bb_info.release ();
4140 /* Data for each insn in current region. */
4141 vec<sel_insn_data_def> s_i_d = vNULL;
4143 /* Extend data structures for insns from current region. */
4144 static void
4145 extend_insn_data (void)
4147 int reserve;
4149 sched_extend_target ();
4150 sched_deps_init (false);
4152 /* Extend data structures for insns from current region. */
4153 reserve = (sched_max_luid + 1 - s_i_d.length ());
4154 if (reserve > 0 && ! s_i_d.space (reserve))
4156 int size;
4158 if (sched_max_luid / 2 > 1024)
4159 size = sched_max_luid + 1024;
4160 else
4161 size = 3 * sched_max_luid / 2;
4164 s_i_d.safe_grow_cleared (size);
4168 /* Finalize data structures for insns from current region. */
4169 static void
4170 finish_insns (void)
4172 unsigned i;
4174 /* Clear here all dependence contexts that may have been left over from
4175 insns that were removed during scheduling. */
4176 for (i = 0; i < s_i_d.length (); i++)
4178 sel_insn_data_def *sid_entry = &s_i_d[i];
4180 if (sid_entry->live)
4181 return_regset_to_pool (sid_entry->live);
4182 if (sid_entry->analyzed_deps)
4184 BITMAP_FREE (sid_entry->analyzed_deps);
4185 BITMAP_FREE (sid_entry->found_deps);
4186 htab_delete (sid_entry->transformed_insns);
4187 free_deps (&sid_entry->deps_context);
4189 if (EXPR_VINSN (&sid_entry->expr))
4191 clear_expr (&sid_entry->expr);
4193 /* Also, clear CANT_MOVE bit here, because we really don't want it
4194 to be passed to the next region. */
4195 CANT_MOVE_BY_LUID (i) = 0;
4199 s_i_d.release ();
4202 /* A proxy to pass initialization data to init_insn (). */
4203 static sel_insn_data_def _insn_init_ssid;
4204 static sel_insn_data_t insn_init_ssid = &_insn_init_ssid;
4206 /* If true create a new vinsn. Otherwise use the one from EXPR. */
4207 static bool insn_init_create_new_vinsn_p;
4209 /* Set all necessary data for initialization of the new insn[s]. */
4210 static expr_t
4211 set_insn_init (expr_t expr, vinsn_t vi, int seqno)
4213 expr_t x = &insn_init_ssid->expr;
4215 copy_expr_onside (x, expr);
4216 if (vi != NULL)
4218 insn_init_create_new_vinsn_p = false;
4219 change_vinsn_in_expr (x, vi);
4221 else
4222 insn_init_create_new_vinsn_p = true;
4224 insn_init_ssid->seqno = seqno;
4225 return x;
4228 /* Init data for INSN. */
4229 static void
4230 init_insn_data (insn_t insn)
4232 expr_t expr;
4233 sel_insn_data_t ssid = insn_init_ssid;
4235 /* The fields mentioned below are special and hence are not being
4236 propagated to the new insns. */
4237 gcc_assert (!ssid->asm_p && ssid->sched_next == NULL
4238 && !ssid->after_stall_p && ssid->sched_cycle == 0);
4239 gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0);
4241 expr = INSN_EXPR (insn);
4242 copy_expr (expr, &ssid->expr);
4243 prepare_insn_expr (insn, ssid->seqno);
4245 if (insn_init_create_new_vinsn_p)
4246 change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
4248 if (first_time_insn_init (insn))
4249 init_first_time_insn_data (insn);
4252 /* This is used to initialize spurious jumps generated when
4253 redirecting edges (e.g. by sel_redirect_edge_and_branch ()). */
4254 static void
4255 init_simplejump_data (insn_t insn)
4257 init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
4258 REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0,
4259 vNULL, true, false, false,
4260 false, true);
4261 INSN_SEQNO (insn) = get_seqno_for_a_jump (insn);
4262 init_first_time_insn_data (insn);
4265 /* Perform deferred initialization of insns. This is used to process
4266 a new jump that may be created by redirect_edge. */
4267 void
4268 sel_init_new_insn (insn_t insn, int flags)
4270 /* We create data structures for bb when the first insn is emitted in it. */
4271 if (INSN_P (insn)
4272 && INSN_IN_STREAM_P (insn)
4273 && insn_is_the_only_one_in_bb_p (insn))
4275 extend_bb_info ();
4276 create_initial_data_sets (BLOCK_FOR_INSN (insn));
4279 if (flags & INSN_INIT_TODO_LUID)
4281 sched_extend_luids ();
4282 sched_init_insn_luid (insn);
4285 if (flags & INSN_INIT_TODO_SSID)
4287 extend_insn_data ();
4288 init_insn_data (insn);
4289 clear_expr (&insn_init_ssid->expr);
4292 if (flags & INSN_INIT_TODO_SIMPLEJUMP)
4294 extend_insn_data ();
4295 init_simplejump_data (insn);
4298 gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
4299 == CONTAINING_RGN (BB_TO_BLOCK (0)));
4303 /* Functions to init/finish work with lv sets. */
4305 /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */
4306 static void
4307 init_lv_set (basic_block bb)
4309 gcc_assert (!BB_LV_SET_VALID_P (bb));
4311 BB_LV_SET (bb) = get_regset_from_pool ();
4312 COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
4313 BB_LV_SET_VALID_P (bb) = true;
4316 /* Copy liveness information to BB from FROM_BB. */
4317 static void
4318 copy_lv_set_from (basic_block bb, basic_block from_bb)
4320 gcc_assert (!BB_LV_SET_VALID_P (bb));
4322 COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
4323 BB_LV_SET_VALID_P (bb) = true;
4326 /* Initialize lv sets of all bb headers. */
4327 void
4328 init_lv_sets (void)
4330 basic_block bb;
4332 /* Initialize LV sets. */
4333 FOR_EACH_BB (bb)
4334 init_lv_set (bb);
4336 /* Don't forget EXIT_BLOCK. */
4337 init_lv_set (EXIT_BLOCK_PTR);
4340 /* Release lv set of BB. */
4341 static void
4342 free_lv_set (basic_block bb)
4344 gcc_assert (BB_LV_SET (bb) != NULL);
4346 return_regset_to_pool (BB_LV_SET (bb));
4347 BB_LV_SET (bb) = NULL;
4348 BB_LV_SET_VALID_P (bb) = false;
4351 /* Finalize lv sets of all bb headers. */
4352 void
4353 free_lv_sets (void)
4355 basic_block bb;
4357 /* Don't forget EXIT_BLOCK. */
4358 free_lv_set (EXIT_BLOCK_PTR);
4360 /* Free LV sets. */
4361 FOR_EACH_BB (bb)
4362 if (BB_LV_SET (bb))
4363 free_lv_set (bb);
4366 /* Mark AV_SET for BB as invalid, so this set will be updated the next time
4367 compute_av() processes BB. This function is called when creating new basic
4368 blocks, as well as for blocks (either new or existing) where new jumps are
4369 created when the control flow is being updated. */
4370 static void
4371 invalidate_av_set (basic_block bb)
4373 BB_AV_LEVEL (bb) = -1;
4376 /* Create initial data sets for BB (they will be invalid). */
4377 static void
4378 create_initial_data_sets (basic_block bb)
4380 if (BB_LV_SET (bb))
4381 BB_LV_SET_VALID_P (bb) = false;
4382 else
4383 BB_LV_SET (bb) = get_regset_from_pool ();
4384 invalidate_av_set (bb);
4387 /* Free av set of BB. */
4388 static void
4389 free_av_set (basic_block bb)
4391 av_set_clear (&BB_AV_SET (bb));
4392 BB_AV_LEVEL (bb) = 0;
4395 /* Free data sets of BB. */
4396 void
4397 free_data_sets (basic_block bb)
4399 free_lv_set (bb);
4400 free_av_set (bb);
4403 /* Exchange lv sets of TO and FROM. */
4404 static void
4405 exchange_lv_sets (basic_block to, basic_block from)
4408 regset to_lv_set = BB_LV_SET (to);
4410 BB_LV_SET (to) = BB_LV_SET (from);
4411 BB_LV_SET (from) = to_lv_set;
4415 bool to_lv_set_valid_p = BB_LV_SET_VALID_P (to);
4417 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4418 BB_LV_SET_VALID_P (from) = to_lv_set_valid_p;
4423 /* Exchange av sets of TO and FROM. */
4424 static void
4425 exchange_av_sets (basic_block to, basic_block from)
4428 av_set_t to_av_set = BB_AV_SET (to);
4430 BB_AV_SET (to) = BB_AV_SET (from);
4431 BB_AV_SET (from) = to_av_set;
4435 int to_av_level = BB_AV_LEVEL (to);
4437 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4438 BB_AV_LEVEL (from) = to_av_level;
4442 /* Exchange data sets of TO and FROM. */
4443 void
4444 exchange_data_sets (basic_block to, basic_block from)
4446 exchange_lv_sets (to, from);
4447 exchange_av_sets (to, from);
4450 /* Copy data sets of FROM to TO. */
4451 void
4452 copy_data_sets (basic_block to, basic_block from)
4454 gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to));
4455 gcc_assert (BB_AV_SET (to) == NULL);
4457 BB_AV_LEVEL (to) = BB_AV_LEVEL (from);
4458 BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from);
4460 if (BB_AV_SET_VALID_P (from))
4462 BB_AV_SET (to) = av_set_copy (BB_AV_SET (from));
4464 if (BB_LV_SET_VALID_P (from))
4466 gcc_assert (BB_LV_SET (to) != NULL);
4467 COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from));
4471 /* Return an av set for INSN, if any. */
4472 av_set_t
4473 get_av_set (insn_t insn)
4475 av_set_t av_set;
4477 gcc_assert (AV_SET_VALID_P (insn));
4479 if (sel_bb_head_p (insn))
4480 av_set = BB_AV_SET (BLOCK_FOR_INSN (insn));
4481 else
4482 av_set = NULL;
4484 return av_set;
4487 /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */
4488 int
4489 get_av_level (insn_t insn)
4491 int av_level;
4493 gcc_assert (INSN_P (insn));
4495 if (sel_bb_head_p (insn))
4496 av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn));
4497 else
4498 av_level = INSN_WS_LEVEL (insn);
4500 return av_level;
4505 /* Variables to work with control-flow graph. */
4507 /* The basic blocks that have already been processed by sched_data_update (),
4508 but haven't been passed to sel_add_bb () yet. */
4509 static vec<basic_block>
4510 last_added_blocks = vNULL;
4512 /* A pool for allocating successor infos. */
4513 static struct
4515 /* A stack for saving succs_info structures. */
4516 struct succs_info *stack;
4518 /* Its size. */
4519 int size;
4521 /* Top of the stack. */
4522 int top;
4524 /* Maximal value of the top. */
4525 int max_top;
4526 } succs_info_pool;
4528 /* Functions to work with control-flow graph. */
4530 /* Return the head insn of BB, or NULL when BB is empty. */
4531 insn_t
4532 sel_bb_head (basic_block bb)
4534 insn_t head;
4536 if (bb == EXIT_BLOCK_PTR)
4538 gcc_assert (exit_insn != NULL_RTX);
4539 head = exit_insn;
4541 else
4543 insn_t note;
4545 note = bb_note (bb);
4546 head = next_nonnote_insn (note);
4548 if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
4549 head = NULL_RTX;
4552 return head;
4555 /* Return true if INSN is a basic block header. */
4556 bool
4557 sel_bb_head_p (insn_t insn)
4559 return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn;
4562 /* Return last insn of BB. */
4563 insn_t
4564 sel_bb_end (basic_block bb)
4566 if (sel_bb_empty_p (bb))
4567 return NULL_RTX;
4569 gcc_assert (bb != EXIT_BLOCK_PTR);
4571 return BB_END (bb);
4574 /* Return true if INSN is the last insn in its basic block. */
4575 bool
4576 sel_bb_end_p (insn_t insn)
4578 return insn == sel_bb_end (BLOCK_FOR_INSN (insn));
4581 /* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK. */
4582 bool
4583 sel_bb_empty_p (basic_block bb)
4585 return sel_bb_head (bb) == NULL;
4588 /* True when BB belongs to the current scheduling region. */
4589 bool
4590 in_current_region_p (basic_block bb)
4592 if (bb->index < NUM_FIXED_BLOCKS)
4593 return false;
4595 return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0));
4598 /* Return the block which is a fallthru bb of a conditional jump JUMP. */
4599 basic_block
4600 fallthru_bb_of_jump (rtx jump)
4602 if (!JUMP_P (jump))
4603 return NULL;
4605 if (!any_condjump_p (jump))
4606 return NULL;
4608 /* A basic block that ends with a conditional jump may still have one successor
4609 (and be followed by a barrier); we are not interested in such blocks. */
4610 if (single_succ_p (BLOCK_FOR_INSN (jump)))
4611 return NULL;
4613 return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
4616 /* Remove all notes from BB, saving them in BB_NOTE_LIST. */
4617 static void
4618 init_bb (basic_block bb)
4620 remove_notes (bb_note (bb), BB_END (bb));
4621 BB_NOTE_LIST (bb) = note_list;
4624 void
4625 sel_init_bbs (bb_vec_t bbs)
4627 const struct sched_scan_info_def ssi =
4629 extend_bb_info, /* extend_bb */
4630 init_bb, /* init_bb */
4631 NULL, /* extend_insn */
4632 NULL /* init_insn */
4635 sched_scan (&ssi, bbs);
4638 /* Restore notes for the whole region. */
4639 static void
4640 sel_restore_notes (void)
4642 int bb;
4643 insn_t insn;
4645 for (bb = 0; bb < current_nr_blocks; bb++)
4647 basic_block first, last;
4649 first = EBB_FIRST_BB (bb);
4650 last = EBB_LAST_BB (bb)->next_bb;
4654 note_list = BB_NOTE_LIST (first);
4655 restore_other_notes (NULL, first);
4656 BB_NOTE_LIST (first) = NULL_RTX;
4658 FOR_BB_INSNS (first, insn)
4659 if (NONDEBUG_INSN_P (insn))
4660 reemit_notes (insn);
4662 first = first->next_bb;
4664 while (first != last);
4668 /* Free per-bb data structures. */
4669 void
4670 sel_finish_bbs (void)
4672 sel_restore_notes ();
4674 /* Remove current loop preheader from this loop. */
4675 if (current_loop_nest)
4676 sel_remove_loop_preheader ();
4678 finish_region_bb_info ();
4681 /* Return true if INSN has a single successor of type FLAGS. */
4682 bool
4683 sel_insn_has_single_succ_p (insn_t insn, int flags)
4685 insn_t succ;
4686 succ_iterator si;
4687 bool first_p = true;
4689 FOR_EACH_SUCC_1 (succ, si, insn, flags)
4691 if (first_p)
4692 first_p = false;
4693 else
4694 return false;
4697 return true;
4700 /* Allocate successor's info. */
4701 static struct succs_info *
4702 alloc_succs_info (void)
4704 if (succs_info_pool.top == succs_info_pool.max_top)
4706 int i;
4708 if (++succs_info_pool.max_top >= succs_info_pool.size)
4709 gcc_unreachable ();
4711 i = ++succs_info_pool.top;
4712 succs_info_pool.stack[i].succs_ok.create (10);
4713 succs_info_pool.stack[i].succs_other.create (10);
4714 succs_info_pool.stack[i].probs_ok.create (10);
4716 else
4717 succs_info_pool.top++;
4719 return &succs_info_pool.stack[succs_info_pool.top];
4722 /* Free successor's info. */
4723 void
4724 free_succs_info (struct succs_info * sinfo)
4726 gcc_assert (succs_info_pool.top >= 0
4727 && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
4728 succs_info_pool.top--;
4730 /* Clear stale info. */
4731 sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ());
4732 sinfo->succs_other.block_remove (0, sinfo->succs_other.length ());
4733 sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ());
4734 sinfo->all_prob = 0;
4735 sinfo->succs_ok_n = 0;
4736 sinfo->all_succs_n = 0;
4739 /* Compute successor info for INSN. FLAGS are the flags passed
4740 to the FOR_EACH_SUCC_1 iterator. */
4741 struct succs_info *
4742 compute_succs_info (insn_t insn, short flags)
4744 succ_iterator si;
4745 insn_t succ;
4746 struct succs_info *sinfo = alloc_succs_info ();
4748 /* Traverse *all* successors and decide what to do with each. */
4749 FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
4751 /* FIXME: this doesn't work for skipping to loop exits, as we don't
4752 perform code motion through inner loops. */
4753 short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS;
4755 if (current_flags & flags)
4757 sinfo->succs_ok.safe_push (succ);
4758 sinfo->probs_ok.safe_push (
4759 /* FIXME: Improve calculation when skipping
4760 inner loop to exits. */
4761 si.bb_end ? si.e1->probability : REG_BR_PROB_BASE);
4762 sinfo->succs_ok_n++;
4764 else
4765 sinfo->succs_other.safe_push (succ);
4767 /* Compute all_prob. */
4768 if (!si.bb_end)
4769 sinfo->all_prob = REG_BR_PROB_BASE;
4770 else
4771 sinfo->all_prob += si.e1->probability;
4773 sinfo->all_succs_n++;
4776 return sinfo;
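/* A minimal usage sketch (kept under #if 0), assuming an insn_t INSN
   from the current region; successor infos must be freed in LIFO order
   to match the stack discipline of succs_info_pool.  */
#if 0
static int
count_normal_succs (insn_t insn)
{
  struct succs_info *sinfo = compute_succs_info (insn, SUCCS_NORMAL);
  int res = sinfo->succs_ok_n;

  free_succs_info (sinfo);
  return res;
}
#endif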
4779 /* Return the predecessors of BB in PREDS and their number in N.
4780 Empty blocks are skipped. SIZE is used to allocate PREDS. */
4781 static void
4782 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
4784 edge e;
4785 edge_iterator ei;
4787 gcc_assert (BLOCK_TO_BB (bb->index) != 0);
4789 FOR_EACH_EDGE (e, ei, bb->preds)
4791 basic_block pred_bb = e->src;
4792 insn_t bb_end = BB_END (pred_bb);
4794 if (!in_current_region_p (pred_bb))
4796 gcc_assert (flag_sel_sched_pipelining_outer_loops
4797 && current_loop_nest);
4798 continue;
4801 if (sel_bb_empty_p (pred_bb))
4802 cfg_preds_1 (pred_bb, preds, n, size);
4803 else
4805 if (*n == *size)
4806 *preds = XRESIZEVEC (insn_t, *preds,
4807 (*size = 2 * *size + 1));
4808 (*preds)[(*n)++] = bb_end;
4812 gcc_assert (*n != 0
4813 || (flag_sel_sched_pipelining_outer_loops
4814 && current_loop_nest));
4817 /* Find all predecessors of BB and record them in PREDS and their number
4818 in N. Empty blocks are skipped, and only normal (forward in-region)
4819 edges are processed. */
4820 static void
4821 cfg_preds (basic_block bb, insn_t **preds, int *n)
4823 int size = 0;
4825 *preds = NULL;
4826 *n = 0;
4827 cfg_preds_1 (bb, preds, n, &size);
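/* A minimal sketch (kept under #if 0) of the intended use, assuming BB
   is a non-head block of the current region; it mirrors the seqno
   computation at the top of this section.  The PREDS array is
   heap-allocated and must be freed by the caller.  */
#if 0
static int
max_pred_seqno (basic_block bb)
{
  insn_t *preds;
  int n, i, seqno = -1;

  cfg_preds (bb, &preds, &n);
  for (i = 0; i < n; i++)
    seqno = MAX (seqno, INSN_SEQNO (preds[i]));

  free (preds);
  return seqno;
}
#endif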
4830 /* Returns true if we are moving INSN through a join point. */
4831 bool
4832 sel_num_cfg_preds_gt_1 (insn_t insn)
4834 basic_block bb;
4836 if (!sel_bb_head_p (insn) || INSN_BB (insn) == 0)
4837 return false;
4839 bb = BLOCK_FOR_INSN (insn);
4841 while (1)
4843 if (EDGE_COUNT (bb->preds) > 1)
4844 return true;
4846 gcc_assert (EDGE_PRED (bb, 0)->dest == bb);
4847 bb = EDGE_PRED (bb, 0)->src;
4849 if (!sel_bb_empty_p (bb))
4850 break;
4853 return false;
4856 /* Returns true when BB should be the end of an ebb. Adapted from the
4857 code in sched-ebb.c. */
4858 bool
4859 bb_ends_ebb_p (basic_block bb)
4861 basic_block next_bb = bb_next_bb (bb);
4862 edge e;
4864 if (next_bb == EXIT_BLOCK_PTR
4865 || bitmap_bit_p (forced_ebb_heads, next_bb->index)
4866 || (LABEL_P (BB_HEAD (next_bb))
4867 /* NB: LABEL_NUSES () is not maintained outside of jump.c.
4868 Work around that. */
4869 && !single_pred_p (next_bb)))
4870 return true;
4872 if (!in_current_region_p (next_bb))
4873 return true;
4875 e = find_fallthru_edge (bb->succs);
4876 if (e)
4878 gcc_assert (e->dest == next_bb);
4880 return false;
4883 return true;
4886 /* Returns true when INSN and SUCC are in the same EBB, given that SUCC is a
4887 successor of INSN. */
4888 bool
4889 in_same_ebb_p (insn_t insn, insn_t succ)
4891 basic_block ptr = BLOCK_FOR_INSN (insn);
4893 for (;;)
4895 if (ptr == BLOCK_FOR_INSN (succ))
4896 return true;
4898 if (bb_ends_ebb_p (ptr))
4899 return false;
4901 ptr = bb_next_bb (ptr);
4904 gcc_unreachable ();
4905 return false;
4908 /* Recomputes the reverse topological order for the function and
4909 saves it in REV_TOP_ORDER_INDEX. REV_TOP_ORDER_INDEX_LEN is also
4910 modified appropriately. */
4911 static void
4912 recompute_rev_top_order (void)
4914 int *postorder;
4915 int n_blocks, i;
4917 if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
4919 rev_top_order_index_len = last_basic_block;
4920 rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
4921 rev_top_order_index_len);
4924 postorder = XNEWVEC (int, n_basic_blocks);
4926 n_blocks = post_order_compute (postorder, true, false);
4927 gcc_assert (n_basic_blocks == n_blocks);
4929 /* Build reverse function: for each basic block with BB->INDEX == K
4930 rev_top_order_index[K] is its reverse topological sort number. */
4931 for (i = 0; i < n_blocks; i++)
4933 gcc_assert (postorder[i] < rev_top_order_index_len);
4934 rev_top_order_index[postorder[i]] = i;
4937 free (postorder);
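/* For instance, with a hypothetical postorder of { 3, 1, 0, 2 } the loop
   above sets rev_top_order_index[3] == 0, rev_top_order_index[1] == 1,
   rev_top_order_index[0] == 2 and rev_top_order_index[2] == 3: the later
   a block comes in topological order, the smaller its index.  */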
4940 /* Clear all flags from insns in BB that could spoil its rescheduling. */
4941 void
4942 clear_outdated_rtx_info (basic_block bb)
4944 rtx insn;
4946 FOR_BB_INSNS (bb, insn)
4947 if (INSN_P (insn))
4949 SCHED_GROUP_P (insn) = 0;
4950 INSN_AFTER_STALL_P (insn) = 0;
4951 INSN_SCHED_TIMES (insn) = 0;
4952 EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
4954 /* We cannot reuse the transformation caches: previously, with renaming
4955 enabled, we could ignore an LHS dependence and transform the
4956 expression; now we would be unable to repeat that. */
4957 htab_empty (INSN_TRANSFORMED_INSNS (insn));
4962 /* Add the note of BB to the pool of available basic block notes. */
4962 static void
4963 return_bb_to_pool (basic_block bb)
4965 rtx note = bb_note (bb);
4967 gcc_assert (NOTE_BASIC_BLOCK (note) == bb
4968 && bb->aux == NULL);
4970 /* It turns out that current cfg infrastructure does not support
4971 reuse of basic blocks. Don't bother for now. */
4972 /*bb_note_pool.safe_push (note);*/
4975 /* Get a bb_note from pool or return NULL_RTX if pool is empty. */
4976 static rtx
4977 get_bb_note_from_pool (void)
4979 if (bb_note_pool.is_empty ())
4980 return NULL_RTX;
4981 else
4983 rtx note = bb_note_pool.pop ();
4985 PREV_INSN (note) = NULL_RTX;
4986 NEXT_INSN (note) = NULL_RTX;
4988 return note;
4992 /* Free bb_note_pool. */
4993 void
4994 free_bb_note_pool (void)
4996 bb_note_pool.release ();
4999 /* Setup scheduler pools and successor structure. */
5000 void
5001 alloc_sched_pools (void)
5003 int succs_size;
5005 succs_size = MAX_WS + 1;
5006 succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
5007 succs_info_pool.size = succs_size;
5008 succs_info_pool.top = -1;
5009 succs_info_pool.max_top = -1;
5011 sched_lists_pool = create_alloc_pool ("sel-sched-lists",
5012 sizeof (struct _list_node), 500);
5015 /* Free the pools. */
5016 void
5017 free_sched_pools (void)
5019 int i;
5021 free_alloc_pool (sched_lists_pool);
5022 gcc_assert (succs_info_pool.top == -1);
5023 for (i = 0; i <= succs_info_pool.max_top; i++)
5025 succs_info_pool.stack[i].succs_ok.release ();
5026 succs_info_pool.stack[i].succs_other.release ();
5027 succs_info_pool.stack[i].probs_ok.release ();
5029 free (succs_info_pool.stack);
5033 /* Returns a position in RGN where BB can be inserted retaining
5034 topological order. */
5035 static int
5036 find_place_to_insert_bb (basic_block bb, int rgn)
5038 bool has_preds_outside_rgn = false;
5039 edge e;
5040 edge_iterator ei;
5042 /* Find whether we have preds outside the region. */
5043 FOR_EACH_EDGE (e, ei, bb->preds)
5044 if (!in_current_region_p (e->src))
5046 has_preds_outside_rgn = true;
5047 break;
5050 /* Recompute the topological order -- needed when we have more than
5051 one pred, or when we have preds outside the region. */
5052 if (flag_sel_sched_pipelining_outer_loops
5053 && (has_preds_outside_rgn || EDGE_COUNT (bb->preds) > 1))
5055 int i, bbi = bb->index, cur_bbi;
5057 recompute_rev_top_order ();
5058 for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
5060 cur_bbi = BB_TO_BLOCK (i);
5061 if (rev_top_order_index[bbi]
5062 < rev_top_order_index[cur_bbi])
5063 break;
5066 /* The right place for BB is right after block I; since the caller
5067 will increment the result by one, return (i + 1) - 1. */
5068 return (i + 1) - 1;
5070 else if (has_preds_outside_rgn)
5072 /* This is the case when we generate an extra empty block
5073 to serve as region head during pipelining. */
5074 e = EDGE_SUCC (bb, 0);
5075 gcc_assert (EDGE_COUNT (bb->succs) == 1
5076 && in_current_region_p (EDGE_SUCC (bb, 0)->dest)
5077 && (BLOCK_TO_BB (e->dest->index) == 0));
5078 return -1;
5081 /* We don't have preds outside the region. We should have
5082 the only pred, because the multiple preds case comes from
5083 the pipelining of outer loops, and that is handled above.
5084 Just take the bbi of this single pred. */
5085 if (EDGE_COUNT (bb->succs) > 0)
5087 int pred_bbi;
5089 gcc_assert (EDGE_COUNT (bb->preds) == 1);
5091 pred_bbi = EDGE_PRED (bb, 0)->src->index;
5092 return BLOCK_TO_BB (pred_bbi);
5094 else
5095 /* BB has no successors. It is safe to put it at the end. */
5096 return current_nr_blocks - 1;
5099 /* Deletes an empty basic block freeing its data. */
5100 static void
5101 delete_and_free_basic_block (basic_block bb)
5103 gcc_assert (sel_bb_empty_p (bb));
5105 if (BB_LV_SET (bb))
5106 free_lv_set (bb);
5108 bitmap_clear_bit (blocks_to_reschedule, bb->index);
5110 /* Can't assert av_set properties because we use sel_remove_bb
5111 when removing loop preheader from the region. At the point of
5112 removing the preheader we have already deallocated sel_region_bb_info. */
5113 gcc_assert (BB_LV_SET (bb) == NULL
5114 && !BB_LV_SET_VALID_P (bb)
5115 && BB_AV_LEVEL (bb) == 0
5116 && BB_AV_SET (bb) == NULL);
5118 delete_basic_block (bb);
5121 /* Add BB to the current region and update the region data. */
5122 static void
5123 add_block_to_current_region (basic_block bb)
5125 int i, pos, bbi = -2, rgn;
5127 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5128 bbi = find_place_to_insert_bb (bb, rgn);
5129 bbi += 1;
5130 pos = RGN_BLOCKS (rgn) + bbi;
5132 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5133 && ebb_head[bbi] == pos);
5135 /* Make a place for the new block. */
5136 extend_regions ();
5138 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5139 BLOCK_TO_BB (rgn_bb_table[i])++;
5141 memmove (rgn_bb_table + pos + 1,
5142 rgn_bb_table + pos,
5143 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5145 /* Initialize data for BB. */
5146 rgn_bb_table[pos] = bb->index;
5147 BLOCK_TO_BB (bb->index) = bbi;
5148 CONTAINING_RGN (bb->index) = rgn;
5150 RGN_NR_BLOCKS (rgn)++;
5152 for (i = rgn + 1; i <= nr_regions; i++)
5153 RGN_BLOCKS (i)++;
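/* A worked example of the function above, with a hypothetical layout:
   if region RGN occupies rgn_bb_table[4..7] and BBI is 2, the new block
   lands at pos == RGN_BLOCKS (rgn) + 2 == 6; the table entries from
   position 6 onwards shift right by one, and RGN_BLOCKS of every
   following region is incremented.  */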
5156 /* Remove BB from the current region and update the region data. */
5157 static void
5158 remove_bb_from_region (basic_block bb)
5160 int i, pos, bbi = -2, rgn;
5162 rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
5163 bbi = BLOCK_TO_BB (bb->index);
5164 pos = RGN_BLOCKS (rgn) + bbi;
5166 gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
5167 && ebb_head[bbi] == pos);
5169 for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
5170 BLOCK_TO_BB (rgn_bb_table[i])--;
5172 memmove (rgn_bb_table + pos,
5173 rgn_bb_table + pos + 1,
5174 (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
5176 RGN_NR_BLOCKS (rgn)--;
5177 for (i = rgn + 1; i <= nr_regions; i++)
5178 RGN_BLOCKS (i)--;
5181 /* Add BB to the current region and update all data. If BB is NULL, add all
5182 blocks from last_added_blocks vector. */
5183 static void
5184 sel_add_bb (basic_block bb)
5186 /* Extend luids so that new notes will receive zero luids. */
5187 sched_extend_luids ();
5188 sched_init_bbs ();
5189 sel_init_bbs (last_added_blocks);
5191 /* When BB is passed explicitly, the vector should contain
5192 a single element equal to BB; otherwise, the vector
5193 should not be empty. */
5194 gcc_assert (last_added_blocks.exists ());
5196 if (bb != NULL)
5198 gcc_assert (last_added_blocks.length () == 1
5199 && last_added_blocks[0] == bb);
5200 add_block_to_current_region (bb);
5202 /* We associate creating/deleting data sets with the first insn
5203 appearing / disappearing in the bb. */
5204 if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
5205 create_initial_data_sets (bb);
5207 last_added_blocks.release ();
5209 else
5210 /* BB is NULL - process LAST_ADDED_BLOCKS instead. */
5212 int i;
5213 basic_block temp_bb = NULL;
5215 for (i = 0;
5216 last_added_blocks.iterate (i, &bb); i++)
5218 add_block_to_current_region (bb);
5219 temp_bb = bb;
5222 /* We need to fetch at least one bb so we know the region
5223 to update. */
5224 gcc_assert (temp_bb != NULL);
5225 bb = temp_bb;
5227 last_added_blocks.release ();
5230 rgn_setup_region (CONTAINING_RGN (bb->index));
5233 /* Remove BB from the current region and update all data.
5234 If REMOVE_FROM_CFG_P is true, also remove the block from the cfg. */
5235 static void
5236 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
5238 unsigned idx = bb->index;
5240 gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
5242 remove_bb_from_region (bb);
5243 return_bb_to_pool (bb);
5244 bitmap_clear_bit (blocks_to_reschedule, idx);
5246 if (remove_from_cfg_p)
5248 basic_block succ = single_succ (bb);
5249 delete_and_free_basic_block (bb);
5250 set_immediate_dominator (CDI_DOMINATORS, succ,
5251 recompute_dominator (CDI_DOMINATORS, succ));
5254 rgn_setup_region (CONTAINING_RGN (idx));
5257 /* Concatenate info of EMPTY_BB to info of MERGE_BB. */
5258 static void
5259 move_bb_info (basic_block merge_bb, basic_block empty_bb)
5261 if (in_current_region_p (merge_bb))
5262 concat_note_lists (BB_NOTE_LIST (empty_bb),
5263 &BB_NOTE_LIST (merge_bb));
5264 BB_NOTE_LIST (empty_bb) = NULL_RTX;
5268 /* Remove EMPTY_BB. If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
5269 region, but keep it in CFG. */
5270 static void
5271 remove_empty_bb (basic_block empty_bb, bool remove_from_cfg_p)
5273 /* The block should contain just a note or a label.
5274 We check below that it is indeed unused. */
5275 gcc_assert (BB_HEAD (empty_bb) == BB_END (empty_bb)
5276 || LABEL_P (BB_HEAD (empty_bb)));
5278 /* If basic block has predecessors or successors, redirect them. */
5279 if (remove_from_cfg_p
5280 && (EDGE_COUNT (empty_bb->preds) > 0
5281 || EDGE_COUNT (empty_bb->succs) > 0))
5283 basic_block pred;
5284 basic_block succ;
5286 /* We need to init PRED and SUCC before redirecting edges. */
5287 if (EDGE_COUNT (empty_bb->preds) > 0)
5289 edge e;
5291 gcc_assert (EDGE_COUNT (empty_bb->preds) == 1);
5293 e = EDGE_PRED (empty_bb, 0);
5294 gcc_assert (e->src == empty_bb->prev_bb
5295 && (e->flags & EDGE_FALLTHRU));
5297 pred = empty_bb->prev_bb;
5299 else
5300 pred = NULL;
5302 if (EDGE_COUNT (empty_bb->succs) > 0)
5304 /* We do not check fallthruness here as above, because
5305 after removing a jump the edge may no longer be fallthru. */
5306 gcc_assert (EDGE_COUNT (empty_bb->succs) == 1);
5307 succ = EDGE_SUCC (empty_bb, 0)->dest;
5309 else
5310 succ = NULL;
5312 if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
5314 edge e = EDGE_PRED (empty_bb, 0);
5316 if (e->flags & EDGE_FALLTHRU)
5317 redirect_edge_succ_nodup (e, succ);
5318 else
5319 sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
5322 if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
5324 edge e = EDGE_SUCC (empty_bb, 0);
5326 if (find_edge (pred, e->dest) == NULL)
5327 redirect_edge_pred (e, pred);
5331 /* Finish removing. */
5332 sel_remove_bb (empty_bb, remove_from_cfg_p);
5335 /* An implementation of create_basic_block hook, which additionally updates
5336 per-bb data structures. */
5337 static basic_block
5338 sel_create_basic_block (void *headp, void *endp, basic_block after)
5340 basic_block new_bb;
5341 insn_t new_bb_note;
5343 gcc_assert (flag_sel_sched_pipelining_outer_loops
5344 || !last_added_blocks.exists ());
5346 new_bb_note = get_bb_note_from_pool ();
5348 if (new_bb_note == NULL_RTX)
5349 new_bb = orig_cfg_hooks.create_basic_block (headp, endp, after);
5350 else
5352 new_bb = create_basic_block_structure ((rtx) headp, (rtx) endp,
5353 new_bb_note, after);
5354 new_bb->aux = NULL;
5357 last_added_blocks.safe_push (new_bb);
5359 return new_bb;
5362 /* Implement sched_init_only_bb (). */
5363 static void
5364 sel_init_only_bb (basic_block bb, basic_block after)
5366 gcc_assert (after == NULL);
5368 extend_regions ();
5369 rgn_make_new_region_out_of_new_block (bb);
5372 /* Update the latch when we've split or merged it from block FROM to TO.
5373 This should be checked for all outer loops, too. */
5374 static void
5375 change_loops_latches (basic_block from, basic_block to)
5377 gcc_assert (from != to);
5379 if (current_loop_nest)
5381 struct loop *loop;
5383 for (loop = current_loop_nest; loop; loop = loop_outer (loop))
5384 if (considered_for_pipelining_p (loop) && loop->latch == from)
5386 gcc_assert (loop == current_loop_nest);
5387 loop->latch = to;
5388 gcc_assert (loop_latch_edge (loop));
5393 /* Splits BB into two basic blocks, adding the new one to the region and extending
5394 per-bb data structures. Returns the newly created bb. */
5395 static basic_block
5396 sel_split_block (basic_block bb, rtx after)
5398 basic_block new_bb;
5399 insn_t insn;
5401 new_bb = sched_split_block_1 (bb, after);
5402 sel_add_bb (new_bb);
5404 /* This should be called after sel_add_bb, because this uses
5405 CONTAINING_RGN for the new block, which is not yet initialized.
5406 FIXME: this function may be a no-op now. */
5407 change_loops_latches (bb, new_bb);
5409 /* Update ORIG_BB_INDEX for insns moved into the new block. */
5410 FOR_BB_INSNS (new_bb, insn)
5411 if (INSN_P (insn))
5412 EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;
5414 if (sel_bb_empty_p (bb))
5416 gcc_assert (!sel_bb_empty_p (new_bb));
5418 /* NEW_BB has data sets that need to be updated and BB holds
5419 data sets that should be removed. Exchange these data sets
5420 so that we won't lose BB's valid data sets. */
5421 exchange_data_sets (new_bb, bb);
5422 free_data_sets (bb);
5425 if (!sel_bb_empty_p (new_bb)
5426 && bitmap_bit_p (blocks_to_reschedule, bb->index))
5427 bitmap_set_bit (blocks_to_reschedule, new_bb->index);
5429 return new_bb;
5432 /* If BB ends with a jump insn whose ID is bigger than PREV_MAX_UID, return it.
5433 Otherwise returns NULL. */
5434 static rtx
5435 check_for_new_jump (basic_block bb, int prev_max_uid)
5437 rtx end;
5439 end = sel_bb_end (bb);
5440 if (end && INSN_UID (end) >= prev_max_uid)
5441 return end;
5442 return NULL;
5445 /* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
5446 New means having UID at least equal to PREV_MAX_UID. */
5447 static rtx
5448 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
5450 rtx jump;
5452 /* Return immediately if no new insns were emitted. */
5453 if (get_max_uid () == prev_max_uid)
5454 return NULL;
5456 /* Now check both blocks for new jumps. There will only ever be one. */
5457 if ((jump = check_for_new_jump (from, prev_max_uid)))
5458 return jump;
5460 if (jump_bb != NULL
5461 && (jump = check_for_new_jump (jump_bb, prev_max_uid)))
5462 return jump;
5463 return NULL;
5466 /* Splits E and adds the newly created basic block to the current region.
5467 Returns this basic block. */
5468 basic_block
5469 sel_split_edge (edge e)
5471 basic_block new_bb, src, other_bb = NULL;
5472 int prev_max_uid;
5473 rtx jump;
5475 src = e->src;
5476 prev_max_uid = get_max_uid ();
5477 new_bb = split_edge (e);
5479 if (flag_sel_sched_pipelining_outer_loops
5480 && current_loop_nest)
5482 int i;
5483 basic_block bb;
5485 /* Some of the basic blocks might not have been added to the loop.
5486 Add them here, until this is fixed in force_fallthru. */
5487 for (i = 0;
5488 last_added_blocks.iterate (i, &bb); i++)
5489 if (!bb->loop_father)
5491 add_bb_to_loop (bb, e->dest->loop_father);
5493 gcc_assert (!other_bb && (new_bb->index != bb->index));
5494 other_bb = bb;
5498 /* Add all last_added_blocks to the region. */
5499 sel_add_bb (NULL);
5501 jump = find_new_jump (src, new_bb, prev_max_uid);
5502 if (jump)
5503 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5505 /* Put the correct lv set on this block. */
5506 if (other_bb && !sel_bb_empty_p (other_bb))
5507 compute_live (sel_bb_head (other_bb));
5509 return new_bb;
5512 /* Implement sched_create_empty_bb (). */
5513 static basic_block
5514 sel_create_empty_bb (basic_block after)
5516 basic_block new_bb;
5518 new_bb = sched_create_empty_bb_1 (after);
5520 /* We'll explicitly initialize NEW_BB via sel_init_only_bb () a bit
5521 later. */
5522 gcc_assert (last_added_blocks.length () == 1
5523 && last_added_blocks[0] == new_bb);
5525 last_added_blocks.release ();
5526 return new_bb;
5529 /* Implement sched_create_recovery_block. ORIG_INSN is where block
5530 will be split to insert a check. */
5531 basic_block
5532 sel_create_recovery_block (insn_t orig_insn)
5534 basic_block first_bb, second_bb, recovery_block;
5535 basic_block before_recovery = NULL;
5536 rtx jump;
5538 first_bb = BLOCK_FOR_INSN (orig_insn);
5539 if (sel_bb_end_p (orig_insn))
5541 /* Avoid introducing an empty block while splitting. */
5542 gcc_assert (single_succ_p (first_bb));
5543 second_bb = single_succ (first_bb);
5545 else
5546 second_bb = sched_split_block (first_bb, orig_insn);
5548 recovery_block = sched_create_recovery_block (&before_recovery);
5549 if (before_recovery)
5550 copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
5552 gcc_assert (sel_bb_empty_p (recovery_block));
5553 sched_create_recovery_edges (first_bb, recovery_block, second_bb);
5554 if (current_loops != NULL)
5555 add_bb_to_loop (recovery_block, first_bb->loop_father);
5557 sel_add_bb (recovery_block);
5559 jump = BB_END (recovery_block);
5560 gcc_assert (sel_bb_head (recovery_block) == jump);
5561 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5563 return recovery_block;
5566 /* Merge basic block B into basic block A. */
5567 static void
5568 sel_merge_blocks (basic_block a, basic_block b)
5570 gcc_assert (sel_bb_empty_p (b)
5571 && EDGE_COUNT (b->preds) == 1
5572 && EDGE_PRED (b, 0)->src == b->prev_bb);
5574 move_bb_info (b->prev_bb, b);
5575 remove_empty_bb (b, false);
5576 merge_blocks (a, b);
5577 change_loops_latches (b, a);
5580 /* A wrapper for redirect_edge_and_branch_force, which also initializes
5581 data structures for the possibly created bb and insns. */
5583 void
5584 sel_redirect_edge_and_branch_force (edge e, basic_block to)
5586 basic_block jump_bb, src, orig_dest = e->dest;
5587 int prev_max_uid;
5588 rtx jump;
5590 /* This function is now used only for bookkeeping code creation, where
5591 we'll never get the single pred of orig_dest block and thus will not
5592 hit unreachable blocks when updating dominator info. */
5593 gcc_assert (!sel_bb_empty_p (e->src)
5594 && !single_pred_p (orig_dest));
5595 src = e->src;
5596 prev_max_uid = get_max_uid ();
5597 jump_bb = redirect_edge_and_branch_force (e, to);
5599 if (jump_bb != NULL)
5600 sel_add_bb (jump_bb);
5602 /* So far this function cannot spoil the loop structure, so there
5603 is nothing to update here. But assert that to be sure. */
5604 if (current_loop_nest
5605 && pipelining_p)
5606 gcc_assert (loop_latch_edge (current_loop_nest));
5608 jump = find_new_jump (src, jump_bb, prev_max_uid);
5609 if (jump)
5610 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5611 set_immediate_dominator (CDI_DOMINATORS, to,
5612 recompute_dominator (CDI_DOMINATORS, to));
5613 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5614 recompute_dominator (CDI_DOMINATORS, orig_dest));
5617 /* A wrapper for redirect_edge_and_branch. Return TRUE if blocks connected by
5618 redirected edge are in reverse topological order. */
5619 bool
5620 sel_redirect_edge_and_branch (edge e, basic_block to)
5622 bool latch_edge_p;
5623 basic_block src, orig_dest = e->dest;
5624 int prev_max_uid;
5625 rtx jump;
5626 edge redirected;
5627 bool recompute_toporder_p = false;
5628 bool maybe_unreachable = single_pred_p (orig_dest);
5630 latch_edge_p = (pipelining_p
5631 && current_loop_nest
5632 && e == loop_latch_edge (current_loop_nest));
5634 src = e->src;
5635 prev_max_uid = get_max_uid ();
5637 redirected = redirect_edge_and_branch (e, to);
5639 gcc_assert (redirected && !last_added_blocks.exists ());
5641 /* When we've redirected a latch edge, update the header. */
5642 if (latch_edge_p)
5644 current_loop_nest->header = to;
5645 gcc_assert (loop_latch_edge (current_loop_nest));
5648 /* In rare situations, the topological relation between the blocks connected
5649 by the redirected edge can change (see PR42245 for an example). Update
5650 block_to_bb/bb_to_block. */
5651 if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
5652 && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
5653 recompute_toporder_p = true;
5655 jump = find_new_jump (src, NULL, prev_max_uid);
5656 if (jump)
5657 sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
5659 /* Only update dominator info when we don't have unreachable blocks.
5660 Otherwise we'll update in maybe_tidy_empty_bb. */
5661 if (!maybe_unreachable)
5663 set_immediate_dominator (CDI_DOMINATORS, to,
5664 recompute_dominator (CDI_DOMINATORS, to));
5665 set_immediate_dominator (CDI_DOMINATORS, orig_dest,
5666 recompute_dominator (CDI_DOMINATORS, orig_dest));
5668 return recompute_toporder_p;
5671 /* This variable holds the cfg hooks used by the selective scheduler. */
5672 static struct cfg_hooks sel_cfg_hooks;
5674 /* Register sel-sched cfg hooks. */
5675 void
5676 sel_register_cfg_hooks (void)
5678 sched_split_block = sel_split_block;
5680 orig_cfg_hooks = get_cfg_hooks ();
5681 sel_cfg_hooks = orig_cfg_hooks;
5683 sel_cfg_hooks.create_basic_block = sel_create_basic_block;
5685 set_cfg_hooks (sel_cfg_hooks);
5687 sched_init_only_bb = sel_init_only_bb;
5688 sched_split_block = sel_split_block;
5689 sched_create_empty_bb = sel_create_empty_bb;
5692 /* Unregister sel-sched cfg hooks. */
5693 void
5694 sel_unregister_cfg_hooks (void)
5696 sched_create_empty_bb = NULL;
5697 sched_split_block = NULL;
5698 sched_init_only_bb = NULL;
5700 set_cfg_hooks (orig_cfg_hooks);
5704 /* Emit an insn rtx based on PATTERN. If a jump insn is wanted,
5705 LABEL is where this jump should be directed. */
5706 rtx
5707 create_insn_rtx_from_pattern (rtx pattern, rtx label)
5709 rtx insn_rtx;
5711 gcc_assert (!INSN_P (pattern));
5713 start_sequence ();
5715 if (label == NULL_RTX)
5716 insn_rtx = emit_insn (pattern);
5717 else if (DEBUG_INSN_P (label))
5718 insn_rtx = emit_debug_insn (pattern);
5719 else
5721 insn_rtx = emit_jump_insn (pattern);
5722 JUMP_LABEL (insn_rtx) = label;
5723 ++LABEL_NUSES (label);
5726 end_sequence ();
5728 sched_extend_luids ();
5729 sched_extend_target ();
5730 sched_deps_init (false);
5732 /* Initialize INSN_CODE now. */
5733 recog_memoized (insn_rtx);
5734 return insn_rtx;
5737 /* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
5738 must not be clonable. */
5739 vinsn_t
5740 create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
5742 gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
5744 /* If VINSN_TYPE is not USE, retain its uniqueness. */
5745 return vinsn_create (insn_rtx, force_unique_p);
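/* A minimal sketch (kept under #if 0) combining the two helpers above,
   assuming pseudos DEST and SRC were created elsewhere with
   gen_reg_rtx (): build a detached move insn and wrap it into a unique
   vinsn.  */
#if 0
static vinsn_t
make_move_vinsn (rtx dest, rtx src)
{
  /* A SET pattern, not yet an insn, as the helper asserts.  */
  rtx insn_rtx
    = create_insn_rtx_from_pattern (gen_rtx_SET (VOIDmode, dest, src),
				    NULL_RTX);

  return create_vinsn_from_insn_rtx (insn_rtx, true);
}
#endif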
5748 /* Create a copy of INSN_RTX. */
5749 rtx
5750 create_copy_of_insn_rtx (rtx insn_rtx)
5752 rtx res, link;
5754 if (DEBUG_INSN_P (insn_rtx))
5755 return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5756 insn_rtx);
5758 gcc_assert (NONJUMP_INSN_P (insn_rtx));
5760 res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
5761 NULL_RTX);
5763 /* Copy all REG_NOTES except REG_EQUAL/REG_EQUIV and REG_LABEL_OPERAND
5764 since mark_jump_label will make them. REG_LABEL_TARGETs are created
5765 there too, but are supposed to be sticky, so we copy them. */
5766 for (link = REG_NOTES (insn_rtx); link; link = XEXP (link, 1))
5767 if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND
5768 && REG_NOTE_KIND (link) != REG_EQUAL
5769 && REG_NOTE_KIND (link) != REG_EQUIV)
5771 if (GET_CODE (link) == EXPR_LIST)
5772 add_reg_note (res, REG_NOTE_KIND (link),
5773 copy_insn_1 (XEXP (link, 0)));
5774 else
5775 add_reg_note (res, REG_NOTE_KIND (link), XEXP (link, 0));
5778 return res;
5781 /* Change vinsn field of EXPR to hold NEW_VINSN. */
5782 void
5783 change_vinsn_in_expr (expr_t expr, vinsn_t new_vinsn)
5785 vinsn_detach (EXPR_VINSN (expr));
5787 EXPR_VINSN (expr) = new_vinsn;
5788 vinsn_attach (new_vinsn);
5791 /* Helpers for global init. */
5792 /* This structure is used to call the existing bundling mechanism
5793 and to calculate insn priorities. */
5794 static struct haifa_sched_info sched_sel_haifa_sched_info =
5796 NULL, /* init_ready_list */
5797 NULL, /* can_schedule_ready_p */
5798 NULL, /* schedule_more_p */
5799 NULL, /* new_ready */
5800 NULL, /* rgn_rank */
5801 sel_print_insn, /* rgn_print_insn */
5802 contributes_to_priority,
5803 NULL, /* insn_finishes_block_p */
5805 NULL, NULL,
5806 NULL, NULL,
5807 0, 0,
5809 NULL, /* add_remove_insn */
5810 NULL, /* begin_schedule_ready */
5811 NULL, /* begin_move_insn */
5812 NULL, /* advance_target_bb */
5814 NULL,
5815 NULL,
5817 SEL_SCHED | NEW_BBS
5820 /* Setup special insns used in the scheduler. */
5821 void
5822 setup_nop_and_exit_insns (void)
5824 gcc_assert (nop_pattern == NULL_RTX
5825 && exit_insn == NULL_RTX);
5827 nop_pattern = constm1_rtx;
5829 start_sequence ();
5830 emit_insn (nop_pattern);
5831 exit_insn = get_insns ();
5832 end_sequence ();
5833 set_block_for_insn (exit_insn, EXIT_BLOCK_PTR);
5836 /* Free special insns used in the scheduler. */
5837 void
5838 free_nop_and_exit_insns (void)
5840 exit_insn = NULL_RTX;
5841 nop_pattern = NULL_RTX;
5844 /* Setup a special vinsn used in new insns initialization. */
5845 void
5846 setup_nop_vinsn (void)
5848 nop_vinsn = vinsn_create (exit_insn, false);
5849 vinsn_attach (nop_vinsn);
5852 /* Free a special vinsn used in new insns initialization. */
5853 void
5854 free_nop_vinsn (void)
5856 gcc_assert (VINSN_COUNT (nop_vinsn) == 1);
5857 vinsn_detach (nop_vinsn);
5858 nop_vinsn = NULL;
5861 /* Call a set_sched_flags hook. */
5862 void
5863 sel_set_sched_flags (void)
5865 /* ??? This means that set_sched_flags were called, and we decided to
5866 support speculation. However, set_sched_flags also modifies flags
5867 on current_sched_info, doing this only at global init. And we
5868 sometimes change c_s_i later. So put the correct flags again. */
5869 if (spec_info && targetm.sched.set_sched_flags)
5870 targetm.sched.set_sched_flags (spec_info);
5873 /* Setup pointers to global sched info structures. */
5874 void
5875 sel_setup_sched_infos (void)
5877 rgn_setup_common_sched_info ();
5879 memcpy (&sel_common_sched_info, common_sched_info,
5880 sizeof (sel_common_sched_info));
5882 sel_common_sched_info.fix_recovery_cfg = NULL;
5883 sel_common_sched_info.add_block = NULL;
5884 sel_common_sched_info.estimate_number_of_insns
5885 = sel_estimate_number_of_insns;
5886 sel_common_sched_info.luid_for_non_insn = sel_luid_for_non_insn;
5887 sel_common_sched_info.sched_pass_id = SCHED_SEL_PASS;
5889 common_sched_info = &sel_common_sched_info;
5891 current_sched_info = &sched_sel_haifa_sched_info;
5892 current_sched_info->sched_max_insns_priority =
5893 get_rgn_sched_max_insns_priority ();
5895 sel_set_sched_flags ();
5899 /* Adds basic block BB to region RGN at position *BB_ORD_INDEX,
5900 then increments *BB_ORD_INDEX. */
5901 static void
5902 sel_add_block_to_region (basic_block bb, int *bb_ord_index, int rgn)
5904 RGN_NR_BLOCKS (rgn) += 1;
5905 RGN_DONT_CALC_DEPS (rgn) = 0;
5906 RGN_HAS_REAL_EBB (rgn) = 0;
5907 CONTAINING_RGN (bb->index) = rgn;
5908 BLOCK_TO_BB (bb->index) = *bb_ord_index;
5909 rgn_bb_table[RGN_BLOCKS (rgn) + *bb_ord_index] = bb->index;
5910 (*bb_ord_index)++;
5912 /* FIXME: it is true only when not scheduling ebbs. */
5913 RGN_BLOCKS (rgn + 1) = RGN_BLOCKS (rgn) + RGN_NR_BLOCKS (rgn);
5916 /* Functions to support pipelining of outer loops. */
5918 /* Creates a new empty region and returns its number. */
5919 static int
5920 sel_create_new_region (void)
5922 int new_rgn_number = nr_regions;
5924 RGN_NR_BLOCKS (new_rgn_number) = 0;
5926 /* FIXME: This will work only when EBBs are not created. */
5927 if (new_rgn_number != 0)
5928 RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
5929 RGN_NR_BLOCKS (new_rgn_number - 1);
5930 else
5931 RGN_BLOCKS (new_rgn_number) = 0;
5933 /* Set the blocks of the next region so the other functions may
5934 calculate the number of blocks in the region. */
5935 RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
5936 RGN_NR_BLOCKS (new_rgn_number);
5938 nr_regions++;
5940 return new_rgn_number;
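/* For instance, with two existing regions of 3 and 2 blocks
   (hypothetical numbers), RGN_BLOCKS is { 0, 3, 5, ... }; the new
   region gets number 2 with RGN_BLOCKS (2) == 5, and RGN_BLOCKS (3) is
   also set to 5 so that the new region reads as empty until blocks are
   added.  */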
5943 /* If X has a smaller topological sort number than Y, returns -1;
5944 if greater, returns 1. */
5945 static int
5946 bb_top_order_comparator (const void *x, const void *y)
5948 basic_block bb1 = *(const basic_block *) x;
5949 basic_block bb2 = *(const basic_block *) y;
5951 gcc_assert (bb1 == bb2
5952 || rev_top_order_index[bb1->index]
5953 != rev_top_order_index[bb2->index]);
5955 /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
5956 bbs with greater number should go earlier. */
5957 if (rev_top_order_index[bb1->index] > rev_top_order_index[bb2->index])
5958 return -1;
5959 else
5960 return 1;
5963 /* Create a region for LOOP and return its number. If we don't want
5964 to pipeline LOOP, return -1. */
5965 static int
5966 make_region_from_loop (struct loop *loop)
5968 unsigned int i;
5969 int new_rgn_number = -1;
5970 struct loop *inner;
5972 /* Basic block index, to be assigned to BLOCK_TO_BB. */
5973 int bb_ord_index = 0;
5974 basic_block *loop_blocks;
5975 basic_block preheader_block;
5977 if (loop->num_nodes
5978 > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
5979 return -1;
5981 /* Don't pipeline loops whose latch belongs to some of its inner loops. */
5982 for (inner = loop->inner; inner; inner = inner->inner)
5983 if (flow_bb_inside_loop_p (inner, loop->latch))
5984 return -1;
5986 loop->ninsns = num_loop_insns (loop);
5987 if ((int) loop->ninsns > PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_INSNS))
5988 return -1;
5990 loop_blocks = get_loop_body_in_custom_order (loop, bb_top_order_comparator);
5992 for (i = 0; i < loop->num_nodes; i++)
5993 if (loop_blocks[i]->flags & BB_IRREDUCIBLE_LOOP)
5995 free (loop_blocks);
5996 return -1;
5999 preheader_block = loop_preheader_edge (loop)->src;
6000 gcc_assert (preheader_block);
6001 gcc_assert (loop_blocks[0] == loop->header);
6003 new_rgn_number = sel_create_new_region ();
6005 sel_add_block_to_region (preheader_block, &bb_ord_index, new_rgn_number);
6006 bitmap_set_bit (bbs_in_loop_rgns, preheader_block->index);
6008 for (i = 0; i < loop->num_nodes; i++)
6010 /* Add only those blocks that haven't been scheduled in the inner loop.
6011 The exception is the basic blocks with bookkeeping code - they should
6012 be added to the region (and they actually don't belong to the loop
6013 body, but to the region containing that loop body). */
6015 gcc_assert (new_rgn_number >= 0);
6017 if (! bitmap_bit_p (bbs_in_loop_rgns, loop_blocks[i]->index))
6019 sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
6020 new_rgn_number);
6021 bitmap_set_bit (bbs_in_loop_rgns, loop_blocks[i]->index);
6025 free (loop_blocks);
6026 MARK_LOOP_FOR_PIPELINING (loop);
6028 return new_rgn_number;
6031 /* Create a new region from preheader blocks LOOP_BLOCKS. */
6032 void
6033 make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
6035 unsigned int i;
6036 int new_rgn_number = -1;
6037 basic_block bb;
6039 /* Basic block index, to be assigned to BLOCK_TO_BB. */
6040 int bb_ord_index = 0;
6042 new_rgn_number = sel_create_new_region ();
6044 FOR_EACH_VEC_ELT (*loop_blocks, i, bb)
6046 gcc_assert (new_rgn_number >= 0);
6048 sel_add_block_to_region (bb, &bb_ord_index, new_rgn_number);
6051 vec_free (loop_blocks);
6055 /* Create region(s) from loop nest LOOP, such that inner loops will be
6056 pipelined before outer loops. Returns true when a region for LOOP
6057 is created. */
6058 static bool
6059 make_regions_from_loop_nest (struct loop *loop)
6061 struct loop *cur_loop;
6062 int rgn_number;
6064 /* Traverse all immediate inner loops; give up if any is not yet in a region. */
6065 for (cur_loop = loop->inner; cur_loop; cur_loop = cur_loop->next)
6066 if (! bitmap_bit_p (bbs_in_loop_rgns, cur_loop->header->index))
6067 return false;
6069 /* At this moment all regular inner loops should have been pipelined.
6070 Try to create a region from this loop. */
6071 rgn_number = make_region_from_loop (loop);
6073 if (rgn_number < 0)
6074 return false;
6076 loop_nests.safe_push (loop);
6077 return true;
6080 /* Initialize the data structures needed for pipelining. */
6081 void
6082 sel_init_pipelining (void)
6084 /* Collect loop information to be used in outer loops pipelining. */
6085 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
6086 | LOOPS_HAVE_FALLTHRU_PREHEADERS
6087 | LOOPS_HAVE_RECORDED_EXITS
6088 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
6089 current_loop_nest = NULL;
6091 bbs_in_loop_rgns = sbitmap_alloc (last_basic_block);
6092 bitmap_clear (bbs_in_loop_rgns);
6094 recompute_rev_top_order ();
6097 /* Returns a struct loop for region RGN. */
6098 loop_p
6099 get_loop_nest_for_rgn (unsigned int rgn)
6101 /* Regions created with extend_rgns don't have corresponding loop nests,
6102 because they don't represent loops. */
6103 if (rgn < loop_nests.length ())
6104 return loop_nests[rgn];
6105 else
6106 return NULL;
6109 /* True when LOOP was included into pipelining regions. */
6110 bool
6111 considered_for_pipelining_p (struct loop *loop)
6113 if (loop_depth (loop) == 0)
6114 return false;
6116 /* Now, the loop could be too large or irreducible. Check whether its
6117 region is in LOOP_NESTS.
6118 We determine the region number of LOOP as the region number of its
6119 latch, but only for loops marked for pipelining: the header can't be
6120 used, as it could be a just-removed preheader giving the wrong region
6121 number, and an unmarked loop's latch could be in an inner loop too. */
6122 if (LOOP_MARKED_FOR_PIPELINING_P (loop))
6124 int rgn = CONTAINING_RGN (loop->latch->index);
6126 gcc_assert ((unsigned) rgn < loop_nests.length ());
6127 return true;
6130 return false;
6133 /* Makes regions from the rest of the blocks, after loops are chosen
6134 for pipelining. */
6135 static void
6136 make_regions_from_the_rest (void)
6138 int cur_rgn_blocks;
6139 int *loop_hdr;
6140 int i;
6142 basic_block bb;
6143 edge e;
6144 edge_iterator ei;
6145 int *degree;
6147 /* Index in rgn_bb_table where to start allocating new regions. */
6148 cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
6150 /* Make regions from all the remaining basic blocks - those that don't belong to
6151 any loop or belong to irreducible loops. Prepare the data structures
6152 for extend_rgns. */
6154 /* LOOP_HDR[I] == -1 if I-th bb doesn't belong to any loop,
6155 LOOP_HDR[I] == LOOP_HDR[J] iff basic blocks I and J reside within the same
6156 loop. */
6157 loop_hdr = XNEWVEC (int, last_basic_block);
6158 degree = XCNEWVEC (int, last_basic_block);
6161 /* For each basic block that belongs to some loop, assign the number
6162 of the innermost loop it belongs to. */
6163 for (i = 0; i < last_basic_block; i++)
6164 loop_hdr[i] = -1;
6166 FOR_EACH_BB (bb)
6168 if (bb->loop_father && bb->loop_father->num != 0
6169 && !(bb->flags & BB_IRREDUCIBLE_LOOP))
6170 loop_hdr[bb->index] = bb->loop_father->num;
6173 /* For each basic block, degree is calculated as the number of incoming
6174 edges that come from bbs not yet placed into any region.
6175 Blocks already placed into loop regions are marked with degree -1. */
6176 FOR_EACH_BB (bb)
6178 degree[bb->index] = 0;
6180 if (!bitmap_bit_p (bbs_in_loop_rgns, bb->index))
6182 FOR_EACH_EDGE (e, ei, bb->preds)
6183 if (!bitmap_bit_p (bbs_in_loop_rgns, e->src->index))
6184 degree[bb->index]++;
6186 else
6187 degree[bb->index] = -1;
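/* For example (hypothetical CFG): a block with two incoming edges, one
   from a block already placed into a loop region and one from a not yet
   placed block, gets degree 1.  extend_rgns () consumes these counts
   when forming regions from the remaining blocks; whatever still has a
   nonnegative degree afterwards is put into a singleton region below.  */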
6190 extend_rgns (degree, &cur_rgn_blocks, bbs_in_loop_rgns, loop_hdr);
6192 /* Any block that did not end up in a region is placed into a region
6193 by itself. */
6194 FOR_EACH_BB (bb)
6195 if (degree[bb->index] >= 0)
6197 rgn_bb_table[cur_rgn_blocks] = bb->index;
6198 RGN_NR_BLOCKS (nr_regions) = 1;
6199 RGN_BLOCKS (nr_regions) = cur_rgn_blocks++;
6200 RGN_DONT_CALC_DEPS (nr_regions) = 0;
6201 RGN_HAS_REAL_EBB (nr_regions) = 0;
6202 CONTAINING_RGN (bb->index) = nr_regions++;
6203 BLOCK_TO_BB (bb->index) = 0;
6206 free (degree);
6207 free (loop_hdr);
6210 /* Free data structures used in pipelining of loops. */
6211 void sel_finish_pipelining (void)
6213 loop_iterator li;
6214 struct loop *loop;
6216 /* Release aux fields so we don't free them later by mistake. */
6217 FOR_EACH_LOOP (li, loop, 0)
6218 loop->aux = NULL;
6220 loop_optimizer_finalize ();
6222 loop_nests.release ();
6224 free (rev_top_order_index);
6225 rev_top_order_index = NULL;
6228 /* This function replaces find_rgns when
6229 FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set. */
6230 void
6231 sel_find_rgns (void)
6233 sel_init_pipelining ();
6234 extend_regions ();
6236 if (current_loops)
6238 loop_p loop;
6239 loop_iterator li;
6241 FOR_EACH_LOOP (li, loop, (flag_sel_sched_pipelining_outer_loops
6242 ? LI_FROM_INNERMOST
6243 : LI_ONLY_INNERMOST))
6244 make_regions_from_loop_nest (loop);
6247 /* Make regions from all the remaining basic blocks and schedule them.
6248 These blocks include blocks that don't belong to any loop or belong
6249 to irreducible loops. */
6250 make_regions_from_the_rest ();
6252 /* We don't need bbs_in_loop_rgns anymore. */
6253 sbitmap_free (bbs_in_loop_rgns);
6254 bbs_in_loop_rgns = NULL;
6257 /* Add the preheader blocks saved from the previous loop to the current region,
6258 taking them from LOOP_PREHEADER_BLOCKS (current_loop_nest), and record them in *BBS.
6259 This function is only used with -fsel-sched-pipelining-outer-loops. */
6260 void
6261 sel_add_loop_preheaders (bb_vec_t *bbs)
6263 int i;
6264 basic_block bb;
6265 vec<basic_block> *preheader_blocks
6266 = LOOP_PREHEADER_BLOCKS (current_loop_nest);
6268 if (!preheader_blocks)
6269 return;
6271 for (i = 0; preheader_blocks->iterate (i, &bb); i++)
6273 bbs->safe_push (bb);
6274 last_added_blocks.safe_push (bb);
6275 sel_add_bb (bb);
6278 vec_free (preheader_blocks);
6281 /* While pipelining outer loops, returns TRUE if BB is a loop preheader.
6282 Please note that the function should also work when pipelining_p is
6283 false, because it is used when deciding whether we should or should
6284 not reschedule pipelined code. */
6285 bool
6286 sel_is_loop_preheader_p (basic_block bb)
6288 if (current_loop_nest)
6290 struct loop *outer;
6292 if (preheader_removed)
6293 return false;
6295 /* Preheader is the first block in the region. */
6296 if (BLOCK_TO_BB (bb->index) == 0)
6297 return true;
6299 /* We used to find a preheader with the topological information.
6300 Check that the above code is equivalent to what we did before. */
6302 if (in_current_region_p (current_loop_nest->header))
6303 gcc_assert (!(BLOCK_TO_BB (bb->index)
6304 < BLOCK_TO_BB (current_loop_nest->header->index)));
6306 /* Assert that the latch block of an outer loop is never among the
6307 preheader blocks found here. */
6308 for (outer = loop_outer (current_loop_nest);
6309 outer;
6310 outer = loop_outer (outer))
6311 if (considered_for_pipelining_p (outer) && outer->latch == bb)
6312 gcc_unreachable ();
6315 return false;
6318 /* Check whether JUMP_BB ends with a jump insn that leads only to DEST_BB and
6319 can be removed, making the corresponding edge fallthrough (assuming that
6320 all basic blocks between JUMP_BB and DEST_BB are empty). */
6321 static bool
6322 bb_has_removable_jump_to_p (basic_block jump_bb, basic_block dest_bb)
6324 if (!onlyjump_p (BB_END (jump_bb))
6325 || tablejump_p (BB_END (jump_bb), NULL, NULL))
6326 return false;
6328 /* Several outgoing edges, abnormal edge or destination of jump is
6329 not DEST_BB. */
6330 if (EDGE_COUNT (jump_bb->succs) != 1
6331 || EDGE_SUCC (jump_bb, 0)->flags & (EDGE_ABNORMAL | EDGE_CROSSING)
6332 || EDGE_SUCC (jump_bb, 0)->dest != dest_bb)
6333 return false;
6335 /* None of the above applies, so the jump is removable. */
6336 return true;
6339 /* Removes the loop preheader from the current region and saves it in
6340 PREHEADER_BLOCKS of the parent loop, so the blocks can later be added to
6341 the region that represents the outer loop. */
6342 static void
6343 sel_remove_loop_preheader (void)
6345 int i, old_len;
6346 int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
6347 basic_block bb;
6348 bool all_empty_p = true;
6349 vec<basic_block> *preheader_blocks
6350 = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
6352 vec_check_alloc (preheader_blocks, 0);
6354 gcc_assert (current_loop_nest);
6355 old_len = preheader_blocks->length ();
6357 /* Add blocks that aren't within the current loop to PREHEADER_BLOCKS. */
6358 for (i = 0; i < RGN_NR_BLOCKS (cur_rgn); i++)
6360 bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6362 /* If the basic block belongs to the region, but not to the
6363 corresponding loop, then it should be a preheader. */
6364 if (sel_is_loop_preheader_p (bb))
6366 preheader_blocks->safe_push (bb);
6367 if (BB_END (bb) != bb_note (bb))
6368 all_empty_p = false;
6372 /* Remove these blocks only after iterating over the whole region. */
6373 for (i = preheader_blocks->length () - 1; i >= old_len; i--)
6375 bb = (*preheader_blocks)[i];
6376 sel_remove_bb (bb, false);
6379 if (!considered_for_pipelining_p (loop_outer (current_loop_nest)))
6381 if (!all_empty_p)
6382 /* Immediately create new region from preheader. */
6383 make_region_from_loop_preheader (preheader_blocks);
6384 else
6386 /* If all preheader blocks are empty - don't create a new empty region;
6387 instead, remove them completely. */
6388 FOR_EACH_VEC_ELT (*preheader_blocks, i, bb)
6390 edge e;
6391 edge_iterator ei;
6392 basic_block prev_bb = bb->prev_bb, next_bb = bb->next_bb;
6394 /* Redirect all incoming edges to next basic block. */
6395 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
6397 if (! (e->flags & EDGE_FALLTHRU))
6398 redirect_edge_and_branch (e, bb->next_bb);
6399 else
6400 redirect_edge_succ (e, bb->next_bb);
6402 gcc_assert (BB_NOTE_LIST (bb) == NULL);
6403 delete_and_free_basic_block (bb);
6405 /* Check whether, after deleting the preheader, there is an
6406 unconditional jump in PREV_BB that leads to the next basic
6407 block NEXT_BB. If so, delete this jump and clear the data
6408 sets of its basic block if it becomes empty. */
6409 if (next_bb->prev_bb == prev_bb
6410 && prev_bb != ENTRY_BLOCK_PTR
6411 && bb_has_removable_jump_to_p (prev_bb, next_bb))
6413 redirect_edge_and_branch (EDGE_SUCC (prev_bb, 0), next_bb);
6414 if (BB_END (prev_bb) == bb_note (prev_bb))
6415 free_data_sets (prev_bb);
6418 set_immediate_dominator (CDI_DOMINATORS, next_bb,
6419 recompute_dominator (CDI_DOMINATORS,
6420 next_bb));
6423 vec_free (preheader_blocks);
6425 else
6426 /* Store the preheader blocks within the parent loop's structure. */
6427 SET_LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest),
6428 preheader_blocks);
6430 #endif