1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point is found for
26 the normal instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
62 2. choose insn with least contribution to register pressure,
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7 choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using LOG_LINKS backward
86 dependences. LOG_LINKS are translated into INSN_DEPEND forward
87 dependences for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
129 #include "coretypes.h"
134 #include "hard-reg-set.h"
136 #include "function.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
143 #include "sched-int.h"
148 #ifdef INSN_SCHEDULING
/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
179 fix_sched_param (const char *param
, const char *val
)
181 if (!strcmp (param
, "verbose"))
182 sched_verbose_param
= atoi (val
);
184 warning (0, "fix_sched_param: unknown param: %s", param
);
187 struct haifa_insn_data
*h_i_d
;
189 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
190 #define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
192 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
193 then it should be recalculated from scratch. */
194 #define INVALID_TICK (-(max_insn_queue_index + 1))
195 /* The minimal value of the INSN_TICK of an instruction. */
196 #define MIN_TICK (-max_insn_queue_index)
198 /* Issue points are used to distinguish between instructions in max_issue ().
199 For now, all instructions are equally good. */
200 #define ISSUE_POINTS(INSN) 1
202 /* List of important notes we must keep around. This is a pointer to the
203 last element in the list. */
204 static rtx note_list
;
206 static struct spec_info_def spec_info_var
;
207 /* Description of the speculative part of the scheduling.
208 If NULL - no speculation. */
209 static spec_info_t spec_info
;
211 /* True, if recovery block was added during scheduling of current block.
212 Used to determine, if we need to fix INSN_TICKs. */
213 static bool added_recovery_block_p
;
215 /* Counters of different types of speculative instructions. */
216 static int nr_begin_data
, nr_be_in_data
, nr_begin_control
, nr_be_in_control
;
218 /* Pointers to GLAT data. See init_glat for more information. */
219 regset
*glat_start
, *glat_end
;
221 /* Array used in {unlink, restore}_bb_notes. */
222 static rtx
*bb_header
= 0;
224 /* Number of basic_blocks. */
225 static int old_last_basic_block
;
227 /* Basic block after which recovery blocks will be created. */
228 static basic_block before_recovery
;
232 /* An instruction is ready to be scheduled when all insns preceding it
233 have already been scheduled. It is important to ensure that all
234 insns which use its result will not be executed until its result
235 has been computed. An insn is maintained in one of four structures:
237 (P) the "Pending" set of insns which cannot be scheduled until
238 their dependencies have been satisfied.
239 (Q) the "Queued" set of insns that can be scheduled when sufficient
241 (R) the "Ready" list of unscheduled, uncommitted insns.
242 (S) the "Scheduled" list of insns.
244 Initially, all insns are either "Pending" or "Ready" depending on
245 whether their dependencies are satisfied.
247 Insns move from the "Ready" list to the "Scheduled" list as they
248 are committed to the schedule. As this occurs, the insns in the
249 "Pending" list have their dependencies satisfied and move to either
250 the "Ready" list or the "Queued" set depending on whether
251 sufficient time has passed to make them ready. As time passes,
252 insns move from the "Queued" set to the "Ready" list.
254 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
255 insns, i.e., those that are ready, queued, and pending.
256 The "Queued" set (Q) is implemented by the variable `insn_queue'.
257 The "Ready" list (R) is implemented by the variables `ready' and
259 The "Scheduled" list (S) is the new insn chain built by this pass.
261 The transition (R->S) is implemented in the scheduling loop in
262 `schedule_block' when the best insn to schedule is chosen.
263 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
264 insns move from the ready list to the scheduled list.
265 The transition (Q->R) is implemented in 'queue_to_insn' as time
266 passes or stalls are introduced. */
268 /* Implement a circular buffer to delay instructions until sufficient
269 time has passed. For the new pipeline description interface,
270 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
271 than maximal time of instruction execution computed by genattr.c on
272 the base maximal time of functional unit reservations and getting a
273 result. This is the longest time an insn may be queued. */
275 static rtx
*insn_queue
;
276 static int q_ptr
= 0;
277 static int q_size
= 0;
278 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
279 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
281 #define QUEUE_SCHEDULED (-3)
282 #define QUEUE_NOWHERE (-2)
283 #define QUEUE_READY (-1)
284 /* QUEUE_SCHEDULED - INSN is scheduled.
285 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
287 QUEUE_READY - INSN is in ready list.
288 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
290 #define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
292 /* The following variable value refers for all current and future
293 reservations of the processor units. */
296 /* The following variable value is size of memory representing all
297 current and future reservations of the processor units. */
298 static size_t dfa_state_size
;
300 /* The following array is used to find the best insn from ready when
301 the automaton pipeline interface is used. */
302 static char *ready_try
;
304 /* Describe the ready list of the scheduler.
305 VEC holds space enough for all insns in the current region. VECLEN
306 says how many exactly.
307 FIRST is the index of the element with the highest priority; i.e. the
308 last one in the ready list, since elements are ordered by ascending
310 N_READY determines how many insns are on the ready list. */
320 /* The pointer to the ready list. */
321 static struct ready_list
*readyp
;
323 /* Scheduling clock. */
324 static int clock_var
;
326 /* Number of instructions in current scheduling region. */
327 static int rgn_n_insns
;
329 static int may_trap_exp (rtx
, int);
331 /* Nonzero iff the address is comprised from at most 1 register. */
332 #define CONST_BASED_ADDRESS_P(x) \
334 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
335 || (GET_CODE (x) == LO_SUM)) \
336 && (CONSTANT_P (XEXP (x, 0)) \
337 || CONSTANT_P (XEXP (x, 1)))))
339 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
340 as found by analyzing insn's expression. */
343 may_trap_exp (rtx x
, int is_store
)
352 if (code
== MEM
&& may_trap_p (x
))
359 /* The insn uses memory: a volatile load. */
360 if (MEM_VOLATILE_P (x
))
362 /* An exception-free load. */
365 /* A load with 1 base register, to be further checked. */
366 if (CONST_BASED_ADDRESS_P (XEXP (x
, 0)))
367 return PFREE_CANDIDATE
;
368 /* No info on the load, to be further checked. */
369 return PRISKY_CANDIDATE
;
374 int i
, insn_class
= TRAP_FREE
;
376 /* Neither store nor load, check if it may cause a trap. */
379 /* Recursive step: walk the insn... */
380 fmt
= GET_RTX_FORMAT (code
);
381 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
385 int tmp_class
= may_trap_exp (XEXP (x
, i
), is_store
);
386 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
388 else if (fmt
[i
] == 'E')
391 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
393 int tmp_class
= may_trap_exp (XVECEXP (x
, i
, j
), is_store
);
394 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
395 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
399 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
406 /* Classifies insn for the purpose of verifying that it can be
407 moved speculatively, by examining it's patterns, returning:
408 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
409 TRAP_FREE: non-load insn.
410 IFREE: load from a globally safe location.
411 IRISKY: volatile load.
412 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
413 being either PFREE or PRISKY. */
416 haifa_classify_insn (rtx insn
)
418 rtx pat
= PATTERN (insn
);
419 int tmp_class
= TRAP_FREE
;
420 int insn_class
= TRAP_FREE
;
423 if (GET_CODE (pat
) == PARALLEL
)
425 int i
, len
= XVECLEN (pat
, 0);
427 for (i
= len
- 1; i
>= 0; i
--)
429 code
= GET_CODE (XVECEXP (pat
, 0, i
));
433 /* Test if it is a 'store'. */
434 tmp_class
= may_trap_exp (XEXP (XVECEXP (pat
, 0, i
), 0), 1);
437 /* Test if it is a store. */
438 tmp_class
= may_trap_exp (SET_DEST (XVECEXP (pat
, 0, i
)), 1);
439 if (tmp_class
== TRAP_RISKY
)
441 /* Test if it is a load. */
443 = WORST_CLASS (tmp_class
,
444 may_trap_exp (SET_SRC (XVECEXP (pat
, 0, i
)),
449 tmp_class
= TRAP_RISKY
;
454 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
455 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
461 code
= GET_CODE (pat
);
465 /* Test if it is a 'store'. */
466 tmp_class
= may_trap_exp (XEXP (pat
, 0), 1);
469 /* Test if it is a store. */
470 tmp_class
= may_trap_exp (SET_DEST (pat
), 1);
471 if (tmp_class
== TRAP_RISKY
)
473 /* Test if it is a load. */
475 WORST_CLASS (tmp_class
,
476 may_trap_exp (SET_SRC (pat
), 0));
480 tmp_class
= TRAP_RISKY
;
484 insn_class
= tmp_class
;
490 /* Forward declarations. */
492 HAIFA_INLINE
static int insn_cost1 (rtx
, enum reg_note
, rtx
, rtx
);
493 static int priority (rtx
);
494 static int rank_for_schedule (const void *, const void *);
495 static void swap_sort (rtx
*, int);
496 static void queue_insn (rtx
, int);
497 static int schedule_insn (rtx
);
498 static int find_set_reg_weight (rtx
);
499 static void find_insn_reg_weight (basic_block
);
500 static void find_insn_reg_weight1 (rtx
);
501 static void adjust_priority (rtx
);
502 static void advance_one_cycle (void);
504 /* Notes handling mechanism:
505 =========================
506 Generally, NOTES are saved before scheduling and restored after scheduling.
507 The scheduler distinguishes between two types of notes:
509 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
510 Before scheduling a region, a pointer to the note is added to the insn
511 that follows or precedes it. (This happens as part of the data dependence
512 computation). After scheduling an insn, the pointer contained in it is
513 used for regenerating the corresponding note (in reemit_notes).
515 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
516 these notes are put in a list (in rm_other_notes() and
517 unlink_other_notes ()). After scheduling the block, these notes are
518 inserted at the beginning of the block (in schedule_block()). */
520 static rtx
unlink_other_notes (rtx
, rtx
);
521 static void reemit_notes (rtx
);
523 static rtx
*ready_lastpos (struct ready_list
*);
524 static void ready_add (struct ready_list
*, rtx
, bool);
525 static void ready_sort (struct ready_list
*);
526 static rtx
ready_remove_first (struct ready_list
*);
528 static void queue_to_ready (struct ready_list
*);
529 static int early_queue_to_ready (state_t
, struct ready_list
*);
531 static void debug_ready_list (struct ready_list
*);
533 static void move_insn (rtx
);
535 /* The following functions are used to implement multi-pass scheduling
536 on the first cycle. */
537 static rtx
ready_element (struct ready_list
*, int);
538 static rtx
ready_remove (struct ready_list
*, int);
539 static void ready_remove_insn (rtx
);
540 static int max_issue (struct ready_list
*, int *, int);
542 static rtx
choose_ready (struct ready_list
*);
544 static void fix_inter_tick (rtx
, rtx
);
545 static int fix_tick_ready (rtx
);
546 static void change_queue_index (rtx
, int);
547 static void resolve_dep (rtx
, rtx
);
549 /* The following functions are used to implement scheduling of data/control
550 speculative instructions. */
552 static void extend_h_i_d (void);
553 static void extend_ready (int);
554 static void extend_global (rtx
);
555 static void extend_all (rtx
);
556 static void init_h_i_d (rtx
);
557 static void generate_recovery_code (rtx
);
558 static void process_insn_depend_be_in_spec (rtx
, rtx
, ds_t
);
559 static void begin_speculative_block (rtx
);
560 static void add_to_speculative_block (rtx
);
561 static dw_t
dep_weak (ds_t
);
562 static edge
find_fallthru_edge (basic_block
);
563 static void init_before_recovery (void);
564 static basic_block
create_recovery_block (void);
565 static void create_check_block_twin (rtx
, bool);
566 static void fix_recovery_deps (basic_block
);
567 static void change_pattern (rtx
, rtx
);
568 static int speculate_insn (rtx
, ds_t
, rtx
*);
569 static void dump_new_block_header (int, basic_block
, rtx
, rtx
);
570 static void restore_bb_notes (basic_block
);
571 static void extend_bb (void);
572 static void fix_jump_move (rtx
);
573 static void move_block_after_check (rtx
);
574 static void move_succs (VEC(edge
,gc
) **, basic_block
);
575 static void init_glat (void);
576 static void init_glat1 (basic_block
);
577 static void attach_life_info1 (basic_block
);
578 static void free_glat (void);
579 static void sched_remove_insn (rtx
);
580 static void clear_priorities (rtx
);
581 static void add_jump_dependencies (rtx
, rtx
);
582 static void calc_priorities (rtx
);
583 #ifdef ENABLE_CHECKING
584 static int has_edge_p (VEC(edge
,gc
) *, int);
585 static void check_cfg (rtx
, rtx
);
586 static void check_sched_flags (void);
589 #endif /* INSN_SCHEDULING */
591 /* Point to state used for the current scheduling pass. */
592 struct sched_info
*current_sched_info
;
594 #ifndef INSN_SCHEDULING
596 schedule_insns (void)
601 /* Working copy of frontend's sched_info variable. */
602 static struct sched_info current_sched_info_var
;
604 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
605 so that insns independent of the last scheduled insn will be preferred
606 over dependent instructions. */
608 static rtx last_scheduled_insn
;
610 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
611 This is the number of cycles between instruction issue and
612 instruction results. */
615 insn_cost (rtx insn
, rtx link
, rtx used
)
617 return insn_cost1 (insn
, used
? REG_NOTE_KIND (link
) : REG_NOTE_MAX
,
621 /* Compute cost of executing INSN given the dependence on the insn USED.
622 If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
623 Otherwise, dependence between INSN and USED is assumed to be of type
624 DEP_TYPE. This function was introduced as a workaround for
625 targetm.adjust_cost hook.
626 This is the number of cycles between instruction issue and
627 instruction results. */
629 HAIFA_INLINE
static int
630 insn_cost1 (rtx insn
, enum reg_note dep_type
, rtx link
, rtx used
)
632 int cost
= INSN_COST (insn
);
636 /* A USE insn, or something else we don't need to
637 understand. We can't pass these directly to
638 result_ready_cost or insn_default_latency because it will
639 trigger a fatal error for unrecognizable insns. */
640 if (recog_memoized (insn
) < 0)
642 INSN_COST (insn
) = 0;
647 cost
= insn_default_latency (insn
);
651 INSN_COST (insn
) = cost
;
655 /* In this case estimate cost without caring how insn is used. */
659 /* A USE insn should never require the value used to be computed.
660 This allows the computation of a function's result and parameter
661 values to overlap the return and call. */
662 if (recog_memoized (used
) < 0)
666 gcc_assert (!link
|| dep_type
== REG_NOTE_KIND (link
));
668 if (INSN_CODE (insn
) >= 0)
670 if (dep_type
== REG_DEP_ANTI
)
672 else if (dep_type
== REG_DEP_OUTPUT
)
674 cost
= (insn_default_latency (insn
)
675 - insn_default_latency (used
));
679 else if (bypass_p (insn
))
680 cost
= insn_latency (insn
, used
);
683 if (targetm
.sched
.adjust_cost_2
)
684 cost
= targetm
.sched
.adjust_cost_2 (used
, (int) dep_type
, insn
, cost
);
688 if (targetm
.sched
.adjust_cost
)
689 cost
= targetm
.sched
.adjust_cost (used
, link
, insn
, cost
);
699 /* Compute the priority number for INSN. */
709 if (! INSN_PRIORITY_KNOWN (insn
))
711 int this_priority
= 0;
713 if (INSN_DEPEND (insn
) == 0)
714 this_priority
= insn_cost (insn
, 0, 0);
717 rtx prev_first
, twin
;
720 /* For recovery check instructions we calculate priority slightly
721 different than that of normal instructions. Instead of walking
722 through INSN_DEPEND (check) list, we walk through INSN_DEPEND list
723 of each instruction in the corresponding recovery block. */
725 rec
= RECOVERY_BLOCK (insn
);
726 if (!rec
|| rec
== EXIT_BLOCK_PTR
)
728 prev_first
= PREV_INSN (insn
);
733 prev_first
= NEXT_INSN (BB_HEAD (rec
));
734 twin
= PREV_INSN (BB_END (rec
));
739 for (link
= INSN_DEPEND (twin
); link
; link
= XEXP (link
, 1))
744 next
= XEXP (link
, 0);
746 if (BLOCK_FOR_INSN (next
) != rec
)
748 /* Critical path is meaningful in block boundaries
750 if (! (*current_sched_info
->contributes_to_priority
)
752 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
753 then speculative instructions will less likely be
754 scheduled. That is because the priority of
755 their producers will increase, and, thus, the
756 producers will more likely be scheduled, thus,
757 resolving the dependence. */
758 || ((current_sched_info
->flags
& DO_SPECULATION
)
759 && (DEP_STATUS (link
) & SPECULATIVE
)
760 && !(spec_info
->flags
761 & COUNT_SPEC_IN_CRITICAL_PATH
)))
764 next_priority
= insn_cost1 (insn
,
766 REG_NOTE_KIND (link
) :
768 twin
== insn
? link
: 0,
769 next
) + priority (next
);
771 if (next_priority
> this_priority
)
772 this_priority
= next_priority
;
776 twin
= PREV_INSN (twin
);
778 while (twin
!= prev_first
);
780 INSN_PRIORITY (insn
) = this_priority
;
781 INSN_PRIORITY_KNOWN (insn
) = 1;
784 return INSN_PRIORITY (insn
);
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

/* Two elements are swap-sorted directly; longer lists go through qsort
   with rank_for_schedule as the comparison function.  */
#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)
797 /* Returns a positive value if x is preferred; returns a negative value if
798 y is preferred. Should never return 0, since that will make the sort
802 rank_for_schedule (const void *x
, const void *y
)
804 rtx tmp
= *(const rtx
*) y
;
805 rtx tmp2
= *(const rtx
*) x
;
807 int tmp_class
, tmp2_class
, depend_count1
, depend_count2
;
808 int val
, priority_val
, weight_val
, info_val
;
810 /* The insn in a schedule group should be issued the first. */
811 if (SCHED_GROUP_P (tmp
) != SCHED_GROUP_P (tmp2
))
812 return SCHED_GROUP_P (tmp2
) ? 1 : -1;
814 /* Prefer insn with higher priority. */
815 priority_val
= INSN_PRIORITY (tmp2
) - INSN_PRIORITY (tmp
);
820 /* Prefer speculative insn with greater dependencies weakness. */
827 ds1
= TODO_SPEC (tmp
) & SPECULATIVE
;
829 dw1
= dep_weak (ds1
);
833 ds2
= TODO_SPEC (tmp2
) & SPECULATIVE
;
835 dw2
= dep_weak (ds2
);
840 if (dw
> (NO_DEP_WEAK
/ 8) || dw
< -(NO_DEP_WEAK
/ 8))
844 /* Prefer an insn with smaller contribution to registers-pressure. */
845 if (!reload_completed
&&
846 (weight_val
= INSN_REG_WEIGHT (tmp
) - INSN_REG_WEIGHT (tmp2
)))
849 info_val
= (*current_sched_info
->rank
) (tmp
, tmp2
);
853 /* Compare insns based on their relation to the last-scheduled-insn. */
854 if (INSN_P (last_scheduled_insn
))
856 /* Classify the instructions into three classes:
857 1) Data dependent on last schedule insn.
858 2) Anti/Output dependent on last scheduled insn.
859 3) Independent of last scheduled insn, or has latency of one.
860 Choose the insn from the highest numbered class if different. */
861 link
= find_insn_list (tmp
, INSN_DEPEND (last_scheduled_insn
));
862 if (link
== 0 || insn_cost (last_scheduled_insn
, link
, tmp
) == 1)
864 else if (REG_NOTE_KIND (link
) == 0) /* Data dependence. */
869 link
= find_insn_list (tmp2
, INSN_DEPEND (last_scheduled_insn
));
870 if (link
== 0 || insn_cost (last_scheduled_insn
, link
, tmp2
) == 1)
872 else if (REG_NOTE_KIND (link
) == 0) /* Data dependence. */
877 if ((val
= tmp2_class
- tmp_class
))
881 /* Prefer the insn which has more later insns that depend on it.
882 This gives the scheduler more freedom when scheduling later
883 instructions at the expense of added register pressure. */
885 for (link
= INSN_DEPEND (tmp
); link
; link
= XEXP (link
, 1))
889 for (link
= INSN_DEPEND (tmp2
); link
; link
= XEXP (link
, 1))
892 val
= depend_count2
- depend_count1
;
896 /* If insns are equally good, sort by INSN_LUID (original insn order),
897 so that we make the sort stable. This minimizes instruction movement,
898 thus minimizing sched's effect on debugging and cross-jumping. */
899 return INSN_LUID (tmp
) - INSN_LUID (tmp2
);
902 /* Resort the array A in which only element at index N may be out of order. */
904 HAIFA_INLINE
static void
905 swap_sort (rtx
*a
, int n
)
910 while (i
>= 0 && rank_for_schedule (a
+ i
, &insn
) >= 0)
918 /* Add INSN to the insn queue so that it can be executed at least
919 N_CYCLES after the currently executing insn. Preserve insns
920 chain for debugging purposes. */
922 HAIFA_INLINE
static void
923 queue_insn (rtx insn
, int n_cycles
)
925 int next_q
= NEXT_Q_AFTER (q_ptr
, n_cycles
);
926 rtx link
= alloc_INSN_LIST (insn
, insn_queue
[next_q
]);
928 gcc_assert (n_cycles
<= max_insn_queue_index
);
930 insn_queue
[next_q
] = link
;
933 if (sched_verbose
>= 2)
935 fprintf (sched_dump
, ";;\t\tReady-->Q: insn %s: ",
936 (*current_sched_info
->print_insn
) (insn
, 0));
938 fprintf (sched_dump
, "queued for %d cycles.\n", n_cycles
);
941 QUEUE_INDEX (insn
) = next_q
;
944 /* Remove INSN from queue. */
946 queue_remove (rtx insn
)
948 gcc_assert (QUEUE_INDEX (insn
) >= 0);
949 remove_free_INSN_LIST_elem (insn
, &insn_queue
[QUEUE_INDEX (insn
)]);
951 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
954 /* Return a pointer to the bottom of the ready list, i.e. the insn
955 with the lowest priority. */
957 HAIFA_INLINE
static rtx
*
958 ready_lastpos (struct ready_list
*ready
)
960 gcc_assert (ready
->n_ready
>= 1);
961 return ready
->vec
+ ready
->first
- ready
->n_ready
+ 1;
964 /* Add an element INSN to the ready list so that it ends up with the
965 lowest/highest priority depending on FIRST_P. */
967 HAIFA_INLINE
static void
968 ready_add (struct ready_list
*ready
, rtx insn
, bool first_p
)
972 if (ready
->first
== ready
->n_ready
)
974 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
,
975 ready_lastpos (ready
),
976 ready
->n_ready
* sizeof (rtx
));
977 ready
->first
= ready
->veclen
- 1;
979 ready
->vec
[ready
->first
- ready
->n_ready
] = insn
;
983 if (ready
->first
== ready
->veclen
- 1)
986 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
987 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
- 1,
988 ready_lastpos (ready
),
989 ready
->n_ready
* sizeof (rtx
));
990 ready
->first
= ready
->veclen
- 2;
992 ready
->vec
[++(ready
->first
)] = insn
;
997 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_READY
);
998 QUEUE_INDEX (insn
) = QUEUE_READY
;
1001 /* Remove the element with the highest priority from the ready list and
1004 HAIFA_INLINE
static rtx
1005 ready_remove_first (struct ready_list
*ready
)
1009 gcc_assert (ready
->n_ready
);
1010 t
= ready
->vec
[ready
->first
--];
1012 /* If the queue becomes empty, reset it. */
1013 if (ready
->n_ready
== 0)
1014 ready
->first
= ready
->veclen
- 1;
1016 gcc_assert (QUEUE_INDEX (t
) == QUEUE_READY
);
1017 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
1022 /* The following code implements multi-pass scheduling for the first
1023 cycle. In other words, we will try to choose ready insn which
1024 permits to start maximum number of insns on the same cycle. */
1026 /* Return a pointer to the element INDEX from the ready. INDEX for
1027 insn with the highest priority is 0, and the lowest priority has
1030 HAIFA_INLINE
static rtx
1031 ready_element (struct ready_list
*ready
, int index
)
1033 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
1035 return ready
->vec
[ready
->first
- index
];
1038 /* Remove the element INDEX from the ready list and return it. INDEX
1039 for insn with the highest priority is 0, and the lowest priority
1042 HAIFA_INLINE
static rtx
1043 ready_remove (struct ready_list
*ready
, int index
)
1049 return ready_remove_first (ready
);
1050 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
1051 t
= ready
->vec
[ready
->first
- index
];
1053 for (i
= index
; i
< ready
->n_ready
; i
++)
1054 ready
->vec
[ready
->first
- i
] = ready
->vec
[ready
->first
- i
- 1];
1055 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
1059 /* Remove INSN from the ready list. */
1061 ready_remove_insn (rtx insn
)
1065 for (i
= 0; i
< readyp
->n_ready
; i
++)
1066 if (ready_element (readyp
, i
) == insn
)
1068 ready_remove (readyp
, i
);
1074 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1077 HAIFA_INLINE
static void
1078 ready_sort (struct ready_list
*ready
)
1080 rtx
*first
= ready_lastpos (ready
);
1081 SCHED_SORT (first
, ready
->n_ready
);
1084 /* PREV is an insn that is ready to execute. Adjust its priority if that
1085 will help shorten or lengthen register lifetimes as appropriate. Also
1086 provide a hook for the target to tweek itself. */
1088 HAIFA_INLINE
static void
1089 adjust_priority (rtx prev
)
1091 /* ??? There used to be code here to try and estimate how an insn
1092 affected register lifetimes, but it did it by looking at REG_DEAD
1093 notes, which we removed in schedule_region. Nor did it try to
1094 take into account register pressure or anything useful like that.
1096 Revisit when we have a machine model to work with and not before. */
1098 if (targetm
.sched
.adjust_priority
)
1099 INSN_PRIORITY (prev
) =
1100 targetm
.sched
.adjust_priority (prev
, INSN_PRIORITY (prev
));
1103 /* Advance time on one cycle. */
1104 HAIFA_INLINE
static void
1105 advance_one_cycle (void)
1107 if (targetm
.sched
.dfa_pre_cycle_insn
)
1108 state_transition (curr_state
,
1109 targetm
.sched
.dfa_pre_cycle_insn ());
1111 state_transition (curr_state
, NULL
);
1113 if (targetm
.sched
.dfa_post_cycle_insn
)
1114 state_transition (curr_state
,
1115 targetm
.sched
.dfa_post_cycle_insn ());
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;
1121 /* INSN is the "currently executing insn". Launch each insn which was
1122 waiting on INSN. READY is the ready list which contains the insns
1123 that are ready to fire. CLOCK is the current cycle. The function
1124 returns necessary cycle advance after issuing the insn (it is not
1125 zero for insns in a schedule group). */
1128 schedule_insn (rtx insn
)
1133 if (sched_verbose
>= 1)
1137 print_insn (buf
, insn
, 0);
1139 fprintf (sched_dump
, ";;\t%3i--> %-40s:", clock_var
, buf
);
1141 if (recog_memoized (insn
) < 0)
1142 fprintf (sched_dump
, "nothing");
1144 print_reservation (sched_dump
, insn
);
1145 fputc ('\n', sched_dump
);
1148 /* Scheduling instruction should have all its dependencies resolved and
1149 should have been removed from the ready list. */
1150 gcc_assert (INSN_DEP_COUNT (insn
) == 0);
1151 gcc_assert (!LOG_LINKS (insn
));
1152 gcc_assert (QUEUE_INDEX (insn
) == QUEUE_NOWHERE
);
1154 QUEUE_INDEX (insn
) = QUEUE_SCHEDULED
;
1156 /* Now we can free RESOLVED_DEPS list. */
1157 if (current_sched_info
->flags
& USE_DEPS_LIST
)
1158 free_DEPS_LIST_list (&RESOLVED_DEPS (insn
));
1160 free_INSN_LIST_list (&RESOLVED_DEPS (insn
));
1162 gcc_assert (INSN_TICK (insn
) >= MIN_TICK
);
1163 if (INSN_TICK (insn
) > clock_var
)
1164 /* INSN has been prematurely moved from the queue to the ready list.
1165 This is possible only if following flag is set. */
1166 gcc_assert (flag_sched_stalled_insns
);
1168 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1169 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1170 INSN_TICK (insn
) = clock_var
;
1172 /* Update dependent instructions. */
1173 for (link
= INSN_DEPEND (insn
); link
; link
= XEXP (link
, 1))
1175 rtx next
= XEXP (link
, 0);
1177 resolve_dep (next
, insn
);
1179 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn
))
1183 effective_cost
= try_ready (next
);
1185 if (effective_cost
>= 0
1186 && SCHED_GROUP_P (next
)
1187 && advance
< effective_cost
)
1188 advance
= effective_cost
;
1191 /* Check always has only one forward dependence (to the first insn in
1192 the recovery block), therefore, this will be executed only once. */
1194 gcc_assert (XEXP (link
, 1) == 0);
1195 fix_recovery_deps (RECOVERY_BLOCK (insn
));
1199 /* Annotate the instruction with issue information -- TImode
1200 indicates that the instruction is expected not to be able
1201 to issue on the same cycle as the previous insn. A machine
1202 may use this information to decide how the instruction should
1205 && GET_CODE (PATTERN (insn
)) != USE
1206 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
1208 if (reload_completed
)
1209 PUT_MODE (insn
, clock_var
> last_clock_var
? TImode
: VOIDmode
);
1210 last_clock_var
= clock_var
;
1216 /* Functions for handling of notes. */
1218 /* Delete notes beginning with INSN and put them in the chain
1219 of notes ended by NOTE_LIST.
1220 Returns the insn following the notes. */
1223 unlink_other_notes (rtx insn
, rtx tail
)
1225 rtx prev
= PREV_INSN (insn
);
1227 while (insn
!= tail
&& NOTE_NOT_BB_P (insn
))
1229 rtx next
= NEXT_INSN (insn
);
1230 basic_block bb
= BLOCK_FOR_INSN (insn
);
1232 /* Delete the note from its current position. */
1234 NEXT_INSN (prev
) = next
;
1236 PREV_INSN (next
) = prev
;
1240 /* Basic block can begin with either LABEL or
1241 NOTE_INSN_BASIC_BLOCK. */
1242 gcc_assert (BB_HEAD (bb
) != insn
);
1244 /* Check if we are removing last insn in the BB. */
1245 if (BB_END (bb
) == insn
)
1249 /* See sched_analyze to see how these are handled. */
1250 if (NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_BEG
1251 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_END
)
1253 /* Insert the note at the end of the notes list. */
1254 PREV_INSN (insn
) = note_list
;
1256 NEXT_INSN (note_list
) = insn
;
1265 /* Return the head and tail pointers of ebb starting at BEG and ending
1269 get_ebb_head_tail (basic_block beg
, basic_block end
, rtx
*headp
, rtx
*tailp
)
1271 rtx beg_head
= BB_HEAD (beg
);
1272 rtx beg_tail
= BB_END (beg
);
1273 rtx end_head
= BB_HEAD (end
);
1274 rtx end_tail
= BB_END (end
);
1276 /* Don't include any notes or labels at the beginning of the BEG
1277 basic block, or notes at the end of the END basic blocks. */
1279 if (LABEL_P (beg_head
))
1280 beg_head
= NEXT_INSN (beg_head
);
1282 while (beg_head
!= beg_tail
)
1283 if (NOTE_P (beg_head
))
1284 beg_head
= NEXT_INSN (beg_head
);
1291 end_head
= beg_head
;
1292 else if (LABEL_P (end_head
))
1293 end_head
= NEXT_INSN (end_head
);
1295 while (end_head
!= end_tail
)
1296 if (NOTE_P (end_tail
))
1297 end_tail
= PREV_INSN (end_tail
);
1304 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1307 no_real_insns_p (rtx head
, rtx tail
)
1309 while (head
!= NEXT_INSN (tail
))
1311 if (!NOTE_P (head
) && !LABEL_P (head
))
1313 head
= NEXT_INSN (head
);
1318 /* Delete notes between HEAD and TAIL and put them in the chain
1319 of notes ended by NOTE_LIST. */
1322 rm_other_notes (rtx head
, rtx tail
)
1328 if (head
== tail
&& (! INSN_P (head
)))
1331 next_tail
= NEXT_INSN (tail
);
1332 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1336 /* Farm out notes, and maybe save them in NOTE_LIST.
1337 This is needed to keep the debugger from
1338 getting completely deranged. */
1339 if (NOTE_NOT_BB_P (insn
))
1343 insn
= unlink_other_notes (insn
, next_tail
);
1345 gcc_assert (prev
!= tail
&& prev
!= head
&& insn
!= next_tail
);
1350 /* Functions for computation of registers live/usage info. */
1352 /* This function looks for a new register being defined.
1353 If the destination register is already used by the source,
1354 a new register is not needed. */
1357 find_set_reg_weight (rtx x
)
1359 if (GET_CODE (x
) == CLOBBER
1360 && register_operand (SET_DEST (x
), VOIDmode
))
1362 if (GET_CODE (x
) == SET
1363 && register_operand (SET_DEST (x
), VOIDmode
))
1365 if (REG_P (SET_DEST (x
)))
1367 if (!reg_mentioned_p (SET_DEST (x
), SET_SRC (x
)))
1377 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
1380 find_insn_reg_weight (basic_block bb
)
1382 rtx insn
, next_tail
, head
, tail
;
1384 get_ebb_head_tail (bb
, bb
, &head
, &tail
);
1385 next_tail
= NEXT_INSN (tail
);
1387 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1388 find_insn_reg_weight1 (insn
);
1391 /* Calculate INSN_REG_WEIGHT for single instruction.
1392 Separated from find_insn_reg_weight because of need
1393 to initialize new instruction in generate_recovery_code. */
1395 find_insn_reg_weight1 (rtx insn
)
1400 /* Handle register life information. */
1401 if (! INSN_P (insn
))
1404 /* Increment weight for each register born here. */
1406 reg_weight
+= find_set_reg_weight (x
);
1407 if (GET_CODE (x
) == PARALLEL
)
1410 for (j
= XVECLEN (x
, 0) - 1; j
>= 0; j
--)
1412 x
= XVECEXP (PATTERN (insn
), 0, j
);
1413 reg_weight
+= find_set_reg_weight (x
);
1416 /* Decrement weight for each register that dies here. */
1417 for (x
= REG_NOTES (insn
); x
; x
= XEXP (x
, 1))
1419 if (REG_NOTE_KIND (x
) == REG_DEAD
1420 || REG_NOTE_KIND (x
) == REG_UNUSED
)
1424 INSN_REG_WEIGHT (insn
) = reg_weight
;
1427 /* Move insns that became ready to fire from queue to ready list. */
1430 queue_to_ready (struct ready_list
*ready
)
1435 q_ptr
= NEXT_Q (q_ptr
);
1437 /* Add all pending insns that can be scheduled without stalls to the
1439 for (link
= insn_queue
[q_ptr
]; link
; link
= XEXP (link
, 1))
1441 insn
= XEXP (link
, 0);
1444 if (sched_verbose
>= 2)
1445 fprintf (sched_dump
, ";;\t\tQ-->Ready: insn %s: ",
1446 (*current_sched_info
->print_insn
) (insn
, 0));
1448 /* If the ready list is full, delay the insn for 1 cycle.
1449 See the comment in schedule_block for the rationale. */
1450 if (!reload_completed
1451 && ready
->n_ready
> MAX_SCHED_READY_INSNS
1452 && !SCHED_GROUP_P (insn
))
1454 if (sched_verbose
>= 2)
1455 fprintf (sched_dump
, "requeued because ready full\n");
1456 queue_insn (insn
, 1);
1460 ready_add (ready
, insn
, false);
1461 if (sched_verbose
>= 2)
1462 fprintf (sched_dump
, "moving to ready without stalls\n");
1465 free_INSN_LIST_list (&insn_queue
[q_ptr
]);
1467 /* If there are no ready insns, stall until one is ready and add all
1468 of the pending insns at that point to the ready list. */
1469 if (ready
->n_ready
== 0)
1473 for (stalls
= 1; stalls
<= max_insn_queue_index
; stalls
++)
1475 if ((link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]))
1477 for (; link
; link
= XEXP (link
, 1))
1479 insn
= XEXP (link
, 0);
1482 if (sched_verbose
>= 2)
1483 fprintf (sched_dump
, ";;\t\tQ-->Ready: insn %s: ",
1484 (*current_sched_info
->print_insn
) (insn
, 0));
1486 ready_add (ready
, insn
, false);
1487 if (sched_verbose
>= 2)
1488 fprintf (sched_dump
, "moving to ready with %d stalls\n", stalls
);
1490 free_INSN_LIST_list (&insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]);
1492 advance_one_cycle ();
1497 advance_one_cycle ();
1500 q_ptr
= NEXT_Q_AFTER (q_ptr
, stalls
);
1501 clock_var
+= stalls
;
1505 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1506 prematurely move INSN from the queue to the ready list. Currently,
1507 if a target defines the hook 'is_costly_dependence', this function
1508 uses the hook to check whether there exist any dependences which are
1509 considered costly by the target, between INSN and other insns that
1510 have already been scheduled. Dependences are checked up to Y cycles
1511 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
1512 controlling this value.
1513 (Other considerations could be taken into account instead (or in
1514 addition) depending on user flags and target hooks. */
1517 ok_for_early_queue_removal (rtx insn
)
1520 rtx prev_insn
= last_scheduled_insn
;
1522 if (targetm
.sched
.is_costly_dependence
)
1524 for (n_cycles
= flag_sched_stalled_insns_dep
; n_cycles
; n_cycles
--)
1526 for ( ; prev_insn
; prev_insn
= PREV_INSN (prev_insn
))
1531 if (!NOTE_P (prev_insn
))
1533 dep_link
= find_insn_list (insn
, INSN_DEPEND (prev_insn
));
1536 dep_cost
= insn_cost (prev_insn
, dep_link
, insn
) ;
1537 if (targetm
.sched
.is_costly_dependence (prev_insn
, insn
,
1539 flag_sched_stalled_insns_dep
- n_cycles
))
1544 if (GET_MODE (prev_insn
) == TImode
) /* end of dispatch group */
1550 prev_insn
= PREV_INSN (prev_insn
);
1558 /* Remove insns from the queue, before they become "ready" with respect
1559 to FU latency considerations. */
1562 early_queue_to_ready (state_t state
, struct ready_list
*ready
)
1570 state_t temp_state
= alloca (dfa_state_size
);
1572 int insns_removed
= 0;
1575 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1578 X == 0: There is no limit on how many queued insns can be removed
1579 prematurely. (flag_sched_stalled_insns = -1).
1581 X >= 1: Only X queued insns can be removed prematurely in each
1582 invocation. (flag_sched_stalled_insns = X).
1584 Otherwise: Early queue removal is disabled.
1585 (flag_sched_stalled_insns = 0)
1588 if (! flag_sched_stalled_insns
)
1591 for (stalls
= 0; stalls
<= max_insn_queue_index
; stalls
++)
1593 if ((link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]))
1595 if (sched_verbose
> 6)
1596 fprintf (sched_dump
, ";; look at index %d + %d\n", q_ptr
, stalls
);
1601 next_link
= XEXP (link
, 1);
1602 insn
= XEXP (link
, 0);
1603 if (insn
&& sched_verbose
> 6)
1604 print_rtl_single (sched_dump
, insn
);
1606 memcpy (temp_state
, state
, dfa_state_size
);
1607 if (recog_memoized (insn
) < 0)
1608 /* non-negative to indicate that it's not ready
1609 to avoid infinite Q->R->Q->R... */
1612 cost
= state_transition (temp_state
, insn
);
1614 if (sched_verbose
>= 6)
1615 fprintf (sched_dump
, "transition cost = %d\n", cost
);
1617 move_to_ready
= false;
1620 move_to_ready
= ok_for_early_queue_removal (insn
);
1621 if (move_to_ready
== true)
1623 /* move from Q to R */
1625 ready_add (ready
, insn
, false);
1628 XEXP (prev_link
, 1) = next_link
;
1630 insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)] = next_link
;
1632 free_INSN_LIST_node (link
);
1634 if (sched_verbose
>= 2)
1635 fprintf (sched_dump
, ";;\t\tEarly Q-->Ready: insn %s\n",
1636 (*current_sched_info
->print_insn
) (insn
, 0));
1639 if (insns_removed
== flag_sched_stalled_insns
)
1640 /* Remove no more than flag_sched_stalled_insns insns
1641 from Q at a time. */
1642 return insns_removed
;
1646 if (move_to_ready
== false)
1653 } /* for stalls.. */
1655 return insns_removed
;
1659 /* Print the ready list for debugging purposes. Callable from debugger. */
1662 debug_ready_list (struct ready_list
*ready
)
1667 if (ready
->n_ready
== 0)
1669 fprintf (sched_dump
, "\n");
1673 p
= ready_lastpos (ready
);
1674 for (i
= 0; i
< ready
->n_ready
; i
++)
1675 fprintf (sched_dump
, " %s", (*current_sched_info
->print_insn
) (p
[i
], 0));
1676 fprintf (sched_dump
, "\n");
1679 /* Search INSN for REG_SAVE_NOTE note pairs for
1680 NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
1681 NOTEs. The REG_SAVE_NOTE note following first one is contains the
1682 saved value for NOTE_BLOCK_NUMBER which is useful for
1683 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1686 reemit_notes (rtx insn
)
1688 rtx note
, last
= insn
;
1690 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
1692 if (REG_NOTE_KIND (note
) == REG_SAVE_NOTE
)
1694 enum insn_note note_type
= INTVAL (XEXP (note
, 0));
1696 last
= emit_note_before (note_type
, last
);
1697 remove_note (insn
, note
);
1702 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1704 move_insn (rtx insn
)
1706 rtx last
= last_scheduled_insn
;
1708 if (PREV_INSN (insn
) != last
)
1714 bb
= BLOCK_FOR_INSN (insn
);
1716 /* BB_HEAD is either LABEL or NOTE. */
1717 gcc_assert (BB_HEAD (bb
) != insn
);
1719 if (BB_END (bb
) == insn
)
1720 /* If this is last instruction in BB, move end marker one
1723 /* Jumps are always placed at the end of basic block. */
1724 jump_p
= control_flow_insn_p (insn
);
1727 || ((current_sched_info
->flags
& SCHED_RGN
)
1728 && IS_SPECULATION_BRANCHY_CHECK_P (insn
))
1729 || (current_sched_info
->flags
& SCHED_EBB
));
1731 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn
)) == bb
);
1733 BB_END (bb
) = PREV_INSN (insn
);
1736 gcc_assert (BB_END (bb
) != last
);
1739 /* We move the block note along with jump. */
1741 /* NT is needed for assertion below. */
1742 rtx nt
= current_sched_info
->next_tail
;
1744 note
= NEXT_INSN (insn
);
1745 while (NOTE_NOT_BB_P (note
) && note
!= nt
)
1746 note
= NEXT_INSN (note
);
1750 || BARRIER_P (note
)))
1751 note
= NEXT_INSN (note
);
1753 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
1758 NEXT_INSN (PREV_INSN (insn
)) = NEXT_INSN (note
);
1759 PREV_INSN (NEXT_INSN (note
)) = PREV_INSN (insn
);
1761 NEXT_INSN (note
) = NEXT_INSN (last
);
1762 PREV_INSN (NEXT_INSN (last
)) = note
;
1764 NEXT_INSN (last
) = insn
;
1765 PREV_INSN (insn
) = last
;
1767 bb
= BLOCK_FOR_INSN (last
);
1771 fix_jump_move (insn
);
1773 if (BLOCK_FOR_INSN (insn
) != bb
)
1774 move_block_after_check (insn
);
1776 gcc_assert (BB_END (bb
) == last
);
1779 set_block_for_insn (insn
, bb
);
1781 /* Update BB_END, if needed. */
1782 if (BB_END (bb
) == last
)
1786 reemit_notes (insn
);
1788 SCHED_GROUP_P (insn
) = 0;
1791 /* The following structure describe an entry of the stack of choices. */
1794 /* Ordinal number of the issued insn in the ready queue. */
1796 /* The number of the rest insns whose issues we should try. */
1798 /* The number of issued essential insns. */
1800 /* State after issuing the insn. */
1804 /* The following array is used to implement a stack of choices used in
1805 function max_issue. */
1806 static struct choice_entry
*choice_stack
;
1808 /* The following variable value is number of essential insns issued on
1809 the current cycle. An insn is essential one if it changes the
1810 processors state. */
1811 static int cycle_issued_insns
;
1813 /* The following variable value is maximal number of tries of issuing
1814 insns for the first cycle multipass insn scheduling. We define
1815 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
1816 need this constraint if all real insns (with non-negative codes)
1817 had reservations because in this case the algorithm complexity is
1818 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
1819 might be incomplete and such insn might occur. For such
1820 descriptions, the complexity of algorithm (without the constraint)
1821 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
1822 static int max_lookahead_tries
;
1824 /* The following value is value of hook
1825 `first_cycle_multipass_dfa_lookahead' at the last call of
1827 static int cached_first_cycle_multipass_dfa_lookahead
= 0;
1829 /* The following value is value of `issue_rate' at the last call of
1831 static int cached_issue_rate
= 0;
1833 /* The following function returns maximal (or close to maximal) number
1834 of insns which can be issued on the same cycle and one of which
1835 insns is insns with the best rank (the first insn in READY). To
1836 make this function tries different samples of ready insns. READY
1837 is current queue `ready'. Global array READY_TRY reflects what
1838 insns are already issued in this try. MAX_POINTS is the sum of points
1839 of all instructions in READY. The function stops immediately,
1840 if it reached the such a solution, that all instruction can be issued.
1841 INDEX will contain index of the best insn in READY. The following
1842 function is used only for first cycle multipass scheduling. */
1844 max_issue (struct ready_list
*ready
, int *index
, int max_points
)
1846 int n
, i
, all
, n_ready
, best
, delay
, tries_num
, points
= -1;
1847 struct choice_entry
*top
;
1851 memcpy (choice_stack
->state
, curr_state
, dfa_state_size
);
1853 top
->rest
= cached_first_cycle_multipass_dfa_lookahead
;
1855 n_ready
= ready
->n_ready
;
1856 for (all
= i
= 0; i
< n_ready
; i
++)
1863 if (top
->rest
== 0 || i
>= n_ready
)
1865 if (top
== choice_stack
)
1867 if (best
< top
- choice_stack
&& ready_try
[0])
1869 best
= top
- choice_stack
;
1870 *index
= choice_stack
[1].index
;
1872 if (top
->n
== max_points
|| best
== all
)
1878 memcpy (curr_state
, top
->state
, dfa_state_size
);
1880 else if (!ready_try
[i
])
1883 if (tries_num
> max_lookahead_tries
)
1885 insn
= ready_element (ready
, i
);
1886 delay
= state_transition (curr_state
, insn
);
1889 if (state_dead_lock_p (curr_state
))
1894 if (memcmp (top
->state
, curr_state
, dfa_state_size
) != 0)
1895 n
+= ISSUE_POINTS (insn
);
1897 top
->rest
= cached_first_cycle_multipass_dfa_lookahead
;
1900 memcpy (top
->state
, curr_state
, dfa_state_size
);
1907 while (top
!= choice_stack
)
1909 ready_try
[top
->index
] = 0;
1912 memcpy (curr_state
, choice_stack
->state
, dfa_state_size
);
1914 if (sched_verbose
>= 4)
1915 fprintf (sched_dump
, ";;\t\tChoosed insn : %s; points: %d/%d\n",
1916 (*current_sched_info
->print_insn
) (ready_element (ready
, *index
),
1918 points
, max_points
);
1923 /* The following function chooses insn from READY and modifies
1924 *N_READY and READY. The following function is used only for first
1925 cycle multipass scheduling. */
1928 choose_ready (struct ready_list
*ready
)
1932 if (targetm
.sched
.first_cycle_multipass_dfa_lookahead
)
1933 lookahead
= targetm
.sched
.first_cycle_multipass_dfa_lookahead ();
1934 if (lookahead
<= 0 || SCHED_GROUP_P (ready_element (ready
, 0)))
1935 return ready_remove_first (ready
);
1938 /* Try to choose the better insn. */
1939 int index
= 0, i
, n
;
1941 int more_issue
, max_points
, try_data
= 1, try_control
= 1;
1943 if (cached_first_cycle_multipass_dfa_lookahead
!= lookahead
)
1945 cached_first_cycle_multipass_dfa_lookahead
= lookahead
;
1946 max_lookahead_tries
= 100;
1947 for (i
= 0; i
< issue_rate
; i
++)
1948 max_lookahead_tries
*= lookahead
;
1950 insn
= ready_element (ready
, 0);
1951 if (INSN_CODE (insn
) < 0)
1952 return ready_remove_first (ready
);
1955 && spec_info
->flags
& (PREFER_NON_DATA_SPEC
1956 | PREFER_NON_CONTROL_SPEC
))
1958 for (i
= 0, n
= ready
->n_ready
; i
< n
; i
++)
1963 x
= ready_element (ready
, i
);
1966 if (spec_info
->flags
& PREFER_NON_DATA_SPEC
1967 && !(s
& DATA_SPEC
))
1970 if (!(spec_info
->flags
& PREFER_NON_CONTROL_SPEC
)
1975 if (spec_info
->flags
& PREFER_NON_CONTROL_SPEC
1976 && !(s
& CONTROL_SPEC
))
1979 if (!(spec_info
->flags
& PREFER_NON_DATA_SPEC
) || !try_data
)
1985 if ((!try_data
&& (TODO_SPEC (insn
) & DATA_SPEC
))
1986 || (!try_control
&& (TODO_SPEC (insn
) & CONTROL_SPEC
))
1987 || (targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard_spec
1988 && !targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard_spec
1990 /* Discard speculative instruction that stands first in the ready
1993 change_queue_index (insn
, 1);
1997 max_points
= ISSUE_POINTS (insn
);
1998 more_issue
= issue_rate
- cycle_issued_insns
- 1;
2000 for (i
= 1; i
< ready
->n_ready
; i
++)
2002 insn
= ready_element (ready
, i
);
2004 = (INSN_CODE (insn
) < 0
2005 || (!try_data
&& (TODO_SPEC (insn
) & DATA_SPEC
))
2006 || (!try_control
&& (TODO_SPEC (insn
) & CONTROL_SPEC
))
2007 || (targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard
2008 && !targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard
2011 if (!ready_try
[i
] && more_issue
-- > 0)
2012 max_points
+= ISSUE_POINTS (insn
);
2015 if (max_issue (ready
, &index
, max_points
) == 0)
2016 return ready_remove_first (ready
);
2018 return ready_remove (ready
, index
);
2022 /* Use forward list scheduling to rearrange insns of block pointed to by
2023 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2027 schedule_block (basic_block
*target_bb
, int rgn_n_insns1
)
2029 struct ready_list ready
;
2030 int i
, first_cycle_insn_p
;
2032 state_t temp_state
= NULL
; /* It is used for multipass scheduling. */
2033 int sort_p
, advance
, start_clock_var
;
2035 /* Head/tail info for this block. */
2036 rtx prev_head
= current_sched_info
->prev_head
;
2037 rtx next_tail
= current_sched_info
->next_tail
;
2038 rtx head
= NEXT_INSN (prev_head
);
2039 rtx tail
= PREV_INSN (next_tail
);
2041 /* We used to have code to avoid getting parameters moved from hard
2042 argument registers into pseudos.
2044 However, it was removed when it proved to be of marginal benefit
2045 and caused problems because schedule_block and compute_forward_dependences
2046 had different notions of what the "head" insn was. */
2048 gcc_assert (head
!= tail
|| INSN_P (head
));
2050 added_recovery_block_p
= false;
2054 dump_new_block_header (0, *target_bb
, head
, tail
);
2056 state_reset (curr_state
);
2058 /* Allocate the ready list. */
2062 choice_stack
= NULL
;
2065 extend_ready (rgn_n_insns1
+ 1);
2067 ready
.first
= ready
.veclen
- 1;
2070 /* It is used for first cycle multipass scheduling. */
2071 temp_state
= alloca (dfa_state_size
);
2073 if (targetm
.sched
.md_init
)
2074 targetm
.sched
.md_init (sched_dump
, sched_verbose
, ready
.veclen
);
2076 /* We start inserting insns after PREV_HEAD. */
2077 last_scheduled_insn
= prev_head
;
2079 gcc_assert (NOTE_P (last_scheduled_insn
)
2080 && BLOCK_FOR_INSN (last_scheduled_insn
) == *target_bb
);
2082 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2087 insn_queue
= alloca ((max_insn_queue_index
+ 1) * sizeof (rtx
));
2088 memset (insn_queue
, 0, (max_insn_queue_index
+ 1) * sizeof (rtx
));
2090 /* Start just before the beginning of time. */
2093 /* We need queue and ready lists and clock_var be initialized
2094 in try_ready () (which is called through init_ready_list ()). */
2095 (*current_sched_info
->init_ready_list
) ();
2097 /* The algorithm is O(n^2) in the number of ready insns at any given
2098 time in the worst case. Before reload we are more likely to have
2099 big lists so truncate them to a reasonable size. */
2100 if (!reload_completed
&& ready
.n_ready
> MAX_SCHED_READY_INSNS
)
2102 ready_sort (&ready
);
2104 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2105 for (i
= MAX_SCHED_READY_INSNS
; i
< ready
.n_ready
; i
++)
2106 if (!SCHED_GROUP_P (ready_element (&ready
, i
)))
2109 if (sched_verbose
>= 2)
2111 fprintf (sched_dump
,
2112 ";;\t\tReady list on entry: %d insns\n", ready
.n_ready
);
2113 fprintf (sched_dump
,
2114 ";;\t\t before reload => truncated to %d insns\n", i
);
2117 /* Delay all insns past it for 1 cycle. */
2118 while (i
< ready
.n_ready
)
2119 queue_insn (ready_remove (&ready
, i
), 1);
2122 /* Now we can restore basic block notes and maintain precise cfg. */
2123 restore_bb_notes (*target_bb
);
2125 last_clock_var
= -1;
2130 /* Loop until all the insns in BB are scheduled. */
2131 while ((*current_sched_info
->schedule_more_p
) ())
2135 start_clock_var
= clock_var
;
2139 advance_one_cycle ();
2141 /* Add to the ready list all pending insns that can be issued now.
2142 If there are no ready insns, increment clock until one
2143 is ready and add all pending insns at that point to the ready
2145 queue_to_ready (&ready
);
2147 gcc_assert (ready
.n_ready
);
2149 if (sched_verbose
>= 2)
2151 fprintf (sched_dump
, ";;\t\tReady list after queue_to_ready: ");
2152 debug_ready_list (&ready
);
2154 advance
-= clock_var
- start_clock_var
;
2156 while (advance
> 0);
2160 /* Sort the ready list based on priority. */
2161 ready_sort (&ready
);
2163 if (sched_verbose
>= 2)
2165 fprintf (sched_dump
, ";;\t\tReady list after ready_sort: ");
2166 debug_ready_list (&ready
);
2170 /* Allow the target to reorder the list, typically for
2171 better instruction bundling. */
2172 if (sort_p
&& targetm
.sched
.reorder
2173 && (ready
.n_ready
== 0
2174 || !SCHED_GROUP_P (ready_element (&ready
, 0))))
2176 targetm
.sched
.reorder (sched_dump
, sched_verbose
,
2177 ready_lastpos (&ready
),
2178 &ready
.n_ready
, clock_var
);
2180 can_issue_more
= issue_rate
;
2182 first_cycle_insn_p
= 1;
2183 cycle_issued_insns
= 0;
2190 if (sched_verbose
>= 2)
2192 fprintf (sched_dump
, ";;\tReady list (t = %3d): ",
2194 debug_ready_list (&ready
);
2197 if (ready
.n_ready
== 0
2199 && reload_completed
)
2201 /* Allow scheduling insns directly from the queue in case
2202 there's nothing better to do (ready list is empty) but
2203 there are still vacant dispatch slots in the current cycle. */
2204 if (sched_verbose
>= 6)
2205 fprintf(sched_dump
,";;\t\tSecond chance\n");
2206 memcpy (temp_state
, curr_state
, dfa_state_size
);
2207 if (early_queue_to_ready (temp_state
, &ready
))
2208 ready_sort (&ready
);
2211 if (ready
.n_ready
== 0 || !can_issue_more
2212 || state_dead_lock_p (curr_state
)
2213 || !(*current_sched_info
->schedule_more_p
) ())
2216 /* Select and remove the insn from the ready list. */
2219 insn
= choose_ready (&ready
);
2224 insn
= ready_remove_first (&ready
);
2226 if (targetm
.sched
.dfa_new_cycle
2227 && targetm
.sched
.dfa_new_cycle (sched_dump
, sched_verbose
,
2228 insn
, last_clock_var
,
2229 clock_var
, &sort_p
))
2230 /* SORT_P is used by the target to override sorting
2231 of the ready list. This is needed when the target
2232 has modified its internal structures expecting that
2233 the insn will be issued next. As we need the insn
2234 to have the highest priority (so it will be returned by
2235 the ready_remove_first call above), we invoke
2236 ready_add (&ready, insn, true).
2237 But, still, there is one issue: INSN can be later
2238 discarded by scheduler's front end through
2239 current_sched_info->can_schedule_ready_p, hence, won't
2242 ready_add (&ready
, insn
, true);
2247 memcpy (temp_state
, curr_state
, dfa_state_size
);
2248 if (recog_memoized (insn
) < 0)
2250 asm_p
= (GET_CODE (PATTERN (insn
)) == ASM_INPUT
2251 || asm_noperands (PATTERN (insn
)) >= 0);
2252 if (!first_cycle_insn_p
&& asm_p
)
2253 /* This is asm insn which is tryed to be issued on the
2254 cycle not first. Issue it on the next cycle. */
2257 /* A USE insn, or something else we don't need to
2258 understand. We can't pass these directly to
2259 state_transition because it will trigger a
2260 fatal error for unrecognizable insns. */
2265 cost
= state_transition (temp_state
, insn
);
2274 queue_insn (insn
, cost
);
2275 if (SCHED_GROUP_P (insn
))
2284 if (current_sched_info
->can_schedule_ready_p
2285 && ! (*current_sched_info
->can_schedule_ready_p
) (insn
))
2286 /* We normally get here only if we don't want to move
2287 insn from the split block. */
2289 TODO_SPEC (insn
) = (TODO_SPEC (insn
) & ~SPECULATIVE
) | HARD_DEP
;
2293 /* DECISION is made. */
2295 if (TODO_SPEC (insn
) & SPECULATIVE
)
2296 generate_recovery_code (insn
);
2298 if (control_flow_insn_p (last_scheduled_insn
)
2299 /* This is used to to switch basic blocks by request
2300 from scheduler front-end (actually, sched-ebb.c only).
2301 This is used to process blocks with single fallthru
2302 edge. If succeeding block has jump, it [jump] will try
2303 move at the end of current bb, thus corrupting CFG. */
2304 || current_sched_info
->advance_target_bb (*target_bb
, insn
))
2306 *target_bb
= current_sched_info
->advance_target_bb
2313 x
= next_real_insn (last_scheduled_insn
);
2315 dump_new_block_header (1, *target_bb
, x
, tail
);
2318 last_scheduled_insn
= bb_note (*target_bb
);
2321 /* Update counters, etc in the scheduler's front end. */
2322 (*current_sched_info
->begin_schedule_ready
) (insn
,
2323 last_scheduled_insn
);
2326 last_scheduled_insn
= insn
;
2328 if (memcmp (curr_state
, temp_state
, dfa_state_size
) != 0)
2330 cycle_issued_insns
++;
2331 memcpy (curr_state
, temp_state
, dfa_state_size
);
2334 if (targetm
.sched
.variable_issue
)
2336 targetm
.sched
.variable_issue (sched_dump
, sched_verbose
,
2337 insn
, can_issue_more
);
2338 /* A naked CLOBBER or USE generates no instruction, so do
2339 not count them against the issue rate. */
2340 else if (GET_CODE (PATTERN (insn
)) != USE
2341 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
2344 advance
= schedule_insn (insn
);
2346 /* After issuing an asm insn we should start a new cycle. */
2347 if (advance
== 0 && asm_p
)
2352 first_cycle_insn_p
= 0;
2354 /* Sort the ready list based on priority. This must be
2355 redone here, as schedule_insn may have readied additional
2356 insns that will not be sorted correctly. */
2357 if (ready
.n_ready
> 0)
2358 ready_sort (&ready
);
2360 if (targetm
.sched
.reorder2
2361 && (ready
.n_ready
== 0
2362 || !SCHED_GROUP_P (ready_element (&ready
, 0))))
2365 targetm
.sched
.reorder2 (sched_dump
, sched_verbose
,
2367 ? ready_lastpos (&ready
) : NULL
,
2368 &ready
.n_ready
, clock_var
);
2376 fprintf (sched_dump
, ";;\tReady list (final): ");
2377 debug_ready_list (&ready
);
2380 if (current_sched_info
->queue_must_finish_empty
)
2381 /* Sanity check -- queue must be empty now. Meaningless if region has
2383 gcc_assert (!q_size
&& !ready
.n_ready
);
2386 /* We must maintain QUEUE_INDEX between blocks in region. */
2387 for (i
= ready
.n_ready
- 1; i
>= 0; i
--)
2391 x
= ready_element (&ready
, i
);
2392 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
2393 TODO_SPEC (x
) = (TODO_SPEC (x
) & ~SPECULATIVE
) | HARD_DEP
;
2397 for (i
= 0; i
<= max_insn_queue_index
; i
++)
2400 for (link
= insn_queue
[i
]; link
; link
= XEXP (link
, 1))
2405 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
2406 TODO_SPEC (x
) = (TODO_SPEC (x
) & ~SPECULATIVE
) | HARD_DEP
;
2408 free_INSN_LIST_list (&insn_queue
[i
]);
2412 if (!current_sched_info
->queue_must_finish_empty
2413 || added_recovery_block_p
)
2415 /* INSN_TICK (minimum clock tick at which the insn becomes
2416 ready) may be not correct for the insn in the subsequent
2417 blocks of the region. We should use a correct value of
2418 `clock_var' or modify INSN_TICK. It is better to keep
2419 clock_var value equal to 0 at the start of a basic block.
2420 Therefore we modify INSN_TICK here. */
2421 fix_inter_tick (NEXT_INSN (prev_head
), last_scheduled_insn
);
2424 #ifdef ENABLE_CHECKING
2425 /* After the reload the ia64 backend doesn't maintain BB_END, so
2426 if we want to check anything, better do it now.
2427 And it already clobbered previously scheduled code. */
2428 if (reload_completed
)
2429 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head
)), 0);
2432 if (targetm
.sched
.md_finish
)
2433 targetm
.sched
.md_finish (sched_dump
, sched_verbose
);
2435 /* Update head/tail boundaries. */
2436 head
= NEXT_INSN (prev_head
);
2437 tail
= last_scheduled_insn
;
2439 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2440 previously found among the insns. Insert them at the beginning
2444 basic_block head_bb
= BLOCK_FOR_INSN (head
);
2445 rtx note_head
= note_list
;
2447 while (PREV_INSN (note_head
))
2449 set_block_for_insn (note_head
, head_bb
);
2450 note_head
= PREV_INSN (note_head
);
2452 /* In the above cycle we've missed this note: */
2453 set_block_for_insn (note_head
, head_bb
);
2455 PREV_INSN (note_head
) = PREV_INSN (head
);
2456 NEXT_INSN (PREV_INSN (head
)) = note_head
;
2457 PREV_INSN (head
) = note_list
;
2458 NEXT_INSN (note_list
) = head
;
2465 fprintf (sched_dump
, ";; total time = %d\n;; new head = %d\n",
2466 clock_var
, INSN_UID (head
));
2467 fprintf (sched_dump
, ";; new tail = %d\n\n",
2471 current_sched_info
->head
= head
;
2472 current_sched_info
->tail
= tail
;
2477 for (i
= 0; i
<= rgn_n_insns
; i
++)
2478 free (choice_stack
[i
].state
);
2479 free (choice_stack
);
2482 /* Set_priorities: compute priority of each insn in the block. */
2485 set_priorities (rtx head
, rtx tail
)
2489 int sched_max_insns_priority
=
2490 current_sched_info
->sched_max_insns_priority
;
2493 if (head
== tail
&& (! INSN_P (head
)))
2498 prev_head
= PREV_INSN (head
);
2499 for (insn
= tail
; insn
!= prev_head
; insn
= PREV_INSN (insn
))
2505 (void) priority (insn
);
2507 if (INSN_PRIORITY_KNOWN (insn
))
2508 sched_max_insns_priority
=
2509 MAX (sched_max_insns_priority
, INSN_PRIORITY (insn
));
2512 current_sched_info
->sched_max_insns_priority
= sched_max_insns_priority
;
2517 /* Next LUID to assign to an instruction. */
2520 /* Initialize some global state for the scheduler. */
2529 /* Switch to working copy of sched_info. */
2530 memcpy (¤t_sched_info_var
, current_sched_info
,
2531 sizeof (current_sched_info_var
));
2532 current_sched_info
= ¤t_sched_info_var
;
2534 /* Disable speculative loads in their presence if cc0 defined. */
2536 flag_schedule_speculative_load
= 0;
2539 /* Set dump and sched_verbose for the desired debugging output. If no
2540 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2541 For -fsched-verbose=N, N>=10, print everything to stderr. */
2542 sched_verbose
= sched_verbose_param
;
2543 if (sched_verbose_param
== 0 && dump_file
)
2545 sched_dump
= ((sched_verbose_param
>= 10 || !dump_file
)
2546 ? stderr
: dump_file
);
2548 /* Initialize SPEC_INFO. */
2549 if (targetm
.sched
.set_sched_flags
)
2551 spec_info
= &spec_info_var
;
2552 targetm
.sched
.set_sched_flags (spec_info
);
2553 if (current_sched_info
->flags
& DO_SPECULATION
)
2554 spec_info
->weakness_cutoff
=
2555 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF
) * MAX_DEP_WEAK
) / 100;
2557 /* So we won't read anything accidentally. */
2559 #ifdef ENABLE_CHECKING
2560 check_sched_flags ();
2564 /* So we won't read anything accidentally. */
2567 /* Initialize issue_rate. */
2568 if (targetm
.sched
.issue_rate
)
2569 issue_rate
= targetm
.sched
.issue_rate ();
2573 if (cached_issue_rate
!= issue_rate
)
2575 cached_issue_rate
= issue_rate
;
2576 /* To invalidate max_lookahead_tries: */
2577 cached_first_cycle_multipass_dfa_lookahead
= 0;
2584 for (i
= 0; i
< old_max_uid
; i
++)
2587 h_i_d
[i
].todo_spec
= HARD_DEP
;
2588 h_i_d
[i
].queue_index
= QUEUE_NOWHERE
;
2589 h_i_d
[i
].tick
= INVALID_TICK
;
2590 h_i_d
[i
].inter_tick
= INVALID_TICK
;
2593 if (targetm
.sched
.init_dfa_pre_cycle_insn
)
2594 targetm
.sched
.init_dfa_pre_cycle_insn ();
2596 if (targetm
.sched
.init_dfa_post_cycle_insn
)
2597 targetm
.sched
.init_dfa_post_cycle_insn ();
2600 dfa_state_size
= state_size ();
2601 curr_state
= xmalloc (dfa_state_size
);
2606 for (insn
= BB_HEAD (b
); ; insn
= NEXT_INSN (insn
))
2608 INSN_LUID (insn
) = luid
;
2610 /* Increment the next luid, unless this is a note. We don't
2611 really need separate IDs for notes and we don't want to
2612 schedule differently depending on whether or not there are
2613 line-number notes, i.e., depending on whether or not we're
2614 generating debugging information. */
2618 if (insn
== BB_END (b
))
2622 init_dependency_caches (luid
);
2624 init_alias_analysis ();
2626 old_last_basic_block
= 0;
2631 if (current_sched_info
->flags
& USE_GLAT
)
2634 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2635 removing death notes. */
2636 FOR_EACH_BB_REVERSE (b
)
2637 find_insn_reg_weight (b
);
2639 if (targetm
.sched
.md_init_global
)
2640 targetm
.sched
.md_init_global (sched_dump
, sched_verbose
, old_max_uid
);
2642 nr_begin_data
= nr_begin_control
= nr_be_in_data
= nr_be_in_control
= 0;
2643 before_recovery
= 0;
2645 #ifdef ENABLE_CHECKING
2646 /* This is used preferably for finding bugs in check_cfg () itself. */
2651 /* Free global data used during insn scheduling. */
2659 free_dependency_caches ();
2660 end_alias_analysis ();
2663 if (targetm
.sched
.md_finish_global
)
2664 targetm
.sched
.md_finish_global (sched_dump
, sched_verbose
);
2666 if (spec_info
&& spec_info
->dump
)
2668 char c
= reload_completed
? 'a' : 'b';
2670 fprintf (spec_info
->dump
,
2671 ";; %s:\n", current_function_name ());
2673 fprintf (spec_info
->dump
,
2674 ";; Procedure %cr-begin-data-spec motions == %d\n",
2676 fprintf (spec_info
->dump
,
2677 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2679 fprintf (spec_info
->dump
,
2680 ";; Procedure %cr-begin-control-spec motions == %d\n",
2681 c
, nr_begin_control
);
2682 fprintf (spec_info
->dump
,
2683 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2684 c
, nr_be_in_control
);
2687 #ifdef ENABLE_CHECKING
2688 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2689 if (!reload_completed
)
2693 current_sched_info
= NULL
;
2696 /* Fix INSN_TICKs of the instructions in the current block as well as
2697 INSN_TICKs of their dependents.
2698 HEAD and TAIL are the begin and the end of the current scheduled block. */
2700 fix_inter_tick (rtx head
, rtx tail
)
2702 /* Set of instructions with corrected INSN_TICK. */
2703 bitmap_head processed
;
2704 int next_clock
= clock_var
+ 1;
2706 bitmap_initialize (&processed
, 0);
2708 /* Iterates over scheduled instructions and fix their INSN_TICKs and
2709 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2710 across different blocks. */
2711 for (tail
= NEXT_INSN (tail
); head
!= tail
; head
= NEXT_INSN (head
))
2718 tick
= INSN_TICK (head
);
2719 gcc_assert (tick
>= MIN_TICK
);
2721 /* Fix INSN_TICK of instruction from just scheduled block. */
2722 if (!bitmap_bit_p (&processed
, INSN_LUID (head
)))
2724 bitmap_set_bit (&processed
, INSN_LUID (head
));
2727 if (tick
< MIN_TICK
)
2730 INSN_TICK (head
) = tick
;
2733 for (link
= INSN_DEPEND (head
); link
; link
= XEXP (link
, 1))
2737 next
= XEXP (link
, 0);
2738 tick
= INSN_TICK (next
);
2740 if (tick
!= INVALID_TICK
2741 /* If NEXT has its INSN_TICK calculated, fix it.
2742 If not - it will be properly calculated from
2743 scratch later in fix_tick_ready. */
2744 && !bitmap_bit_p (&processed
, INSN_LUID (next
)))
2746 bitmap_set_bit (&processed
, INSN_LUID (next
));
2749 if (tick
< MIN_TICK
)
2752 if (tick
> INTER_TICK (next
))
2753 INTER_TICK (next
) = tick
;
2755 tick
= INTER_TICK (next
);
2757 INSN_TICK (next
) = tick
;
2762 bitmap_clear (&processed
);
2765 /* Check if NEXT is ready to be added to the ready or queue list.
2766 If "yes", add it to the proper list.
2768 -1 - is not ready yet,
2769 0 - added to the ready list,
2770 0 < N - queued for N cycles. */
2772 try_ready (rtx next
)
2777 ts
= &TODO_SPEC (next
);
2780 gcc_assert (!(old_ts
& ~(SPECULATIVE
| HARD_DEP
))
2781 && ((old_ts
& HARD_DEP
)
2782 || (old_ts
& SPECULATIVE
)));
2784 if (!(current_sched_info
->flags
& DO_SPECULATION
))
2786 if (!LOG_LINKS (next
))
2791 *ts
&= ~SPECULATIVE
& ~HARD_DEP
;
2793 link
= LOG_LINKS (next
);
2796 /* LOG_LINKS are maintained sorted.
2797 So if DEP_STATUS of the first dep is SPECULATIVE,
2798 than all other deps are speculative too. */
2799 if (DEP_STATUS (link
) & SPECULATIVE
)
2801 /* Now we've got NEXT with speculative deps only.
2802 1. Look at the deps to see what we have to do.
2803 2. Check if we can do 'todo'. */
2804 *ts
= DEP_STATUS (link
) & SPECULATIVE
;
2805 while ((link
= XEXP (link
, 1)))
2806 *ts
= ds_merge (*ts
, DEP_STATUS (link
) & SPECULATIVE
);
2808 if (dep_weak (*ts
) < spec_info
->weakness_cutoff
)
2809 /* Too few points. */
2810 *ts
= (*ts
& ~SPECULATIVE
) | HARD_DEP
;
2818 gcc_assert (*ts
== old_ts
2819 && QUEUE_INDEX (next
) == QUEUE_NOWHERE
);
2820 else if (current_sched_info
->new_ready
)
2821 *ts
= current_sched_info
->new_ready (next
, *ts
);
2823 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
2824 have its original pattern or changed (speculative) one. This is due
2825 to changing ebb in region scheduling.
2826 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
2827 has speculative pattern.
2829 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
2830 control-speculative NEXT could have been discarded by sched-rgn.c
2831 (the same case as when discarded by can_schedule_ready_p ()). */
2833 if ((*ts
& SPECULATIVE
)
2834 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
2835 need to change anything. */
2841 gcc_assert ((*ts
& SPECULATIVE
) && !(*ts
& ~SPECULATIVE
));
2843 res
= speculate_insn (next
, *ts
, &new_pat
);
2848 /* It would be nice to change DEP_STATUS of all dependences,
2849 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
2850 so we won't reanalyze anything. */
2851 *ts
= (*ts
& ~SPECULATIVE
) | HARD_DEP
;
2855 /* We follow the rule, that every speculative insn
2856 has non-null ORIG_PAT. */
2857 if (!ORIG_PAT (next
))
2858 ORIG_PAT (next
) = PATTERN (next
);
2862 if (!ORIG_PAT (next
))
2863 /* If we gonna to overwrite the original pattern of insn,
2865 ORIG_PAT (next
) = PATTERN (next
);
2867 change_pattern (next
, new_pat
);
2875 /* We need to restore pattern only if (*ts == 0), because otherwise it is
2876 either correct (*ts & SPECULATIVE),
2877 or we simply don't care (*ts & HARD_DEP). */
2879 gcc_assert (!ORIG_PAT (next
)
2880 || !IS_SPECULATION_BRANCHY_CHECK_P (next
));
2884 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
2885 control-speculative NEXT could have been discarded by sched-rgn.c
2886 (the same case as when discarded by can_schedule_ready_p ()). */
2887 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
2889 change_queue_index (next
, QUEUE_NOWHERE
);
2892 else if (!(*ts
& BEGIN_SPEC
) && ORIG_PAT (next
) && !IS_SPECULATION_CHECK_P (next
))
2893 /* We should change pattern of every previously speculative
2894 instruction - and we determine if NEXT was speculative by using
2895 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
2896 pat too, so skip them. */
2898 change_pattern (next
, ORIG_PAT (next
));
2899 ORIG_PAT (next
) = 0;
2902 if (sched_verbose
>= 2)
2904 int s
= TODO_SPEC (next
);
2906 fprintf (sched_dump
, ";;\t\tdependencies resolved: insn %s",
2907 (*current_sched_info
->print_insn
) (next
, 0));
2909 if (spec_info
&& spec_info
->dump
)
2912 fprintf (spec_info
->dump
, "; data-spec;");
2913 if (s
& BEGIN_CONTROL
)
2914 fprintf (spec_info
->dump
, "; control-spec;");
2915 if (s
& BE_IN_CONTROL
)
2916 fprintf (spec_info
->dump
, "; in-control-spec;");
2919 fprintf (sched_dump
, "\n");
2922 adjust_priority (next
);
2924 return fix_tick_ready (next
);
2927 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
2929 fix_tick_ready (rtx next
)
2934 link
= RESOLVED_DEPS (next
);
2940 tick
= INSN_TICK (next
);
2941 /* if tick is not equal to INVALID_TICK, then update
2942 INSN_TICK of NEXT with the most recent resolved dependence
2943 cost. Otherwise, recalculate from scratch. */
2944 full_p
= tick
== INVALID_TICK
;
2950 pro
= XEXP (link
, 0);
2951 gcc_assert (INSN_TICK (pro
) >= MIN_TICK
);
2953 tick1
= INSN_TICK (pro
) + insn_cost (pro
, link
, next
);
2957 while ((link
= XEXP (link
, 1)) && full_p
);
2962 INSN_TICK (next
) = tick
;
2964 delay
= tick
- clock_var
;
2966 delay
= QUEUE_READY
;
2968 change_queue_index (next
, delay
);
2973 /* Move NEXT to the proper queue list with (DELAY >= 1),
2974 or add it to the ready list (DELAY == QUEUE_READY),
2975 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
2977 change_queue_index (rtx next
, int delay
)
2979 int i
= QUEUE_INDEX (next
);
2981 gcc_assert (QUEUE_NOWHERE
<= delay
&& delay
<= max_insn_queue_index
2983 gcc_assert (i
!= QUEUE_SCHEDULED
);
2985 if ((delay
> 0 && NEXT_Q_AFTER (q_ptr
, delay
) == i
)
2986 || (delay
< 0 && delay
== i
))
2987 /* We have nothing to do. */
2990 /* Remove NEXT from wherever it is now. */
2991 if (i
== QUEUE_READY
)
2992 ready_remove_insn (next
);
2994 queue_remove (next
);
2996 /* Add it to the proper place. */
2997 if (delay
== QUEUE_READY
)
2998 ready_add (readyp
, next
, false);
2999 else if (delay
>= 1)
3000 queue_insn (next
, delay
);
3002 if (sched_verbose
>= 2)
3004 fprintf (sched_dump
, ";;\t\ttick updated: insn %s",
3005 (*current_sched_info
->print_insn
) (next
, 0));
3007 if (delay
== QUEUE_READY
)
3008 fprintf (sched_dump
, " into ready\n");
3009 else if (delay
>= 1)
3010 fprintf (sched_dump
, " into queue with cost=%d\n", delay
);
3012 fprintf (sched_dump
, " removed from ready or queue lists\n");
3016 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3018 resolve_dep (rtx next
, rtx insn
)
3022 INSN_DEP_COUNT (next
)--;
3024 dep
= remove_list_elem (insn
, &LOG_LINKS (next
));
3025 XEXP (dep
, 1) = RESOLVED_DEPS (next
);
3026 RESOLVED_DEPS (next
) = dep
;
3028 gcc_assert ((INSN_DEP_COUNT (next
) != 0 || !LOG_LINKS (next
))
3029 && (LOG_LINKS (next
) || INSN_DEP_COUNT (next
) == 0));
3032 /* Extend H_I_D data. */
3036 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3037 pseudos which do not cross calls. */
3038 int new_max_uid
= get_max_uid() + 1;
3040 h_i_d
= xrecalloc (h_i_d
, new_max_uid
, old_max_uid
, sizeof (*h_i_d
));
3041 old_max_uid
= new_max_uid
;
3043 if (targetm
.sched
.h_i_d_extended
)
3044 targetm
.sched
.h_i_d_extended ();
3047 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3048 N_NEW_INSNS is the number of additional elements to allocate. */
3050 extend_ready (int n_new_insns
)
3054 readyp
->veclen
= rgn_n_insns
+ n_new_insns
+ 1 + issue_rate
;
3055 readyp
->vec
= XRESIZEVEC (rtx
, readyp
->vec
, readyp
->veclen
);
3057 ready_try
= xrecalloc (ready_try
, rgn_n_insns
+ n_new_insns
+ 1,
3058 rgn_n_insns
+ 1, sizeof (char));
3060 rgn_n_insns
+= n_new_insns
;
3062 choice_stack
= XRESIZEVEC (struct choice_entry
, choice_stack
,
3065 for (i
= rgn_n_insns
; n_new_insns
--; i
--)
3066 choice_stack
[i
].state
= xmalloc (dfa_state_size
);
3069 /* Extend global scheduler structures (those, that live across calls to
3070 schedule_block) to include information about just emitted INSN. */
3072 extend_global (rtx insn
)
3074 gcc_assert (INSN_P (insn
));
3075 /* These structures have scheduler scope. */
3079 extend_dependency_caches (1, 0);
3082 /* Extends global and local scheduler structures to include information
3083 about just emitted INSN. */
3085 extend_all (rtx insn
)
3087 extend_global (insn
);
3089 /* These structures have block scope. */
3092 (*current_sched_info
->add_remove_insn
) (insn
, 0);
3095 /* Initialize h_i_d entry of the new INSN with default values.
3096 Values, that are not explicitly initialized here, hold zero. */
3098 init_h_i_d (rtx insn
)
3100 INSN_LUID (insn
) = luid
++;
3101 INSN_COST (insn
) = -1;
3102 TODO_SPEC (insn
) = HARD_DEP
;
3103 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
3104 INSN_TICK (insn
) = INVALID_TICK
;
3105 INTER_TICK (insn
) = INVALID_TICK
;
3106 find_insn_reg_weight1 (insn
);
3109 /* Generates recovery code for INSN. */
3111 generate_recovery_code (rtx insn
)
3113 if (TODO_SPEC (insn
) & BEGIN_SPEC
)
3114 begin_speculative_block (insn
);
3116 /* Here we have insn with no dependencies to
3117 instructions other then CHECK_SPEC ones. */
3119 if (TODO_SPEC (insn
) & BE_IN_SPEC
)
3120 add_to_speculative_block (insn
);
3124 Tries to add speculative dependencies of type FS between instructions
3125 in LINK list and TWIN. */
3127 process_insn_depend_be_in_spec (rtx link
, rtx twin
, ds_t fs
)
3129 for (; link
; link
= XEXP (link
, 1))
3134 consumer
= XEXP (link
, 0);
3136 ds
= DEP_STATUS (link
);
3138 if (/* If we want to create speculative dep. */
3140 /* And we can do that because this is a true dep. */
3141 && (ds
& DEP_TYPES
) == DEP_TRUE
)
3143 gcc_assert (!(ds
& BE_IN_SPEC
));
3145 if (/* If this dep can be overcome with 'begin speculation'. */
3147 /* Then we have a choice: keep the dep 'begin speculative'
3148 or transform it into 'be in speculative'. */
3150 if (/* In try_ready we assert that if insn once became ready
3151 it can be removed from the ready (or queue) list only
3152 due to backend decision. Hence we can't let the
3153 probability of the speculative dep to decrease. */
3154 dep_weak (ds
) <= dep_weak (fs
))
3155 /* Transform it to be in speculative. */
3156 ds
= (ds
& ~BEGIN_SPEC
) | fs
;
3159 /* Mark the dep as 'be in speculative'. */
3163 add_back_forw_dep (consumer
, twin
, REG_NOTE_KIND (link
), ds
);
3167 /* Generates recovery code for BEGIN speculative INSN. */
3169 begin_speculative_block (rtx insn
)
3171 if (TODO_SPEC (insn
) & BEGIN_DATA
)
3173 if (TODO_SPEC (insn
) & BEGIN_CONTROL
)
3176 create_check_block_twin (insn
, false);
3178 TODO_SPEC (insn
) &= ~BEGIN_SPEC
;
3181 /* Generates recovery code for BE_IN speculative INSN. */
3183 add_to_speculative_block (rtx insn
)
3186 rtx link
, twins
= NULL
;
3188 ts
= TODO_SPEC (insn
);
3189 gcc_assert (!(ts
& ~BE_IN_SPEC
));
3191 if (ts
& BE_IN_DATA
)
3193 if (ts
& BE_IN_CONTROL
)
3196 TODO_SPEC (insn
) &= ~BE_IN_SPEC
;
3197 gcc_assert (!TODO_SPEC (insn
));
3199 DONE_SPEC (insn
) |= ts
;
3201 /* First we convert all simple checks to branchy. */
3202 for (link
= LOG_LINKS (insn
); link
;)
3206 check
= XEXP (link
, 0);
3208 if (IS_SPECULATION_SIMPLE_CHECK_P (check
))
3210 create_check_block_twin (check
, true);
3211 link
= LOG_LINKS (insn
);
3214 link
= XEXP (link
, 1);
3217 clear_priorities (insn
);
3221 rtx link
, check
, twin
;
3224 link
= LOG_LINKS (insn
);
3225 gcc_assert (!(DEP_STATUS (link
) & BEGIN_SPEC
)
3226 && (DEP_STATUS (link
) & BE_IN_SPEC
)
3227 && (DEP_STATUS (link
) & DEP_TYPES
) == DEP_TRUE
);
3229 check
= XEXP (link
, 0);
3231 gcc_assert (!IS_SPECULATION_CHECK_P (check
) && !ORIG_PAT (check
)
3232 && QUEUE_INDEX (check
) == QUEUE_NOWHERE
);
3234 rec
= BLOCK_FOR_INSN (check
);
3236 twin
= emit_insn_before (copy_rtx (PATTERN (insn
)), BB_END (rec
));
3237 extend_global (twin
);
3239 RESOLVED_DEPS (twin
) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn
));
3241 if (sched_verbose
&& spec_info
->dump
)
3242 /* INSN_BB (insn) isn't determined for twin insns yet.
3243 So we can't use current_sched_info->print_insn. */
3244 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
3245 INSN_UID (twin
), rec
->index
);
3247 twins
= alloc_INSN_LIST (twin
, twins
);
3249 /* Add dependences between TWIN and all appropriate
3250 instructions from REC. */
3253 add_back_forw_dep (twin
, check
, REG_DEP_TRUE
, DEP_TRUE
);
3257 link
= XEXP (link
, 1);
3260 check
= XEXP (link
, 0);
3261 if (BLOCK_FOR_INSN (check
) == rec
)
3271 process_insn_depend_be_in_spec (INSN_DEPEND (insn
), twin
, ts
);
3273 for (link
= LOG_LINKS (insn
); link
;)
3275 check
= XEXP (link
, 0);
3277 if (BLOCK_FOR_INSN (check
) == rec
)
3279 delete_back_forw_dep (insn
, check
);
3280 link
= LOG_LINKS (insn
);
3283 link
= XEXP (link
, 1);
3286 while (LOG_LINKS (insn
));
3288 /* We can't add the dependence between insn and twin earlier because
3289 that would make twin appear in the INSN_DEPEND (insn). */
3294 twin
= XEXP (twins
, 0);
3295 calc_priorities (twin
);
3296 add_back_forw_dep (twin
, insn
, REG_DEP_OUTPUT
, DEP_OUTPUT
);
3298 twin
= XEXP (twins
, 1);
3299 free_INSN_LIST_node (twins
);
3304 /* Extends and fills with zeros (only the new part) array pointed to by P. */
3306 xrecalloc (void *p
, size_t new_nmemb
, size_t old_nmemb
, size_t size
)
3308 gcc_assert (new_nmemb
>= old_nmemb
);
3309 p
= XRESIZEVAR (void, p
, new_nmemb
* size
);
3310 memset (((char *) p
) + old_nmemb
* size
, 0, (new_nmemb
- old_nmemb
) * size
);
3314 /* Return the probability of speculation success for the speculation
3322 dt
= FIRST_SPEC_TYPE
;
3327 res
*= (ds_t
) get_dep_weak (ds
, dt
);
3331 if (dt
== LAST_SPEC_TYPE
)
3333 dt
<<= SPEC_TYPE_SHIFT
;
3339 res
/= MAX_DEP_WEAK
;
3341 if (res
< MIN_DEP_WEAK
)
3344 gcc_assert (res
<= MAX_DEP_WEAK
);
3350 Find fallthru edge from PRED. */
3352 find_fallthru_edge (basic_block pred
)
3358 succ
= pred
->next_bb
;
3359 gcc_assert (succ
->prev_bb
== pred
);
3361 if (EDGE_COUNT (pred
->succs
) <= EDGE_COUNT (succ
->preds
))
3363 FOR_EACH_EDGE (e
, ei
, pred
->succs
)
3364 if (e
->flags
& EDGE_FALLTHRU
)
3366 gcc_assert (e
->dest
== succ
);
3372 FOR_EACH_EDGE (e
, ei
, succ
->preds
)
3373 if (e
->flags
& EDGE_FALLTHRU
)
3375 gcc_assert (e
->src
== pred
);
3383 /* Initialize BEFORE_RECOVERY variable. */
3385 init_before_recovery (void)
3390 last
= EXIT_BLOCK_PTR
->prev_bb
;
3391 e
= find_fallthru_edge (last
);
3395 /* We create two basic blocks:
3396 1. Single instruction block is inserted right after E->SRC
3398 2. Empty block right before EXIT_BLOCK.
3399 Between these two blocks recovery blocks will be emitted. */
3401 basic_block single
, empty
;
3404 single
= create_empty_bb (last
);
3405 empty
= create_empty_bb (single
);
3407 single
->count
= last
->count
;
3408 empty
->count
= last
->count
;
3409 single
->frequency
= last
->frequency
;
3410 empty
->frequency
= last
->frequency
;
3411 BB_COPY_PARTITION (single
, last
);
3412 BB_COPY_PARTITION (empty
, last
);
3414 redirect_edge_succ (e
, single
);
3415 make_single_succ_edge (single
, empty
, 0);
3416 make_single_succ_edge (empty
, EXIT_BLOCK_PTR
,
3417 EDGE_FALLTHRU
| EDGE_CAN_FALLTHRU
);
3419 label
= block_label (empty
);
3420 x
= emit_jump_insn_after (gen_jump (label
), BB_END (single
));
3421 JUMP_LABEL (x
) = label
;
3422 LABEL_NUSES (label
)++;
3425 emit_barrier_after (x
);
3427 add_block (empty
, 0);
3428 add_block (single
, 0);
3430 before_recovery
= single
;
3432 if (sched_verbose
>= 2 && spec_info
->dump
)
3433 fprintf (spec_info
->dump
,
3434 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3435 last
->index
, single
->index
, empty
->index
);
3438 before_recovery
= last
;
3441 /* Returns new recovery block. */
3443 create_recovery_block (void)
3449 added_recovery_block_p
= true;
3451 if (!before_recovery
)
3452 init_before_recovery ();
3454 barrier
= get_last_bb_insn (before_recovery
);
3455 gcc_assert (BARRIER_P (barrier
));
3457 label
= emit_label_after (gen_label_rtx (), barrier
);
3459 rec
= create_basic_block (label
, label
, before_recovery
);
3461 /* Recovery block always end with an unconditional jump. */
3462 emit_barrier_after (BB_END (rec
));
3464 if (BB_PARTITION (before_recovery
) != BB_UNPARTITIONED
)
3465 BB_SET_PARTITION (rec
, BB_COLD_PARTITION
);
3467 if (sched_verbose
&& spec_info
->dump
)
3468 fprintf (spec_info
->dump
, ";;\t\tGenerated recovery block rec%d\n",
3471 before_recovery
= rec
;
3476 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3477 INSN is a simple check, that should be converted to branchy one. */
3479 create_check_block_twin (rtx insn
, bool mutate_p
)
3482 rtx label
, check
, twin
, link
;
3485 gcc_assert (ORIG_PAT (insn
)
3487 || (IS_SPECULATION_SIMPLE_CHECK_P (insn
)
3488 && !(TODO_SPEC (insn
) & SPECULATIVE
))));
3490 /* Create recovery block. */
3491 if (mutate_p
|| targetm
.sched
.needs_block_p (insn
))
3493 rec
= create_recovery_block ();
3494 label
= BB_HEAD (rec
);
3498 rec
= EXIT_BLOCK_PTR
;
3503 check
= targetm
.sched
.gen_check (insn
, label
, mutate_p
);
3505 if (rec
!= EXIT_BLOCK_PTR
)
3507 /* To have mem_reg alive at the beginning of second_bb,
3508 we emit check BEFORE insn, so insn after splitting
3509 insn will be at the beginning of second_bb, which will
3510 provide us with the correct life information. */
3511 check
= emit_jump_insn_before (check
, insn
);
3512 JUMP_LABEL (check
) = label
;
3513 LABEL_NUSES (label
)++;
3516 check
= emit_insn_before (check
, insn
);
3518 /* Extend data structures. */
3520 RECOVERY_BLOCK (check
) = rec
;
3522 if (sched_verbose
&& spec_info
->dump
)
3523 fprintf (spec_info
->dump
, ";;\t\tGenerated check insn : %s\n",
3524 (*current_sched_info
->print_insn
) (check
, 0));
3526 gcc_assert (ORIG_PAT (insn
));
3528 /* Initialize TWIN (twin is a duplicate of original instruction
3529 in the recovery block). */
3530 if (rec
!= EXIT_BLOCK_PTR
)
3534 for (link
= RESOLVED_DEPS (insn
); link
; link
= XEXP (link
, 1))
3535 if (DEP_STATUS (link
) & DEP_OUTPUT
)
3537 RESOLVED_DEPS (check
) =
3538 alloc_DEPS_LIST (XEXP (link
, 0), RESOLVED_DEPS (check
), DEP_TRUE
);
3539 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check
), REG_DEP_TRUE
);
3542 twin
= emit_insn_after (ORIG_PAT (insn
), BB_END (rec
));
3543 extend_global (twin
);
3545 if (sched_verbose
&& spec_info
->dump
)
3546 /* INSN_BB (insn) isn't determined for twin insns yet.
3547 So we can't use current_sched_info->print_insn. */
3548 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
3549 INSN_UID (twin
), rec
->index
);
3553 ORIG_PAT (check
) = ORIG_PAT (insn
);
3554 HAS_INTERNAL_DEP (check
) = 1;
3556 /* ??? We probably should change all OUTPUT dependencies to
3560 RESOLVED_DEPS (twin
) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn
));
3562 if (rec
!= EXIT_BLOCK_PTR
)
3563 /* In case of branchy check, fix CFG. */
3565 basic_block first_bb
, second_bb
;
3570 first_bb
= BLOCK_FOR_INSN (check
);
3571 e
= split_block (first_bb
, check
);
3572 /* split_block emits note if *check == BB_END. Probably it
3573 is better to rip that note off. */
3574 gcc_assert (e
->src
== first_bb
);
3575 second_bb
= e
->dest
;
3577 /* This is fixing of incoming edge. */
3578 /* ??? Which other flags should be specified? */
3579 if (BB_PARTITION (first_bb
) != BB_PARTITION (rec
))
3580 /* Partition type is the same, if it is "unpartitioned". */
3581 edge_flags
= EDGE_CROSSING
;
3585 e
= make_edge (first_bb
, rec
, edge_flags
);
3587 add_block (second_bb
, first_bb
);
3589 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb
)));
3590 label
= block_label (second_bb
);
3591 jump
= emit_jump_insn_after (gen_jump (label
), BB_END (rec
));
3592 JUMP_LABEL (jump
) = label
;
3593 LABEL_NUSES (label
)++;
3594 extend_global (jump
);
3596 if (BB_PARTITION (second_bb
) != BB_PARTITION (rec
))
3597 /* Partition type is the same, if it is "unpartitioned". */
3599 /* Rewritten from cfgrtl.c. */
3600 if (flag_reorder_blocks_and_partition
3601 && targetm
.have_named_sections
3602 /*&& !any_condjump_p (jump)*/)
3603 /* any_condjump_p (jump) == false.
3604 We don't need the same note for the check because
3605 any_condjump_p (check) == true. */
3607 REG_NOTES (jump
) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP
,
3611 edge_flags
= EDGE_CROSSING
;
3616 make_single_succ_edge (rec
, second_bb
, edge_flags
);
3618 add_block (rec
, EXIT_BLOCK_PTR
);
3621 /* Move backward dependences from INSN to CHECK and
3622 move forward dependences from INSN to TWIN. */
3623 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
3627 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3628 check --TRUE--> producer ??? or ANTI ???
3629 twin --TRUE--> producer
3630 twin --ANTI--> check
3632 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3633 check --ANTI--> producer
3634 twin --ANTI--> producer
3635 twin --ANTI--> check
3637 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3638 check ~~TRUE~~> producer
3639 twin ~~TRUE~~> producer
3640 twin --ANTI--> check */
3642 ds
= DEP_STATUS (link
);
3644 if (ds
& BEGIN_SPEC
)
3646 gcc_assert (!mutate_p
);
3650 if (rec
!= EXIT_BLOCK_PTR
)
3652 add_back_forw_dep (check
, XEXP (link
, 0), REG_NOTE_KIND (link
), ds
);
3653 add_back_forw_dep (twin
, XEXP (link
, 0), REG_NOTE_KIND (link
), ds
);
3656 add_back_forw_dep (check
, XEXP (link
, 0), REG_NOTE_KIND (link
), ds
);
3659 for (link
= LOG_LINKS (insn
); link
;)
3660 if ((DEP_STATUS (link
) & BEGIN_SPEC
)
3662 /* We can delete this dep only if we totally overcome it with
3663 BEGIN_SPECULATION. */
3665 delete_back_forw_dep (insn
, XEXP (link
, 0));
3666 link
= LOG_LINKS (insn
);
3669 link
= XEXP (link
, 1);
3673 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3676 gcc_assert (!DONE_SPEC (insn
));
3680 ds_t ts
= TODO_SPEC (insn
);
3682 DONE_SPEC (insn
) = ts
& BEGIN_SPEC
;
3683 CHECK_SPEC (check
) = ts
& BEGIN_SPEC
;
3685 if (ts
& BEGIN_DATA
)
3686 fs
= set_dep_weak (fs
, BE_IN_DATA
, get_dep_weak (ts
, BEGIN_DATA
));
3687 if (ts
& BEGIN_CONTROL
)
3688 fs
= set_dep_weak (fs
, BE_IN_CONTROL
, get_dep_weak (ts
, BEGIN_CONTROL
));
3691 CHECK_SPEC (check
) = CHECK_SPEC (insn
);
3693 /* Future speculations: call the helper. */
3694 process_insn_depend_be_in_spec (INSN_DEPEND (insn
), twin
, fs
);
3696 if (rec
!= EXIT_BLOCK_PTR
)
3698 /* Which types of dependencies should we use here is,
3699 generally, machine-dependent question... But, for now,
3704 add_back_forw_dep (check
, insn
, REG_DEP_TRUE
, DEP_TRUE
);
3705 add_back_forw_dep (twin
, insn
, REG_DEP_OUTPUT
, DEP_OUTPUT
);
3709 if (spec_info
->dump
)
3710 fprintf (spec_info
->dump
, ";;\t\tRemoved simple check : %s\n",
3711 (*current_sched_info
->print_insn
) (insn
, 0));
3713 for (link
= INSN_DEPEND (insn
); link
; link
= INSN_DEPEND (insn
))
3714 delete_back_forw_dep (XEXP (link
, 0), insn
);
3716 if (QUEUE_INDEX (insn
) != QUEUE_NOWHERE
)
3719 sched_remove_insn (insn
);
3722 add_back_forw_dep (twin
, check
, REG_DEP_ANTI
, DEP_ANTI
);
3725 add_back_forw_dep (check
, insn
, REG_DEP_TRUE
, DEP_TRUE
| DEP_OUTPUT
);
3728 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3729 because it'll be done later in add_to_speculative_block. */
3731 clear_priorities (twin
);
3732 calc_priorities (twin
);
3736 /* Removes dependency between instructions in the recovery block REC
3737 and usual region instructions. It keeps inner dependences so it
3738 won't be necessary to recompute them. */
3740 fix_recovery_deps (basic_block rec
)
3742 rtx note
, insn
, link
, jump
, ready_list
= 0;
3743 bitmap_head in_ready
;
3745 bitmap_initialize (&in_ready
, 0);
3747 /* NOTE - a basic block note. */
3748 note
= NEXT_INSN (BB_HEAD (rec
));
3749 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
3750 insn
= BB_END (rec
);
3751 gcc_assert (JUMP_P (insn
));
3752 insn
= PREV_INSN (insn
);
3756 for (link
= INSN_DEPEND (insn
); link
;)
3760 consumer
= XEXP (link
, 0);
3762 if (BLOCK_FOR_INSN (consumer
) != rec
)
3764 delete_back_forw_dep (consumer
, insn
);
3766 if (!bitmap_bit_p (&in_ready
, INSN_LUID (consumer
)))
3768 ready_list
= alloc_INSN_LIST (consumer
, ready_list
);
3769 bitmap_set_bit (&in_ready
, INSN_LUID (consumer
));
3772 link
= INSN_DEPEND (insn
);
3776 gcc_assert ((DEP_STATUS (link
) & DEP_TYPES
) == DEP_TRUE
);
3778 link
= XEXP (link
, 1);
3782 insn
= PREV_INSN (insn
);
3784 while (insn
!= note
);
3786 bitmap_clear (&in_ready
);
3788 /* Try to add instructions to the ready or queue list. */
3789 for (link
= ready_list
; link
; link
= XEXP (link
, 1))
3790 try_ready (XEXP (link
, 0));
3791 free_INSN_LIST_list (&ready_list
);
3793 /* Fixing jump's dependences. */
3794 insn
= BB_HEAD (rec
);
3795 jump
= BB_END (rec
);
3797 gcc_assert (LABEL_P (insn
));
3798 insn
= NEXT_INSN (insn
);
3800 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn
));
3801 add_jump_dependencies (insn
, jump
);
3804 /* Changes pattern of the INSN to NEW_PAT. */
3806 change_pattern (rtx insn
, rtx new_pat
)
3810 t
= validate_change (insn
, &PATTERN (insn
), new_pat
, 0);
3812 /* Invalidate INSN_COST, so it'll be recalculated. */
3813 INSN_COST (insn
) = -1;
3814 /* Invalidate INSN_TICK, so it'll be recalculated. */
3815 INSN_TICK (insn
) = INVALID_TICK
;
3816 dfa_clear_single_insn_cache (insn
);
3820 /* -1 - can't speculate,
3821 0 - for speculation with REQUEST mode it is OK to use
3822 current instruction pattern,
3823 1 - need to change pattern for *NEW_PAT to be speculative. */
3825 speculate_insn (rtx insn
, ds_t request
, rtx
*new_pat
)
3827 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
3828 && (request
& SPECULATIVE
));
3830 if (!NONJUMP_INSN_P (insn
)
3831 || HAS_INTERNAL_DEP (insn
)
3832 || SCHED_GROUP_P (insn
)
3833 || side_effects_p (PATTERN (insn
))
3834 || (request
& spec_info
->mask
) != request
)
3837 gcc_assert (!IS_SPECULATION_CHECK_P (insn
));
3839 if (request
& BE_IN_SPEC
)
3841 if (may_trap_p (PATTERN (insn
)))
3844 if (!(request
& BEGIN_SPEC
))
3848 return targetm
.sched
.speculate_insn (insn
, request
& BEGIN_SPEC
, new_pat
);
3851 /* Print some information about block BB, which starts with HEAD and
3852 ends with TAIL, before scheduling it.
3853 I is zero, if scheduler is about to start with the fresh ebb. */
3855 dump_new_block_header (int i
, basic_block bb
, rtx head
, rtx tail
)
3858 fprintf (sched_dump
,
3859 ";; ======================================================\n");
3861 fprintf (sched_dump
,
3862 ";; =====================ADVANCING TO=====================\n");
3863 fprintf (sched_dump
,
3864 ";; -- basic block %d from %d to %d -- %s reload\n",
3865 bb
->index
, INSN_UID (head
), INSN_UID (tail
),
3866 (reload_completed
? "after" : "before"));
3867 fprintf (sched_dump
,
3868 ";; ======================================================\n");
3869 fprintf (sched_dump
, "\n");
3872 /* Unlink basic block notes and labels and saves them, so they
3873 can be easily restored. We unlink basic block notes in EBB to
3874 provide back-compatibility with the previous code, as target backends
3875 assume, that there'll be only instructions between
3876 current_sched_info->{head and tail}. We restore these notes as soon
3878 FIRST (LAST) is the first (last) basic block in the ebb.
3879 NB: In usual case (FIRST == LAST) nothing is really done. */
3881 unlink_bb_notes (basic_block first
, basic_block last
)
3883 /* We DON'T unlink basic block notes of the first block in the ebb. */
3887 bb_header
= xmalloc (last_basic_block
* sizeof (*bb_header
));
3889 /* Make a sentinel. */
3890 if (last
->next_bb
!= EXIT_BLOCK_PTR
)
3891 bb_header
[last
->next_bb
->index
] = 0;
3893 first
= first
->next_bb
;
3896 rtx prev
, label
, note
, next
;
3898 label
= BB_HEAD (last
);
3899 if (LABEL_P (label
))
3900 note
= NEXT_INSN (label
);
3903 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
3905 prev
= PREV_INSN (label
);
3906 next
= NEXT_INSN (note
);
3907 gcc_assert (prev
&& next
);
3909 NEXT_INSN (prev
) = next
;
3910 PREV_INSN (next
) = prev
;
3912 bb_header
[last
->index
] = label
;
3917 last
= last
->prev_bb
;
3922 /* Restore basic block notes.
3923 FIRST is the first basic block in the ebb. */
3925 restore_bb_notes (basic_block first
)
3930 /* We DON'T unlink basic block notes of the first block in the ebb. */
3931 first
= first
->next_bb
;
3932 /* Remember: FIRST is actually a second basic block in the ebb. */
3934 while (first
!= EXIT_BLOCK_PTR
3935 && bb_header
[first
->index
])
3937 rtx prev
, label
, note
, next
;
3939 label
= bb_header
[first
->index
];
3940 prev
= PREV_INSN (label
);
3941 next
= NEXT_INSN (prev
);
3943 if (LABEL_P (label
))
3944 note
= NEXT_INSN (label
);
3947 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
3949 bb_header
[first
->index
] = 0;
3951 NEXT_INSN (prev
) = label
;
3952 NEXT_INSN (note
) = next
;
3953 PREV_INSN (next
) = note
;
3955 first
= first
->next_bb
;
3962 /* Extend per basic block data structures of the scheduler.
3963 If BB is NULL, initialize structures for the whole CFG.
3964 Otherwise, initialize them for the just created BB. */
3970 old_last_basic_block
= last_basic_block
;
3972 if (current_sched_info
->flags
& USE_GLAT
)
3974 glat_start
= xrealloc (glat_start
,
3975 last_basic_block
* sizeof (*glat_start
));
3976 glat_end
= xrealloc (glat_end
, last_basic_block
* sizeof (*glat_end
));
3979 /* The following is done to keep current_sched_info->next_tail non null. */
3981 insn
= BB_END (EXIT_BLOCK_PTR
->prev_bb
);
3982 if (NEXT_INSN (insn
) == 0
3985 /* Don't emit a NOTE if it would end up before a BARRIER. */
3986 && !BARRIER_P (NEXT_INSN (insn
))))
3988 emit_note_after (NOTE_INSN_DELETED
, insn
);
3989 /* Make insn to appear outside BB. */
3990 BB_END (EXIT_BLOCK_PTR
->prev_bb
) = insn
;
3994 /* Add a basic block BB to extended basic block EBB.
3995 If EBB is EXIT_BLOCK_PTR, then BB is recovery block.
3996 If EBB is NULL, then BB should be a new region. */
3998 add_block (basic_block bb
, basic_block ebb
)
4000 gcc_assert (current_sched_info
->flags
& DETACH_LIFE_INFO
4001 && bb
->il
.rtl
->global_live_at_start
== 0
4002 && bb
->il
.rtl
->global_live_at_end
== 0);
4006 glat_start
[bb
->index
] = 0;
4007 glat_end
[bb
->index
] = 0;
4009 if (current_sched_info
->add_block
)
4010 /* This changes only data structures of the front-end. */
4011 current_sched_info
->add_block (bb
, ebb
);
4015 Fix CFG after both in- and inter-block movement of
4016 control_flow_insn_p JUMP. */
4018 fix_jump_move (rtx jump
)
4020 basic_block bb
, jump_bb
, jump_bb_next
;
4022 bb
= BLOCK_FOR_INSN (PREV_INSN (jump
));
4023 jump_bb
= BLOCK_FOR_INSN (jump
);
4024 jump_bb_next
= jump_bb
->next_bb
;
4026 gcc_assert (current_sched_info
->flags
& SCHED_EBB
4027 || IS_SPECULATION_BRANCHY_CHECK_P (jump
));
4029 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next
)))
4030 /* if jump_bb_next is not empty. */
4031 BB_END (jump_bb
) = BB_END (jump_bb_next
);
4033 if (BB_END (bb
) != PREV_INSN (jump
))
4034 /* Then there are instruction after jump that should be placed
4036 BB_END (jump_bb_next
) = BB_END (bb
);
4038 /* Otherwise jump_bb_next is empty. */
4039 BB_END (jump_bb_next
) = NEXT_INSN (BB_HEAD (jump_bb_next
));
4041 /* To make assertion in move_insn happy. */
4042 BB_END (bb
) = PREV_INSN (jump
);
4044 update_bb_for_insn (jump_bb_next
);
4047 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4049 move_block_after_check (rtx jump
)
4051 basic_block bb
, jump_bb
, jump_bb_next
;
4054 bb
= BLOCK_FOR_INSN (PREV_INSN (jump
));
4055 jump_bb
= BLOCK_FOR_INSN (jump
);
4056 jump_bb_next
= jump_bb
->next_bb
;
4058 update_bb_for_insn (jump_bb
);
4060 gcc_assert (IS_SPECULATION_CHECK_P (jump
)
4061 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next
)));
4063 unlink_block (jump_bb_next
);
4064 link_block (jump_bb_next
, bb
);
4068 move_succs (&(jump_bb
->succs
), bb
);
4069 move_succs (&(jump_bb_next
->succs
), jump_bb
);
4070 move_succs (&t
, jump_bb_next
);
4072 if (current_sched_info
->fix_recovery_cfg
)
4073 current_sched_info
->fix_recovery_cfg
4074 (bb
->index
, jump_bb
->index
, jump_bb_next
->index
);
4077 /* Helper function for move_block_after_check.
4078 This functions attaches edge vector pointed to by SUCCSP to
4081 move_succs (VEC(edge
,gc
) **succsp
, basic_block to
)
4086 gcc_assert (to
->succs
== 0);
4088 to
->succs
= *succsp
;
4090 FOR_EACH_EDGE (e
, ei
, to
->succs
)
4096 /* Initialize GLAT (global_live_at_{start, end}) structures.
4097 GLAT structures are used to substitute global_live_{start, end}
4098 regsets during scheduling. This is necessary to use such functions as
4099 split_block (), as they assume consistency of register live information. */
4109 /* Helper function for init_glat. */
4111 init_glat1 (basic_block bb
)
4113 gcc_assert (bb
->il
.rtl
->global_live_at_start
!= 0
4114 && bb
->il
.rtl
->global_live_at_end
!= 0);
4116 glat_start
[bb
->index
] = bb
->il
.rtl
->global_live_at_start
;
4117 glat_end
[bb
->index
] = bb
->il
.rtl
->global_live_at_end
;
4119 if (current_sched_info
->flags
& DETACH_LIFE_INFO
)
4121 bb
->il
.rtl
->global_live_at_start
= 0;
4122 bb
->il
.rtl
->global_live_at_end
= 0;
4126 /* Attach reg_live_info back to basic blocks.
4127 Also save regsets, that should not have been changed during scheduling,
4128 for checking purposes (see check_reg_live). */
4130 attach_life_info (void)
4135 attach_life_info1 (bb
);
4138 /* Helper function for attach_life_info. */
4140 attach_life_info1 (basic_block bb
)
4142 gcc_assert (bb
->il
.rtl
->global_live_at_start
== 0
4143 && bb
->il
.rtl
->global_live_at_end
== 0);
4145 if (glat_start
[bb
->index
])
4147 gcc_assert (glat_end
[bb
->index
]);
4149 bb
->il
.rtl
->global_live_at_start
= glat_start
[bb
->index
];
4150 bb
->il
.rtl
->global_live_at_end
= glat_end
[bb
->index
];
4152 /* Make them NULL, so they won't be freed in free_glat. */
4153 glat_start
[bb
->index
] = 0;
4154 glat_end
[bb
->index
] = 0;
4156 #ifdef ENABLE_CHECKING
4157 if (bb
->index
< NUM_FIXED_BLOCKS
4158 || current_sched_info
->region_head_or_leaf_p (bb
, 0))
4160 glat_start
[bb
->index
] = ALLOC_REG_SET (®_obstack
);
4161 COPY_REG_SET (glat_start
[bb
->index
],
4162 bb
->il
.rtl
->global_live_at_start
);
4165 if (bb
->index
< NUM_FIXED_BLOCKS
4166 || current_sched_info
->region_head_or_leaf_p (bb
, 1))
4168 glat_end
[bb
->index
] = ALLOC_REG_SET (®_obstack
);
4169 COPY_REG_SET (glat_end
[bb
->index
], bb
->il
.rtl
->global_live_at_end
);
4175 gcc_assert (!glat_end
[bb
->index
]);
4177 bb
->il
.rtl
->global_live_at_start
= ALLOC_REG_SET (®_obstack
);
4178 bb
->il
.rtl
->global_live_at_end
= ALLOC_REG_SET (®_obstack
);
4182 /* Free GLAT information. */
4186 #ifdef ENABLE_CHECKING
4187 if (current_sched_info
->flags
& DETACH_LIFE_INFO
)
4193 if (glat_start
[bb
->index
])
4194 FREE_REG_SET (glat_start
[bb
->index
]);
4195 if (glat_end
[bb
->index
])
4196 FREE_REG_SET (glat_end
[bb
->index
]);
4205 /* Remove INSN from the instruction stream.
4206 INSN should have any dependencies. */
4208 sched_remove_insn (rtx insn
)
4210 change_queue_index (insn
, QUEUE_NOWHERE
);
4211 current_sched_info
->add_remove_insn (insn
, 1);
4215 /* Clear priorities of all instructions, that are
4216 forward dependent on INSN. */
4218 clear_priorities (rtx insn
)
4222 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
4226 pro
= XEXP (link
, 0);
4227 if (INSN_PRIORITY_KNOWN (pro
))
4229 INSN_PRIORITY_KNOWN (pro
) = 0;
4230 clear_priorities (pro
);
4235 /* Recompute priorities of instructions, whose priorities might have been
4236 changed due to changes in INSN. */
4238 calc_priorities (rtx insn
)
4242 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
4246 pro
= XEXP (link
, 0);
4247 if (!INSN_PRIORITY_KNOWN (pro
))
4250 calc_priorities (pro
);
4256 /* Add dependences between JUMP and other instructions in the recovery
4257 block. INSN is the first insn the recovery block. */
4259 add_jump_dependencies (rtx insn
, rtx jump
)
4263 insn
= NEXT_INSN (insn
);
4267 if (!INSN_DEPEND (insn
))
4268 add_back_forw_dep (jump
, insn
, REG_DEP_ANTI
, DEP_ANTI
);
4271 gcc_assert (LOG_LINKS (jump
));
4274 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4276 bb_note (basic_block bb
)
4280 note
= BB_HEAD (bb
);
4282 note
= NEXT_INSN (note
);
4284 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
4288 #ifdef ENABLE_CHECKING
4289 extern void debug_spec_status (ds_t
);
4291 /* Dump information about the dependence status S. */
4293 debug_spec_status (ds_t s
)
4298 fprintf (f
, "BEGIN_DATA: %d; ", get_dep_weak (s
, BEGIN_DATA
));
4300 fprintf (f
, "BE_IN_DATA: %d; ", get_dep_weak (s
, BE_IN_DATA
));
4301 if (s
& BEGIN_CONTROL
)
4302 fprintf (f
, "BEGIN_CONTROL: %d; ", get_dep_weak (s
, BEGIN_CONTROL
));
4303 if (s
& BE_IN_CONTROL
)
4304 fprintf (f
, "BE_IN_CONTROL: %d; ", get_dep_weak (s
, BE_IN_CONTROL
));
4307 fprintf (f
, "HARD_DEP; ");
4310 fprintf (f
, "DEP_TRUE; ");
4312 fprintf (f
, "DEP_ANTI; ");
4314 fprintf (f
, "DEP_OUTPUT; ");
4319 /* Helper function for check_cfg.
4320 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
4323 has_edge_p (VEC(edge
,gc
) *el
, int type
)
4328 FOR_EACH_EDGE (e
, ei
, el
)
4329 if (e
->flags
& type
)
4334 /* Check few properties of CFG between HEAD and TAIL.
4335 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
4336 instruction stream. */
4338 check_cfg (rtx head
, rtx tail
)
4342 int not_first
= 0, not_last
;
4345 head
= get_insns ();
4347 tail
= get_last_insn ();
4348 next_tail
= NEXT_INSN (tail
);
4352 not_last
= head
!= tail
;
4355 gcc_assert (NEXT_INSN (PREV_INSN (head
)) == head
);
4357 gcc_assert (PREV_INSN (NEXT_INSN (head
)) == head
);
4360 || (NOTE_INSN_BASIC_BLOCK_P (head
)
4362 || (not_first
&& !LABEL_P (PREV_INSN (head
))))))
4364 gcc_assert (bb
== 0);
4365 bb
= BLOCK_FOR_INSN (head
);
4367 gcc_assert (BB_HEAD (bb
) == head
);
4369 /* This is the case of jump table. See inside_basic_block_p (). */
4370 gcc_assert (LABEL_P (head
) && !inside_basic_block_p (head
));
4375 gcc_assert (!inside_basic_block_p (head
));
4376 head
= NEXT_INSN (head
);
4380 gcc_assert (inside_basic_block_p (head
)
4382 gcc_assert (BLOCK_FOR_INSN (head
) == bb
);
4386 head
= NEXT_INSN (head
);
4387 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head
));
4391 if (control_flow_insn_p (head
))
4393 gcc_assert (BB_END (bb
) == head
);
4395 if (any_uncondjump_p (head
))
4396 gcc_assert (EDGE_COUNT (bb
->succs
) == 1
4397 && BARRIER_P (NEXT_INSN (head
)));
4398 else if (any_condjump_p (head
))
4399 gcc_assert (/* Usual case. */
4400 (EDGE_COUNT (bb
->succs
) > 1
4401 && !BARRIER_P (NEXT_INSN (head
)))
4402 /* Or jump to the next instruction. */
4403 || (EDGE_COUNT (bb
->succs
) == 1
4404 && (BB_HEAD (EDGE_I (bb
->succs
, 0)->dest
)
4405 == JUMP_LABEL (head
))));
4407 if (BB_END (bb
) == head
)
4409 if (EDGE_COUNT (bb
->succs
) > 1)
4410 gcc_assert (control_flow_insn_p (head
)
4411 || has_edge_p (bb
->succs
, EDGE_COMPLEX
));
4415 head
= NEXT_INSN (head
);
4421 while (head
!= next_tail
);
4423 gcc_assert (bb
== 0);
4426 /* Perform a few consistency checks of flags in different data structures. */
4428 check_sched_flags (void)
4430 unsigned int f
= current_sched_info
->flags
;
4432 if (flag_sched_stalled_insns
)
4433 gcc_assert (!(f
& DO_SPECULATION
));
4434 if (f
& DO_SPECULATION
)
4435 gcc_assert (!flag_sched_stalled_insns
4436 && (f
& DETACH_LIFE_INFO
)
4438 && spec_info
->mask
);
4439 if (f
& DETACH_LIFE_INFO
)
4440 gcc_assert (f
& USE_GLAT
);
4443 /* Check global_live_at_{start, end} regsets.
4444 If FATAL_P is TRUE, then abort execution at the first failure.
4445 Otherwise, print diagnostics to STDERR (this mode is for calling
4448 check_reg_live (bool fatal_p
)
4460 bool b
= bitmap_equal_p (bb
->il
.rtl
->global_live_at_start
,
4465 gcc_assert (!fatal_p
);
4467 fprintf (stderr
, ";; check_reg_live_at_start (%d) failed.\n", i
);
4473 bool b
= bitmap_equal_p (bb
->il
.rtl
->global_live_at_end
,
4478 gcc_assert (!fatal_p
);
4480 fprintf (stderr
, ";; check_reg_live_at_end (%d) failed.\n", i
);
4485 #endif /* ENABLE_CHECKING */
4487 #endif /* INSN_SCHEDULING */