1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts.  The actual entry point for the
26 normal instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
62 2. choose insn with least contribution to register pressure,
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7. choose the insn which has the most insns dependent on it, or finally
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using LOG_LINKS backward
86 dependences. LOG_LINKS are translated into INSN_DEPEND forward
87 dependences for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
129 #include "coretypes.h"
134 #include "hard-reg-set.h"
136 #include "function.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
143 #include "sched-int.h"
148 #ifdef INSN_SCHEDULING
150 /* issue_rate is the number of insns that can be scheduled in the same
151 machine cycle. It can be defined in the config/mach/mach.h file,
152 otherwise we set it to 1. */
154 static int issue_rate
;
156 /* sched-verbose controls the amount of debugging output the
157 scheduler prints. It is controlled by -fsched-verbose=N:
158 N>0 and no -DSR : the output is directed to stderr.
159 N>=10 will direct the printouts to stderr (regardless of -dSR).
161 N=2: bb's probabilities, detailed ready list info, unit/insn info.
162 N=3: rtl at abort point, control-flow, regions info.
163 N=5: dependences info. */
165 static int sched_verbose_param
= 0;
166 int sched_verbose
= 0;
168 /* Debugging file. All printouts are sent to dump, which is always set,
169 either to stderr, or to the dump listing file (-dRS). */
170 FILE *sched_dump
= 0;
172 /* Highest uid before scheduling. */
173 static int old_max_uid
;
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
179 fix_sched_param (const char *param
, const char *val
)
181 if (!strcmp (param
, "verbose"))
182 sched_verbose_param
= atoi (val
);
184 warning (0, "fix_sched_param: unknown param: %s", param
);
187 struct haifa_insn_data
*h_i_d
;
189 #define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
190 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
191 #define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
193 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
194 then it should be recalculated from scratch. */
195 #define INVALID_TICK (-(max_insn_queue_index + 1))
196 /* The minimal value of the INSN_TICK of an instruction. */
197 #define MIN_TICK (-max_insn_queue_index)
199 /* Issue points are used to distinguish between instructions in max_issue ().
200 For now, all instructions are equally good. */
201 #define ISSUE_POINTS(INSN) 1
203 /* Vector indexed by basic block number giving the starting line-number
204 for each basic block. */
205 static rtx
*line_note_head
;
207 /* List of important notes we must keep around. This is a pointer to the
208 last element in the list. */
209 static rtx note_list
;
211 static struct spec_info_def spec_info_var
;
212 /* Description of the speculative part of the scheduling.
213 If NULL - no speculation. */
214 static spec_info_t spec_info
;
216 /* True, if recovery block was added during scheduling of current block.
217 Used to determine, if we need to fix INSN_TICKs. */
218 static bool added_recovery_block_p
;
220 /* Counters of different types of speculative instructions. */
221 static int nr_begin_data
, nr_be_in_data
, nr_begin_control
, nr_be_in_control
;
223 /* Pointers to GLAT data. See init_glat for more information. */
224 regset
*glat_start
, *glat_end
;
226 /* Array used in {unlink, restore}_bb_notes. */
227 static rtx
*bb_header
= 0;
229 /* Number of basic_blocks. */
230 static int old_last_basic_block
;
232 /* Basic block after which recovery blocks will be created. */
233 static basic_block before_recovery
;
237 /* An instruction is ready to be scheduled when all insns preceding it
238 have already been scheduled. It is important to ensure that all
239 insns which use its result will not be executed until its result
240 has been computed. An insn is maintained in one of four structures:
242 (P) the "Pending" set of insns which cannot be scheduled until
243 their dependencies have been satisfied.
244 (Q) the "Queued" set of insns that can be scheduled when sufficient
246 (R) the "Ready" list of unscheduled, uncommitted insns.
247 (S) the "Scheduled" list of insns.
249 Initially, all insns are either "Pending" or "Ready" depending on
250 whether their dependencies are satisfied.
252 Insns move from the "Ready" list to the "Scheduled" list as they
253 are committed to the schedule. As this occurs, the insns in the
254 "Pending" list have their dependencies satisfied and move to either
255 the "Ready" list or the "Queued" set depending on whether
256 sufficient time has passed to make them ready. As time passes,
257 insns move from the "Queued" set to the "Ready" list.
259 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
260 insns, i.e., those that are ready, queued, and pending.
261 The "Queued" set (Q) is implemented by the variable `insn_queue'.
262 The "Ready" list (R) is implemented by the variables `ready' and
264 The "Scheduled" list (S) is the new insn chain built by this pass.
266 The transition (R->S) is implemented in the scheduling loop in
267 `schedule_block' when the best insn to schedule is chosen.
268 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
269 insns move from the ready list to the scheduled list.
270 The transition (Q->R) is implemented in 'queue_to_insn' as time
271 passes or stalls are introduced. */
273 /* Implement a circular buffer to delay instructions until sufficient
274 time has passed. For the new pipeline description interface,
275 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
276 than maximal time of instruction execution computed by genattr.c on
277 the base maximal time of functional unit reservations and getting a
278 result. This is the longest time an insn may be queued. */
280 static rtx
*insn_queue
;
281 static int q_ptr
= 0;
282 static int q_size
= 0;
283 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
284 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
286 #define QUEUE_SCHEDULED (-3)
287 #define QUEUE_NOWHERE (-2)
288 #define QUEUE_READY (-1)
289 /* QUEUE_SCHEDULED - INSN is scheduled.
290 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
292 QUEUE_READY - INSN is in ready list.
293 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
295 #define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
297 /* The following variable value refers for all current and future
298 reservations of the processor units. */
301 /* The following variable value is size of memory representing all
302 current and future reservations of the processor units. */
303 static size_t dfa_state_size
;
305 /* The following array is used to find the best insn from ready when
306 the automaton pipeline interface is used. */
307 static char *ready_try
;
309 /* Describe the ready list of the scheduler.
310 VEC holds space enough for all insns in the current region. VECLEN
311 says how many exactly.
312 FIRST is the index of the element with the highest priority; i.e. the
313 last one in the ready list, since elements are ordered by ascending
315 N_READY determines how many insns are on the ready list. */
325 /* The pointer to the ready list. */
326 static struct ready_list
*readyp
;
328 /* Scheduling clock. */
329 static int clock_var
;
331 /* Number of instructions in current scheduling region. */
332 static int rgn_n_insns
;
334 static int may_trap_exp (rtx
, int);
336 /* Nonzero iff the address is comprised from at most 1 register. */
337 #define CONST_BASED_ADDRESS_P(x) \
339 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
340 || (GET_CODE (x) == LO_SUM)) \
341 && (CONSTANT_P (XEXP (x, 0)) \
342 || CONSTANT_P (XEXP (x, 1)))))
344 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
345 as found by analyzing insn's expression. */
348 may_trap_exp (rtx x
, int is_store
)
357 if (code
== MEM
&& may_trap_p (x
))
364 /* The insn uses memory: a volatile load. */
365 if (MEM_VOLATILE_P (x
))
367 /* An exception-free load. */
370 /* A load with 1 base register, to be further checked. */
371 if (CONST_BASED_ADDRESS_P (XEXP (x
, 0)))
372 return PFREE_CANDIDATE
;
373 /* No info on the load, to be further checked. */
374 return PRISKY_CANDIDATE
;
379 int i
, insn_class
= TRAP_FREE
;
381 /* Neither store nor load, check if it may cause a trap. */
384 /* Recursive step: walk the insn... */
385 fmt
= GET_RTX_FORMAT (code
);
386 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
390 int tmp_class
= may_trap_exp (XEXP (x
, i
), is_store
);
391 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
393 else if (fmt
[i
] == 'E')
396 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
398 int tmp_class
= may_trap_exp (XVECEXP (x
, i
, j
), is_store
);
399 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
400 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
404 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
411 /* Classifies insn for the purpose of verifying that it can be
412 moved speculatively, by examining it's patterns, returning:
413 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
414 TRAP_FREE: non-load insn.
415 IFREE: load from a globally safe location.
416 IRISKY: volatile load.
417 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
418 being either PFREE or PRISKY. */
421 haifa_classify_insn (rtx insn
)
423 rtx pat
= PATTERN (insn
);
424 int tmp_class
= TRAP_FREE
;
425 int insn_class
= TRAP_FREE
;
428 if (GET_CODE (pat
) == PARALLEL
)
430 int i
, len
= XVECLEN (pat
, 0);
432 for (i
= len
- 1; i
>= 0; i
--)
434 code
= GET_CODE (XVECEXP (pat
, 0, i
));
438 /* Test if it is a 'store'. */
439 tmp_class
= may_trap_exp (XEXP (XVECEXP (pat
, 0, i
), 0), 1);
442 /* Test if it is a store. */
443 tmp_class
= may_trap_exp (SET_DEST (XVECEXP (pat
, 0, i
)), 1);
444 if (tmp_class
== TRAP_RISKY
)
446 /* Test if it is a load. */
448 = WORST_CLASS (tmp_class
,
449 may_trap_exp (SET_SRC (XVECEXP (pat
, 0, i
)),
454 tmp_class
= TRAP_RISKY
;
459 insn_class
= WORST_CLASS (insn_class
, tmp_class
);
460 if (insn_class
== TRAP_RISKY
|| insn_class
== IRISKY
)
466 code
= GET_CODE (pat
);
470 /* Test if it is a 'store'. */
471 tmp_class
= may_trap_exp (XEXP (pat
, 0), 1);
474 /* Test if it is a store. */
475 tmp_class
= may_trap_exp (SET_DEST (pat
), 1);
476 if (tmp_class
== TRAP_RISKY
)
478 /* Test if it is a load. */
480 WORST_CLASS (tmp_class
,
481 may_trap_exp (SET_SRC (pat
), 0));
485 tmp_class
= TRAP_RISKY
;
489 insn_class
= tmp_class
;
495 /* Forward declarations. */
497 HAIFA_INLINE
static int insn_cost1 (rtx
, enum reg_note
, rtx
, rtx
);
498 static int priority (rtx
);
499 static int rank_for_schedule (const void *, const void *);
500 static void swap_sort (rtx
*, int);
501 static void queue_insn (rtx
, int);
502 static int schedule_insn (rtx
);
503 static int find_set_reg_weight (rtx
);
504 static void find_insn_reg_weight (basic_block
);
505 static void find_insn_reg_weight1 (rtx
);
506 static void adjust_priority (rtx
);
507 static void advance_one_cycle (void);
509 /* Notes handling mechanism:
510 =========================
511 Generally, NOTES are saved before scheduling and restored after scheduling.
512 The scheduler distinguishes between three types of notes:
514 (1) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
515 Before scheduling a region, a pointer to the note is added to the insn
516 that follows or precedes it. (This happens as part of the data dependence
517 computation). After scheduling an insn, the pointer contained in it is
518 used for regenerating the corresponding note (in reemit_notes).
520 (2) All other notes (e.g. INSN_DELETED): Before scheduling a block,
521 these notes are put in a list (in rm_other_notes() and
522 unlink_other_notes ()). After scheduling the block, these notes are
523 inserted at the beginning of the block (in schedule_block()). */
525 static rtx
unlink_other_notes (rtx
, rtx
);
526 static void reemit_notes (rtx
);
528 static rtx
*ready_lastpos (struct ready_list
*);
529 static void ready_add (struct ready_list
*, rtx
, bool);
530 static void ready_sort (struct ready_list
*);
531 static rtx
ready_remove_first (struct ready_list
*);
533 static void queue_to_ready (struct ready_list
*);
534 static int early_queue_to_ready (state_t
, struct ready_list
*);
536 static void debug_ready_list (struct ready_list
*);
538 static void move_insn (rtx
);
540 /* The following functions are used to implement multi-pass scheduling
541 on the first cycle. */
542 static rtx
ready_element (struct ready_list
*, int);
543 static rtx
ready_remove (struct ready_list
*, int);
544 static void ready_remove_insn (rtx
);
545 static int max_issue (struct ready_list
*, int *, int);
547 static rtx
choose_ready (struct ready_list
*);
549 static void fix_inter_tick (rtx
, rtx
);
550 static int fix_tick_ready (rtx
);
551 static void change_queue_index (rtx
, int);
552 static void resolve_dep (rtx
, rtx
);
554 /* The following functions are used to implement scheduling of data/control
555 speculative instructions. */
557 static void extend_h_i_d (void);
558 static void extend_ready (int);
559 static void extend_global (rtx
);
560 static void extend_all (rtx
);
561 static void init_h_i_d (rtx
);
562 static void generate_recovery_code (rtx
);
563 static void process_insn_depend_be_in_spec (rtx
, rtx
, ds_t
);
564 static void begin_speculative_block (rtx
);
565 static void add_to_speculative_block (rtx
);
566 static dw_t
dep_weak (ds_t
);
567 static edge
find_fallthru_edge (basic_block
);
568 static void init_before_recovery (void);
569 static basic_block
create_recovery_block (void);
570 static void create_check_block_twin (rtx
, bool);
571 static void fix_recovery_deps (basic_block
);
572 static void associate_line_notes_with_blocks (basic_block
);
573 static void change_pattern (rtx
, rtx
);
574 static int speculate_insn (rtx
, ds_t
, rtx
*);
575 static void dump_new_block_header (int, basic_block
, rtx
, rtx
);
576 static void restore_bb_notes (basic_block
);
577 static void extend_bb (basic_block
);
578 static void fix_jump_move (rtx
);
579 static void move_block_after_check (rtx
);
580 static void move_succs (VEC(edge
,gc
) **, basic_block
);
581 static void init_glat (void);
582 static void init_glat1 (basic_block
);
583 static void attach_life_info1 (basic_block
);
584 static void free_glat (void);
585 static void sched_remove_insn (rtx
);
586 static void clear_priorities (rtx
);
587 static void add_jump_dependencies (rtx
, rtx
);
588 static void calc_priorities (rtx
);
589 #ifdef ENABLE_CHECKING
590 static int has_edge_p (VEC(edge
,gc
) *, int);
591 static void check_cfg (rtx
, rtx
);
592 static void check_sched_flags (void);
595 #endif /* INSN_SCHEDULING */
597 /* Point to state used for the current scheduling pass. */
598 struct sched_info
*current_sched_info
;
600 #ifndef INSN_SCHEDULING
602 schedule_insns (void)
607 /* Working copy of frontend's sched_info variable. */
608 static struct sched_info current_sched_info_var
;
610 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
611 so that insns independent of the last scheduled insn will be preferred
612 over dependent instructions. */
614 static rtx last_scheduled_insn
;
616 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
617 This is the number of cycles between instruction issue and
618 instruction results. */
621 insn_cost (rtx insn
, rtx link
, rtx used
)
623 return insn_cost1 (insn
, used
? REG_NOTE_KIND (link
) : REG_NOTE_MAX
,
627 /* Compute cost of executing INSN given the dependence on the insn USED.
628 If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
629 Otherwise, dependence between INSN and USED is assumed to be of type
630 DEP_TYPE. This function was introduced as a workaround for
631 targetm.adjust_cost hook.
632 This is the number of cycles between instruction issue and
633 instruction results. */
635 HAIFA_INLINE
static int
636 insn_cost1 (rtx insn
, enum reg_note dep_type
, rtx link
, rtx used
)
638 int cost
= INSN_COST (insn
);
642 /* A USE insn, or something else we don't need to
643 understand. We can't pass these directly to
644 result_ready_cost or insn_default_latency because it will
645 trigger a fatal error for unrecognizable insns. */
646 if (recog_memoized (insn
) < 0)
648 INSN_COST (insn
) = 0;
653 cost
= insn_default_latency (insn
);
657 INSN_COST (insn
) = cost
;
661 /* In this case estimate cost without caring how insn is used. */
665 /* A USE insn should never require the value used to be computed.
666 This allows the computation of a function's result and parameter
667 values to overlap the return and call. */
668 if (recog_memoized (used
) < 0)
672 gcc_assert (!link
|| dep_type
== REG_NOTE_KIND (link
));
674 if (INSN_CODE (insn
) >= 0)
676 if (dep_type
== REG_DEP_ANTI
)
678 else if (dep_type
== REG_DEP_OUTPUT
)
680 cost
= (insn_default_latency (insn
)
681 - insn_default_latency (used
));
685 else if (bypass_p (insn
))
686 cost
= insn_latency (insn
, used
);
689 if (targetm
.sched
.adjust_cost_2
)
690 cost
= targetm
.sched
.adjust_cost_2 (used
, (int) dep_type
, insn
, cost
);
694 if (targetm
.sched
.adjust_cost
)
695 cost
= targetm
.sched
.adjust_cost (used
, link
, insn
, cost
);
705 /* Compute the priority number for INSN. */
715 if (! INSN_PRIORITY_KNOWN (insn
))
717 int this_priority
= 0;
719 if (INSN_DEPEND (insn
) == 0)
720 this_priority
= insn_cost (insn
, 0, 0);
723 rtx prev_first
, twin
;
726 /* For recovery check instructions we calculate priority slightly
727 different than that of normal instructions. Instead of walking
728 through INSN_DEPEND (check) list, we walk through INSN_DEPEND list
729 of each instruction in the corresponding recovery block. */
731 rec
= RECOVERY_BLOCK (insn
);
732 if (!rec
|| rec
== EXIT_BLOCK_PTR
)
734 prev_first
= PREV_INSN (insn
);
739 prev_first
= NEXT_INSN (BB_HEAD (rec
));
740 twin
= PREV_INSN (BB_END (rec
));
745 for (link
= INSN_DEPEND (twin
); link
; link
= XEXP (link
, 1))
750 next
= XEXP (link
, 0);
752 if (BLOCK_FOR_INSN (next
) != rec
)
754 /* Critical path is meaningful in block boundaries
756 if (! (*current_sched_info
->contributes_to_priority
)
758 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
759 then speculative instructions will less likely be
760 scheduled. That is because the priority of
761 their producers will increase, and, thus, the
762 producers will more likely be scheduled, thus,
763 resolving the dependence. */
764 || ((current_sched_info
->flags
& DO_SPECULATION
)
765 && (DEP_STATUS (link
) & SPECULATIVE
)
766 && !(spec_info
->flags
767 & COUNT_SPEC_IN_CRITICAL_PATH
)))
770 next_priority
= insn_cost1 (insn
,
772 REG_NOTE_KIND (link
) :
774 twin
== insn
? link
: 0,
775 next
) + priority (next
);
777 if (next_priority
> this_priority
)
778 this_priority
= next_priority
;
782 twin
= PREV_INSN (twin
);
784 while (twin
!= prev_first
);
786 INSN_PRIORITY (insn
) = this_priority
;
787 INSN_PRIORITY_KNOWN (insn
) = 1;
790 return INSN_PRIORITY (insn
);
793 /* Macros and functions for keeping the priority queue sorted, and
794 dealing with queuing and dequeuing of instructions. */
796 #define SCHED_SORT(READY, N_READY) \
797 do { if ((N_READY) == 2) \
798 swap_sort (READY, N_READY); \
799 else if ((N_READY) > 2) \
800 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
803 /* Returns a positive value if x is preferred; returns a negative value if
804 y is preferred. Should never return 0, since that will make the sort
808 rank_for_schedule (const void *x
, const void *y
)
810 rtx tmp
= *(const rtx
*) y
;
811 rtx tmp2
= *(const rtx
*) x
;
813 int tmp_class
, tmp2_class
, depend_count1
, depend_count2
;
814 int val
, priority_val
, weight_val
, info_val
;
816 /* The insn in a schedule group should be issued the first. */
817 if (SCHED_GROUP_P (tmp
) != SCHED_GROUP_P (tmp2
))
818 return SCHED_GROUP_P (tmp2
) ? 1 : -1;
820 /* Prefer insn with higher priority. */
821 priority_val
= INSN_PRIORITY (tmp2
) - INSN_PRIORITY (tmp
);
826 /* Prefer speculative insn with greater dependencies weakness. */
833 ds1
= TODO_SPEC (tmp
) & SPECULATIVE
;
835 dw1
= dep_weak (ds1
);
839 ds2
= TODO_SPEC (tmp2
) & SPECULATIVE
;
841 dw2
= dep_weak (ds2
);
846 if (dw
> (NO_DEP_WEAK
/ 8) || dw
< -(NO_DEP_WEAK
/ 8))
850 /* Prefer an insn with smaller contribution to registers-pressure. */
851 if (!reload_completed
&&
852 (weight_val
= INSN_REG_WEIGHT (tmp
) - INSN_REG_WEIGHT (tmp2
)))
855 info_val
= (*current_sched_info
->rank
) (tmp
, tmp2
);
859 /* Compare insns based on their relation to the last-scheduled-insn. */
860 if (INSN_P (last_scheduled_insn
))
862 /* Classify the instructions into three classes:
863 1) Data dependent on last schedule insn.
864 2) Anti/Output dependent on last scheduled insn.
865 3) Independent of last scheduled insn, or has latency of one.
866 Choose the insn from the highest numbered class if different. */
867 link
= find_insn_list (tmp
, INSN_DEPEND (last_scheduled_insn
));
868 if (link
== 0 || insn_cost (last_scheduled_insn
, link
, tmp
) == 1)
870 else if (REG_NOTE_KIND (link
) == 0) /* Data dependence. */
875 link
= find_insn_list (tmp2
, INSN_DEPEND (last_scheduled_insn
));
876 if (link
== 0 || insn_cost (last_scheduled_insn
, link
, tmp2
) == 1)
878 else if (REG_NOTE_KIND (link
) == 0) /* Data dependence. */
883 if ((val
= tmp2_class
- tmp_class
))
887 /* Prefer the insn which has more later insns that depend on it.
888 This gives the scheduler more freedom when scheduling later
889 instructions at the expense of added register pressure. */
891 for (link
= INSN_DEPEND (tmp
); link
; link
= XEXP (link
, 1))
895 for (link
= INSN_DEPEND (tmp2
); link
; link
= XEXP (link
, 1))
898 val
= depend_count2
- depend_count1
;
902 /* If insns are equally good, sort by INSN_LUID (original insn order),
903 so that we make the sort stable. This minimizes instruction movement,
904 thus minimizing sched's effect on debugging and cross-jumping. */
905 return INSN_LUID (tmp
) - INSN_LUID (tmp2
);
908 /* Resort the array A in which only element at index N may be out of order. */
910 HAIFA_INLINE
static void
911 swap_sort (rtx
*a
, int n
)
916 while (i
>= 0 && rank_for_schedule (a
+ i
, &insn
) >= 0)
924 /* Add INSN to the insn queue so that it can be executed at least
925 N_CYCLES after the currently executing insn. Preserve insns
926 chain for debugging purposes. */
928 HAIFA_INLINE
static void
929 queue_insn (rtx insn
, int n_cycles
)
931 int next_q
= NEXT_Q_AFTER (q_ptr
, n_cycles
);
932 rtx link
= alloc_INSN_LIST (insn
, insn_queue
[next_q
]);
934 gcc_assert (n_cycles
<= max_insn_queue_index
);
936 insn_queue
[next_q
] = link
;
939 if (sched_verbose
>= 2)
941 fprintf (sched_dump
, ";;\t\tReady-->Q: insn %s: ",
942 (*current_sched_info
->print_insn
) (insn
, 0));
944 fprintf (sched_dump
, "queued for %d cycles.\n", n_cycles
);
947 QUEUE_INDEX (insn
) = next_q
;
950 /* Remove INSN from queue. */
952 queue_remove (rtx insn
)
954 gcc_assert (QUEUE_INDEX (insn
) >= 0);
955 remove_free_INSN_LIST_elem (insn
, &insn_queue
[QUEUE_INDEX (insn
)]);
957 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
960 /* Return a pointer to the bottom of the ready list, i.e. the insn
961 with the lowest priority. */
963 HAIFA_INLINE
static rtx
*
964 ready_lastpos (struct ready_list
*ready
)
966 gcc_assert (ready
->n_ready
>= 1);
967 return ready
->vec
+ ready
->first
- ready
->n_ready
+ 1;
970 /* Add an element INSN to the ready list so that it ends up with the
971 lowest/highest priority depending on FIRST_P. */
973 HAIFA_INLINE
static void
974 ready_add (struct ready_list
*ready
, rtx insn
, bool first_p
)
978 if (ready
->first
== ready
->n_ready
)
980 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
,
981 ready_lastpos (ready
),
982 ready
->n_ready
* sizeof (rtx
));
983 ready
->first
= ready
->veclen
- 1;
985 ready
->vec
[ready
->first
- ready
->n_ready
] = insn
;
989 if (ready
->first
== ready
->veclen
- 1)
992 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
993 memmove (ready
->vec
+ ready
->veclen
- ready
->n_ready
- 1,
994 ready_lastpos (ready
),
995 ready
->n_ready
* sizeof (rtx
));
996 ready
->first
= ready
->veclen
- 2;
998 ready
->vec
[++(ready
->first
)] = insn
;
1003 gcc_assert (QUEUE_INDEX (insn
) != QUEUE_READY
);
1004 QUEUE_INDEX (insn
) = QUEUE_READY
;
1007 /* Remove the element with the highest priority from the ready list and
1010 HAIFA_INLINE
static rtx
1011 ready_remove_first (struct ready_list
*ready
)
1015 gcc_assert (ready
->n_ready
);
1016 t
= ready
->vec
[ready
->first
--];
1018 /* If the queue becomes empty, reset it. */
1019 if (ready
->n_ready
== 0)
1020 ready
->first
= ready
->veclen
- 1;
1022 gcc_assert (QUEUE_INDEX (t
) == QUEUE_READY
);
1023 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
1028 /* The following code implements multi-pass scheduling for the first
1029 cycle. In other words, we will try to choose ready insn which
1030 permits to start maximum number of insns on the same cycle. */
1032 /* Return a pointer to the element INDEX from the ready. INDEX for
1033 insn with the highest priority is 0, and the lowest priority has
1036 HAIFA_INLINE
static rtx
1037 ready_element (struct ready_list
*ready
, int index
)
1039 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
1041 return ready
->vec
[ready
->first
- index
];
1044 /* Remove the element INDEX from the ready list and return it. INDEX
1045 for insn with the highest priority is 0, and the lowest priority
1048 HAIFA_INLINE
static rtx
1049 ready_remove (struct ready_list
*ready
, int index
)
1055 return ready_remove_first (ready
);
1056 gcc_assert (ready
->n_ready
&& index
< ready
->n_ready
);
1057 t
= ready
->vec
[ready
->first
- index
];
1059 for (i
= index
; i
< ready
->n_ready
; i
++)
1060 ready
->vec
[ready
->first
- i
] = ready
->vec
[ready
->first
- i
- 1];
1061 QUEUE_INDEX (t
) = QUEUE_NOWHERE
;
1065 /* Remove INSN from the ready list. */
1067 ready_remove_insn (rtx insn
)
1071 for (i
= 0; i
< readyp
->n_ready
; i
++)
1072 if (ready_element (readyp
, i
) == insn
)
1074 ready_remove (readyp
, i
);
1080 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1083 HAIFA_INLINE
static void
1084 ready_sort (struct ready_list
*ready
)
1086 rtx
*first
= ready_lastpos (ready
);
1087 SCHED_SORT (first
, ready
->n_ready
);
1090 /* PREV is an insn that is ready to execute. Adjust its priority if that
1091 will help shorten or lengthen register lifetimes as appropriate. Also
1092 provide a hook for the target to tweek itself. */
1094 HAIFA_INLINE
static void
1095 adjust_priority (rtx prev
)
1097 /* ??? There used to be code here to try and estimate how an insn
1098 affected register lifetimes, but it did it by looking at REG_DEAD
1099 notes, which we removed in schedule_region. Nor did it try to
1100 take into account register pressure or anything useful like that.
1102 Revisit when we have a machine model to work with and not before. */
1104 if (targetm
.sched
.adjust_priority
)
1105 INSN_PRIORITY (prev
) =
1106 targetm
.sched
.adjust_priority (prev
, INSN_PRIORITY (prev
));
1109 /* Advance time on one cycle. */
1110 HAIFA_INLINE
static void
1111 advance_one_cycle (void)
1113 if (targetm
.sched
.dfa_pre_cycle_insn
)
1114 state_transition (curr_state
,
1115 targetm
.sched
.dfa_pre_cycle_insn ());
1117 state_transition (curr_state
, NULL
);
1119 if (targetm
.sched
.dfa_post_cycle_insn
)
1120 state_transition (curr_state
,
1121 targetm
.sched
.dfa_post_cycle_insn ());
1124 /* Clock at which the previous instruction was issued. */
1125 static int last_clock_var
;
1127 /* INSN is the "currently executing insn". Launch each insn which was
1128 waiting on INSN. READY is the ready list which contains the insns
1129 that are ready to fire. CLOCK is the current cycle. The function
1130 returns necessary cycle advance after issuing the insn (it is not
1131 zero for insns in a schedule group). */
1134 schedule_insn (rtx insn
)
1139 if (sched_verbose
>= 1)
1143 print_insn (buf
, insn
, 0);
1145 fprintf (sched_dump
, ";;\t%3i--> %-40s:", clock_var
, buf
);
1147 if (recog_memoized (insn
) < 0)
1148 fprintf (sched_dump
, "nothing");
1150 print_reservation (sched_dump
, insn
);
1151 fputc ('\n', sched_dump
);
1154 /* Scheduling instruction should have all its dependencies resolved and
1155 should have been removed from the ready list. */
1156 gcc_assert (INSN_DEP_COUNT (insn
) == 0);
1157 gcc_assert (!LOG_LINKS (insn
));
1158 gcc_assert (QUEUE_INDEX (insn
) == QUEUE_NOWHERE
);
1160 QUEUE_INDEX (insn
) = QUEUE_SCHEDULED
;
1162 /* Now we can free RESOLVED_DEPS list. */
1163 if (current_sched_info
->flags
& USE_DEPS_LIST
)
1164 free_DEPS_LIST_list (&RESOLVED_DEPS (insn
));
1166 free_INSN_LIST_list (&RESOLVED_DEPS (insn
));
1168 gcc_assert (INSN_TICK (insn
) >= MIN_TICK
);
1169 if (INSN_TICK (insn
) > clock_var
)
1170 /* INSN has been prematurely moved from the queue to the ready list.
1171 This is possible only if following flag is set. */
1172 gcc_assert (flag_sched_stalled_insns
);
1174 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1175 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1176 INSN_TICK (insn
) = clock_var
;
1178 /* Update dependent instructions. */
1179 for (link
= INSN_DEPEND (insn
); link
; link
= XEXP (link
, 1))
1181 rtx next
= XEXP (link
, 0);
1183 resolve_dep (next
, insn
);
1185 if (!IS_SPECULATION_BRANCHY_CHECK_P (insn
))
1189 effective_cost
= try_ready (next
);
1191 if (effective_cost
>= 0
1192 && SCHED_GROUP_P (next
)
1193 && advance
< effective_cost
)
1194 advance
= effective_cost
;
1197 /* Check always has only one forward dependence (to the first insn in
1198 the recovery block), therefore, this will be executed only once. */
1200 gcc_assert (XEXP (link
, 1) == 0);
1201 fix_recovery_deps (RECOVERY_BLOCK (insn
));
1205 /* Annotate the instruction with issue information -- TImode
1206 indicates that the instruction is expected not to be able
1207 to issue on the same cycle as the previous insn. A machine
1208 may use this information to decide how the instruction should
1211 && GET_CODE (PATTERN (insn
)) != USE
1212 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
1214 if (reload_completed
)
1215 PUT_MODE (insn
, clock_var
> last_clock_var
? TImode
: VOIDmode
);
1216 last_clock_var
= clock_var
;
1222 /* Functions for handling of notes. */
1224 /* Delete notes beginning with INSN and put them in the chain
1225 of notes ended by NOTE_LIST.
1226 Returns the insn following the notes. */
1229 unlink_other_notes (rtx insn
, rtx tail
)
1231 rtx prev
= PREV_INSN (insn
);
1233 while (insn
!= tail
&& NOTE_NOT_BB_P (insn
))
1235 rtx next
= NEXT_INSN (insn
);
1236 basic_block bb
= BLOCK_FOR_INSN (insn
);
1238 /* Delete the note from its current position. */
1240 NEXT_INSN (prev
) = next
;
1242 PREV_INSN (next
) = prev
;
1246 /* Basic block can begin with either LABEL or
1247 NOTE_INSN_BASIC_BLOCK. */
1248 gcc_assert (BB_HEAD (bb
) != insn
);
1250 /* Check if we are removing last insn in the BB. */
1251 if (BB_END (bb
) == insn
)
1255 /* See sched_analyze to see how these are handled. */
1256 if (NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_BEG
1257 && NOTE_LINE_NUMBER (insn
) != NOTE_INSN_EH_REGION_END
)
1259 /* Insert the note at the end of the notes list. */
1260 PREV_INSN (insn
) = note_list
;
1262 NEXT_INSN (note_list
) = insn
;
1271 /* Return the head and tail pointers of ebb starting at BEG and ending
1275 get_ebb_head_tail (basic_block beg
, basic_block end
, rtx
*headp
, rtx
*tailp
)
1277 rtx beg_head
= BB_HEAD (beg
);
1278 rtx beg_tail
= BB_END (beg
);
1279 rtx end_head
= BB_HEAD (end
);
1280 rtx end_tail
= BB_END (end
);
1282 /* Don't include any notes or labels at the beginning of the BEG
1283 basic block, or notes at the end of the END basic blocks. */
1285 if (LABEL_P (beg_head
))
1286 beg_head
= NEXT_INSN (beg_head
);
1288 while (beg_head
!= beg_tail
)
1289 if (NOTE_P (beg_head
))
1290 beg_head
= NEXT_INSN (beg_head
);
1297 end_head
= beg_head
;
1298 else if (LABEL_P (end_head
))
1299 end_head
= NEXT_INSN (end_head
);
1301 while (end_head
!= end_tail
)
1302 if (NOTE_P (end_tail
))
1303 end_tail
= PREV_INSN (end_tail
);
1310 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1313 no_real_insns_p (rtx head
, rtx tail
)
1315 while (head
!= NEXT_INSN (tail
))
1317 if (!NOTE_P (head
) && !LABEL_P (head
))
1319 head
= NEXT_INSN (head
);
1324 /* Save line number notes for each insn in block B. HEAD and TAIL are
1325 the boundaries of the block in which notes should be processed. */
1328 save_line_notes (int b
, rtx head
, rtx tail
)
1332 /* We must use the true line number for the first insn in the block
1333 that was computed and saved at the start of this pass. We can't
1334 use the current line number, because scheduling of the previous
1335 block may have changed the current line number. */
1337 rtx line
= line_note_head
[b
];
1340 next_tail
= NEXT_INSN (tail
);
1342 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1343 LINE_NOTE (insn
) = line
;
1346 /* After a block was scheduled, insert line notes into the insns list.
1347 HEAD and TAIL are the boundaries of the block in which notes should
1351 restore_line_notes (rtx head
, rtx tail
)
1353 rtx line
, note
, prev
, new;
1354 int added_notes
= 0;
1355 rtx next_tail
, insn
;
1358 next_tail
= NEXT_INSN (tail
);
1360 /* Determine the current line-number. We want to know the current
1361 line number of the first insn of the block here, in case it is
1362 different from the true line number that was saved earlier. If
1363 different, then we need a line number note before the first insn
1364 of this block. If it happens to be the same, then we don't want to
1365 emit another line number note here. */
1366 for (line
= head
; line
; line
= PREV_INSN (line
))
1367 if (NOTE_P (line
) && NOTE_LINE_NUMBER (line
) > 0)
1370 /* Walk the insns keeping track of the current line-number and inserting
1371 the line-number notes as needed. */
1372 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1373 if (NOTE_P (insn
) && NOTE_LINE_NUMBER (insn
) > 0)
1375 /* This used to emit line number notes before every non-deleted note.
1376 However, this confuses a debugger, because line notes not separated
1377 by real instructions all end up at the same address. I can find no
1378 use for line number notes before other notes, so none are emitted. */
1379 else if (!NOTE_P (insn
)
1380 && INSN_UID (insn
) < old_max_uid
1381 && (note
= LINE_NOTE (insn
)) != 0
1384 #ifdef USE_MAPPED_LOCATION
1385 || NOTE_SOURCE_LOCATION (note
) != NOTE_SOURCE_LOCATION (line
)
1387 || NOTE_LINE_NUMBER (note
) != NOTE_LINE_NUMBER (line
)
1388 || NOTE_SOURCE_FILE (note
) != NOTE_SOURCE_FILE (line
)
1393 prev
= PREV_INSN (insn
);
1394 if (LINE_NOTE (note
))
1396 /* Re-use the original line-number note. */
1397 LINE_NOTE (note
) = 0;
1398 PREV_INSN (note
) = prev
;
1399 NEXT_INSN (prev
) = note
;
1400 PREV_INSN (insn
) = note
;
1401 NEXT_INSN (note
) = insn
;
1402 set_block_for_insn (note
, BLOCK_FOR_INSN (insn
));
1407 new = emit_note_after (NOTE_LINE_NUMBER (note
), prev
);
1408 #ifndef USE_MAPPED_LOCATION
1409 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note
);
1413 if (sched_verbose
&& added_notes
)
1414 fprintf (sched_dump
, ";; added %d line-number notes\n", added_notes
);
1417 /* Delete notes between HEAD and TAIL and put them in the chain
1418 of notes ended by NOTE_LIST. */
1421 rm_other_notes (rtx head
, rtx tail
)
1427 if (head
== tail
&& (! INSN_P (head
)))
1430 next_tail
= NEXT_INSN (tail
);
1431 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1435 /* Farm out notes, and maybe save them in NOTE_LIST.
1436 This is needed to keep the debugger from
1437 getting completely deranged. */
1438 if (NOTE_NOT_BB_P (insn
))
1442 insn
= unlink_other_notes (insn
, next_tail
);
1444 gcc_assert (prev
!= tail
&& prev
!= head
&& insn
!= next_tail
);
1449 /* Functions for computation of registers live/usage info. */
1451 /* This function looks for a new register being defined.
1452 If the destination register is already used by the source,
1453 a new register is not needed. */
1456 find_set_reg_weight (rtx x
)
1458 if (GET_CODE (x
) == CLOBBER
1459 && register_operand (SET_DEST (x
), VOIDmode
))
1461 if (GET_CODE (x
) == SET
1462 && register_operand (SET_DEST (x
), VOIDmode
))
1464 if (REG_P (SET_DEST (x
)))
1466 if (!reg_mentioned_p (SET_DEST (x
), SET_SRC (x
)))
1476 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
1479 find_insn_reg_weight (basic_block bb
)
1481 rtx insn
, next_tail
, head
, tail
;
1483 get_ebb_head_tail (bb
, bb
, &head
, &tail
);
1484 next_tail
= NEXT_INSN (tail
);
1486 for (insn
= head
; insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1487 find_insn_reg_weight1 (insn
);
1490 /* Calculate INSN_REG_WEIGHT for single instruction.
1491 Separated from find_insn_reg_weight because of need
1492 to initialize new instruction in generate_recovery_code. */
1494 find_insn_reg_weight1 (rtx insn
)
1499 /* Handle register life information. */
1500 if (! INSN_P (insn
))
1503 /* Increment weight for each register born here. */
1505 reg_weight
+= find_set_reg_weight (x
);
1506 if (GET_CODE (x
) == PARALLEL
)
1509 for (j
= XVECLEN (x
, 0) - 1; j
>= 0; j
--)
1511 x
= XVECEXP (PATTERN (insn
), 0, j
);
1512 reg_weight
+= find_set_reg_weight (x
);
1515 /* Decrement weight for each register that dies here. */
1516 for (x
= REG_NOTES (insn
); x
; x
= XEXP (x
, 1))
1518 if (REG_NOTE_KIND (x
) == REG_DEAD
1519 || REG_NOTE_KIND (x
) == REG_UNUSED
)
1523 INSN_REG_WEIGHT (insn
) = reg_weight
;
1526 /* Move insns that became ready to fire from queue to ready list. */
1529 queue_to_ready (struct ready_list
*ready
)
1534 q_ptr
= NEXT_Q (q_ptr
);
1536 /* Add all pending insns that can be scheduled without stalls to the
1538 for (link
= insn_queue
[q_ptr
]; link
; link
= XEXP (link
, 1))
1540 insn
= XEXP (link
, 0);
1543 if (sched_verbose
>= 2)
1544 fprintf (sched_dump
, ";;\t\tQ-->Ready: insn %s: ",
1545 (*current_sched_info
->print_insn
) (insn
, 0));
1547 /* If the ready list is full, delay the insn for 1 cycle.
1548 See the comment in schedule_block for the rationale. */
1549 if (!reload_completed
1550 && ready
->n_ready
> MAX_SCHED_READY_INSNS
1551 && !SCHED_GROUP_P (insn
))
1553 if (sched_verbose
>= 2)
1554 fprintf (sched_dump
, "requeued because ready full\n");
1555 queue_insn (insn
, 1);
1559 ready_add (ready
, insn
, false);
1560 if (sched_verbose
>= 2)
1561 fprintf (sched_dump
, "moving to ready without stalls\n");
1564 free_INSN_LIST_list (&insn_queue
[q_ptr
]);
1566 /* If there are no ready insns, stall until one is ready and add all
1567 of the pending insns at that point to the ready list. */
1568 if (ready
->n_ready
== 0)
1572 for (stalls
= 1; stalls
<= max_insn_queue_index
; stalls
++)
1574 if ((link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]))
1576 for (; link
; link
= XEXP (link
, 1))
1578 insn
= XEXP (link
, 0);
1581 if (sched_verbose
>= 2)
1582 fprintf (sched_dump
, ";;\t\tQ-->Ready: insn %s: ",
1583 (*current_sched_info
->print_insn
) (insn
, 0));
1585 ready_add (ready
, insn
, false);
1586 if (sched_verbose
>= 2)
1587 fprintf (sched_dump
, "moving to ready with %d stalls\n", stalls
);
1589 free_INSN_LIST_list (&insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]);
1591 advance_one_cycle ();
1596 advance_one_cycle ();
1599 q_ptr
= NEXT_Q_AFTER (q_ptr
, stalls
);
1600 clock_var
+= stalls
;
1604 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1605 prematurely move INSN from the queue to the ready list. Currently,
1606 if a target defines the hook 'is_costly_dependence', this function
1607 uses the hook to check whether there exist any dependences which are
1608 considered costly by the target, between INSN and other insns that
1609 have already been scheduled. Dependences are checked up to Y cycles
1610 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
1611 controlling this value.
1612 (Other considerations could be taken into account instead (or in
1613 addition) depending on user flags and target hooks. */
1616 ok_for_early_queue_removal (rtx insn
)
1619 rtx prev_insn
= last_scheduled_insn
;
1621 if (targetm
.sched
.is_costly_dependence
)
1623 for (n_cycles
= flag_sched_stalled_insns_dep
; n_cycles
; n_cycles
--)
1625 for ( ; prev_insn
; prev_insn
= PREV_INSN (prev_insn
))
1630 if (!NOTE_P (prev_insn
))
1632 dep_link
= find_insn_list (insn
, INSN_DEPEND (prev_insn
));
1635 dep_cost
= insn_cost (prev_insn
, dep_link
, insn
) ;
1636 if (targetm
.sched
.is_costly_dependence (prev_insn
, insn
,
1638 flag_sched_stalled_insns_dep
- n_cycles
))
1643 if (GET_MODE (prev_insn
) == TImode
) /* end of dispatch group */
1649 prev_insn
= PREV_INSN (prev_insn
);
1657 /* Remove insns from the queue, before they become "ready" with respect
1658 to FU latency considerations. */
1661 early_queue_to_ready (state_t state
, struct ready_list
*ready
)
1669 state_t temp_state
= alloca (dfa_state_size
);
1671 int insns_removed
= 0;
1674 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1677 X == 0: There is no limit on how many queued insns can be removed
1678 prematurely. (flag_sched_stalled_insns = -1).
1680 X >= 1: Only X queued insns can be removed prematurely in each
1681 invocation. (flag_sched_stalled_insns = X).
1683 Otherwise: Early queue removal is disabled.
1684 (flag_sched_stalled_insns = 0)
1687 if (! flag_sched_stalled_insns
)
1690 for (stalls
= 0; stalls
<= max_insn_queue_index
; stalls
++)
1692 if ((link
= insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)]))
1694 if (sched_verbose
> 6)
1695 fprintf (sched_dump
, ";; look at index %d + %d\n", q_ptr
, stalls
);
1700 next_link
= XEXP (link
, 1);
1701 insn
= XEXP (link
, 0);
1702 if (insn
&& sched_verbose
> 6)
1703 print_rtl_single (sched_dump
, insn
);
1705 memcpy (temp_state
, state
, dfa_state_size
);
1706 if (recog_memoized (insn
) < 0)
1707 /* non-negative to indicate that it's not ready
1708 to avoid infinite Q->R->Q->R... */
1711 cost
= state_transition (temp_state
, insn
);
1713 if (sched_verbose
>= 6)
1714 fprintf (sched_dump
, "transition cost = %d\n", cost
);
1716 move_to_ready
= false;
1719 move_to_ready
= ok_for_early_queue_removal (insn
);
1720 if (move_to_ready
== true)
1722 /* move from Q to R */
1724 ready_add (ready
, insn
, false);
1727 XEXP (prev_link
, 1) = next_link
;
1729 insn_queue
[NEXT_Q_AFTER (q_ptr
, stalls
)] = next_link
;
1731 free_INSN_LIST_node (link
);
1733 if (sched_verbose
>= 2)
1734 fprintf (sched_dump
, ";;\t\tEarly Q-->Ready: insn %s\n",
1735 (*current_sched_info
->print_insn
) (insn
, 0));
1738 if (insns_removed
== flag_sched_stalled_insns
)
1739 /* Remove no more than flag_sched_stalled_insns insns
1740 from Q at a time. */
1741 return insns_removed
;
1745 if (move_to_ready
== false)
1752 } /* for stalls.. */
1754 return insns_removed
;
1758 /* Print the ready list for debugging purposes. Callable from debugger. */
1761 debug_ready_list (struct ready_list
*ready
)
1766 if (ready
->n_ready
== 0)
1768 fprintf (sched_dump
, "\n");
1772 p
= ready_lastpos (ready
);
1773 for (i
= 0; i
< ready
->n_ready
; i
++)
1774 fprintf (sched_dump
, " %s", (*current_sched_info
->print_insn
) (p
[i
], 0));
1775 fprintf (sched_dump
, "\n");
1778 /* Search INSN for REG_SAVE_NOTE note pairs for
1779 NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
1780 NOTEs. The REG_SAVE_NOTE note following first one is contains the
1781 saved value for NOTE_BLOCK_NUMBER which is useful for
1782 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1785 reemit_notes (rtx insn
)
1787 rtx note
, last
= insn
;
1789 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
1791 if (REG_NOTE_KIND (note
) == REG_SAVE_NOTE
)
1793 enum insn_note note_type
= INTVAL (XEXP (note
, 0));
1795 last
= emit_note_before (note_type
, last
);
1796 remove_note (insn
, note
);
1801 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1803 move_insn (rtx insn
)
1805 rtx last
= last_scheduled_insn
;
1807 if (PREV_INSN (insn
) != last
)
1813 bb
= BLOCK_FOR_INSN (insn
);
1815 /* BB_HEAD is either LABEL or NOTE. */
1816 gcc_assert (BB_HEAD (bb
) != insn
);
1818 if (BB_END (bb
) == insn
)
1819 /* If this is last instruction in BB, move end marker one
1822 /* Jumps are always placed at the end of basic block. */
1823 jump_p
= control_flow_insn_p (insn
);
1826 || ((current_sched_info
->flags
& SCHED_RGN
)
1827 && IS_SPECULATION_BRANCHY_CHECK_P (insn
))
1828 || (current_sched_info
->flags
& SCHED_EBB
));
1830 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn
)) == bb
);
1832 BB_END (bb
) = PREV_INSN (insn
);
1835 gcc_assert (BB_END (bb
) != last
);
1838 /* We move the block note along with jump. */
1840 /* NT is needed for assertion below. */
1841 rtx nt
= current_sched_info
->next_tail
;
1843 note
= NEXT_INSN (insn
);
1844 while (NOTE_NOT_BB_P (note
) && note
!= nt
)
1845 note
= NEXT_INSN (note
);
1849 || BARRIER_P (note
)))
1850 note
= NEXT_INSN (note
);
1852 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
1857 NEXT_INSN (PREV_INSN (insn
)) = NEXT_INSN (note
);
1858 PREV_INSN (NEXT_INSN (note
)) = PREV_INSN (insn
);
1860 NEXT_INSN (note
) = NEXT_INSN (last
);
1861 PREV_INSN (NEXT_INSN (last
)) = note
;
1863 NEXT_INSN (last
) = insn
;
1864 PREV_INSN (insn
) = last
;
1866 bb
= BLOCK_FOR_INSN (last
);
1870 fix_jump_move (insn
);
1872 if (BLOCK_FOR_INSN (insn
) != bb
)
1873 move_block_after_check (insn
);
1875 gcc_assert (BB_END (bb
) == last
);
1878 set_block_for_insn (insn
, bb
);
1880 /* Update BB_END, if needed. */
1881 if (BB_END (bb
) == last
)
1885 reemit_notes (insn
);
1887 SCHED_GROUP_P (insn
) = 0;
1890 /* The following structure describe an entry of the stack of choices. */
1893 /* Ordinal number of the issued insn in the ready queue. */
1895 /* The number of the rest insns whose issues we should try. */
1897 /* The number of issued essential insns. */
1899 /* State after issuing the insn. */
1903 /* The following array is used to implement a stack of choices used in
1904 function max_issue. */
1905 static struct choice_entry
*choice_stack
;
1907 /* The following variable value is number of essential insns issued on
1908 the current cycle. An insn is essential one if it changes the
1909 processors state. */
1910 static int cycle_issued_insns
;
1912 /* The following variable value is maximal number of tries of issuing
1913 insns for the first cycle multipass insn scheduling. We define
1914 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
1915 need this constraint if all real insns (with non-negative codes)
1916 had reservations because in this case the algorithm complexity is
1917 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
1918 might be incomplete and such insn might occur. For such
1919 descriptions, the complexity of algorithm (without the constraint)
1920 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
1921 static int max_lookahead_tries
;
1923 /* The following value is value of hook
1924 `first_cycle_multipass_dfa_lookahead' at the last call of
1926 static int cached_first_cycle_multipass_dfa_lookahead
= 0;
1928 /* The following value is value of `issue_rate' at the last call of
1930 static int cached_issue_rate
= 0;
1932 /* The following function returns maximal (or close to maximal) number
1933 of insns which can be issued on the same cycle and one of which
1934 insns is insns with the best rank (the first insn in READY). To
1935 make this function tries different samples of ready insns. READY
1936 is current queue `ready'. Global array READY_TRY reflects what
1937 insns are already issued in this try. MAX_POINTS is the sum of points
1938 of all instructions in READY. The function stops immediately,
1939 if it reached the such a solution, that all instruction can be issued.
1940 INDEX will contain index of the best insn in READY. The following
1941 function is used only for first cycle multipass scheduling. */
1943 max_issue (struct ready_list
*ready
, int *index
, int max_points
)
1945 int n
, i
, all
, n_ready
, best
, delay
, tries_num
, points
= -1;
1946 struct choice_entry
*top
;
1950 memcpy (choice_stack
->state
, curr_state
, dfa_state_size
);
1952 top
->rest
= cached_first_cycle_multipass_dfa_lookahead
;
1954 n_ready
= ready
->n_ready
;
1955 for (all
= i
= 0; i
< n_ready
; i
++)
1962 if (top
->rest
== 0 || i
>= n_ready
)
1964 if (top
== choice_stack
)
1966 if (best
< top
- choice_stack
&& ready_try
[0])
1968 best
= top
- choice_stack
;
1969 *index
= choice_stack
[1].index
;
1971 if (top
->n
== max_points
|| best
== all
)
1977 memcpy (curr_state
, top
->state
, dfa_state_size
);
1979 else if (!ready_try
[i
])
1982 if (tries_num
> max_lookahead_tries
)
1984 insn
= ready_element (ready
, i
);
1985 delay
= state_transition (curr_state
, insn
);
1988 if (state_dead_lock_p (curr_state
))
1993 if (memcmp (top
->state
, curr_state
, dfa_state_size
) != 0)
1994 n
+= ISSUE_POINTS (insn
);
1996 top
->rest
= cached_first_cycle_multipass_dfa_lookahead
;
1999 memcpy (top
->state
, curr_state
, dfa_state_size
);
2006 while (top
!= choice_stack
)
2008 ready_try
[top
->index
] = 0;
2011 memcpy (curr_state
, choice_stack
->state
, dfa_state_size
);
2013 if (sched_verbose
>= 4)
2014 fprintf (sched_dump
, ";;\t\tChoosed insn : %s; points: %d/%d\n",
2015 (*current_sched_info
->print_insn
) (ready_element (ready
, *index
),
2017 points
, max_points
);
2022 /* The following function chooses insn from READY and modifies
2023 *N_READY and READY. The following function is used only for first
2024 cycle multipass scheduling. */
2027 choose_ready (struct ready_list
*ready
)
2031 if (targetm
.sched
.first_cycle_multipass_dfa_lookahead
)
2032 lookahead
= targetm
.sched
.first_cycle_multipass_dfa_lookahead ();
2033 if (lookahead
<= 0 || SCHED_GROUP_P (ready_element (ready
, 0)))
2034 return ready_remove_first (ready
);
2037 /* Try to choose the better insn. */
2038 int index
= 0, i
, n
;
2040 int more_issue
, max_points
, try_data
= 1, try_control
= 1;
2042 if (cached_first_cycle_multipass_dfa_lookahead
!= lookahead
)
2044 cached_first_cycle_multipass_dfa_lookahead
= lookahead
;
2045 max_lookahead_tries
= 100;
2046 for (i
= 0; i
< issue_rate
; i
++)
2047 max_lookahead_tries
*= lookahead
;
2049 insn
= ready_element (ready
, 0);
2050 if (INSN_CODE (insn
) < 0)
2051 return ready_remove_first (ready
);
2054 && spec_info
->flags
& (PREFER_NON_DATA_SPEC
2055 | PREFER_NON_CONTROL_SPEC
))
2057 for (i
= 0, n
= ready
->n_ready
; i
< n
; i
++)
2062 x
= ready_element (ready
, i
);
2065 if (spec_info
->flags
& PREFER_NON_DATA_SPEC
2066 && !(s
& DATA_SPEC
))
2069 if (!(spec_info
->flags
& PREFER_NON_CONTROL_SPEC
)
2074 if (spec_info
->flags
& PREFER_NON_CONTROL_SPEC
2075 && !(s
& CONTROL_SPEC
))
2078 if (!(spec_info
->flags
& PREFER_NON_DATA_SPEC
) || !try_data
)
2084 if ((!try_data
&& (TODO_SPEC (insn
) & DATA_SPEC
))
2085 || (!try_control
&& (TODO_SPEC (insn
) & CONTROL_SPEC
))
2086 || (targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard_spec
2087 && !targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard_spec
2089 /* Discard speculative instruction that stands first in the ready
2092 change_queue_index (insn
, 1);
2096 max_points
= ISSUE_POINTS (insn
);
2097 more_issue
= issue_rate
- cycle_issued_insns
- 1;
2099 for (i
= 1; i
< ready
->n_ready
; i
++)
2101 insn
= ready_element (ready
, i
);
2103 = (INSN_CODE (insn
) < 0
2104 || (!try_data
&& (TODO_SPEC (insn
) & DATA_SPEC
))
2105 || (!try_control
&& (TODO_SPEC (insn
) & CONTROL_SPEC
))
2106 || (targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard
2107 && !targetm
.sched
.first_cycle_multipass_dfa_lookahead_guard
2110 if (!ready_try
[i
] && more_issue
-- > 0)
2111 max_points
+= ISSUE_POINTS (insn
);
2114 if (max_issue (ready
, &index
, max_points
) == 0)
2115 return ready_remove_first (ready
);
2117 return ready_remove (ready
, index
);
2121 /* Use forward list scheduling to rearrange insns of block pointed to by
2122 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2126 schedule_block (basic_block
*target_bb
, int rgn_n_insns1
)
2128 struct ready_list ready
;
2129 int i
, first_cycle_insn_p
;
2131 state_t temp_state
= NULL
; /* It is used for multipass scheduling. */
2132 int sort_p
, advance
, start_clock_var
;
2134 /* Head/tail info for this block. */
2135 rtx prev_head
= current_sched_info
->prev_head
;
2136 rtx next_tail
= current_sched_info
->next_tail
;
2137 rtx head
= NEXT_INSN (prev_head
);
2138 rtx tail
= PREV_INSN (next_tail
);
2140 /* We used to have code to avoid getting parameters moved from hard
2141 argument registers into pseudos.
2143 However, it was removed when it proved to be of marginal benefit
2144 and caused problems because schedule_block and compute_forward_dependences
2145 had different notions of what the "head" insn was. */
2147 gcc_assert (head
!= tail
|| INSN_P (head
));
2149 added_recovery_block_p
= false;
2153 dump_new_block_header (0, *target_bb
, head
, tail
);
2155 state_reset (curr_state
);
2157 /* Allocate the ready list. */
2161 choice_stack
= NULL
;
2164 extend_ready (rgn_n_insns1
+ 1);
2166 ready
.first
= ready
.veclen
- 1;
2169 /* It is used for first cycle multipass scheduling. */
2170 temp_state
= alloca (dfa_state_size
);
2172 if (targetm
.sched
.md_init
)
2173 targetm
.sched
.md_init (sched_dump
, sched_verbose
, ready
.veclen
);
2175 /* We start inserting insns after PREV_HEAD. */
2176 last_scheduled_insn
= prev_head
;
2178 gcc_assert (NOTE_P (last_scheduled_insn
)
2179 && BLOCK_FOR_INSN (last_scheduled_insn
) == *target_bb
);
2181 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2186 insn_queue
= alloca ((max_insn_queue_index
+ 1) * sizeof (rtx
));
2187 memset (insn_queue
, 0, (max_insn_queue_index
+ 1) * sizeof (rtx
));
2189 /* Start just before the beginning of time. */
2192 /* We need queue and ready lists and clock_var be initialized
2193 in try_ready () (which is called through init_ready_list ()). */
2194 (*current_sched_info
->init_ready_list
) ();
2196 /* The algorithm is O(n^2) in the number of ready insns at any given
2197 time in the worst case. Before reload we are more likely to have
2198 big lists so truncate them to a reasonable size. */
2199 if (!reload_completed
&& ready
.n_ready
> MAX_SCHED_READY_INSNS
)
2201 ready_sort (&ready
);
2203 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2204 for (i
= MAX_SCHED_READY_INSNS
; i
< ready
.n_ready
; i
++)
2205 if (!SCHED_GROUP_P (ready_element (&ready
, i
)))
2208 if (sched_verbose
>= 2)
2210 fprintf (sched_dump
,
2211 ";;\t\tReady list on entry: %d insns\n", ready
.n_ready
);
2212 fprintf (sched_dump
,
2213 ";;\t\t before reload => truncated to %d insns\n", i
);
2216 /* Delay all insns past it for 1 cycle. */
2217 while (i
< ready
.n_ready
)
2218 queue_insn (ready_remove (&ready
, i
), 1);
2221 /* Now we can restore basic block notes and maintain precise cfg. */
2222 restore_bb_notes (*target_bb
);
2224 last_clock_var
= -1;
2229 /* Loop until all the insns in BB are scheduled. */
2230 while ((*current_sched_info
->schedule_more_p
) ())
2234 start_clock_var
= clock_var
;
2238 advance_one_cycle ();
2240 /* Add to the ready list all pending insns that can be issued now.
2241 If there are no ready insns, increment clock until one
2242 is ready and add all pending insns at that point to the ready
2244 queue_to_ready (&ready
);
2246 gcc_assert (ready
.n_ready
);
2248 if (sched_verbose
>= 2)
2250 fprintf (sched_dump
, ";;\t\tReady list after queue_to_ready: ");
2251 debug_ready_list (&ready
);
2253 advance
-= clock_var
- start_clock_var
;
2255 while (advance
> 0);
2259 /* Sort the ready list based on priority. */
2260 ready_sort (&ready
);
2262 if (sched_verbose
>= 2)
2264 fprintf (sched_dump
, ";;\t\tReady list after ready_sort: ");
2265 debug_ready_list (&ready
);
2269 /* Allow the target to reorder the list, typically for
2270 better instruction bundling. */
2271 if (sort_p
&& targetm
.sched
.reorder
2272 && (ready
.n_ready
== 0
2273 || !SCHED_GROUP_P (ready_element (&ready
, 0))))
2275 targetm
.sched
.reorder (sched_dump
, sched_verbose
,
2276 ready_lastpos (&ready
),
2277 &ready
.n_ready
, clock_var
);
2279 can_issue_more
= issue_rate
;
2281 first_cycle_insn_p
= 1;
2282 cycle_issued_insns
= 0;
2289 if (sched_verbose
>= 2)
2291 fprintf (sched_dump
, ";;\tReady list (t = %3d): ",
2293 debug_ready_list (&ready
);
2296 if (ready
.n_ready
== 0
2298 && reload_completed
)
2300 /* Allow scheduling insns directly from the queue in case
2301 there's nothing better to do (ready list is empty) but
2302 there are still vacant dispatch slots in the current cycle. */
2303 if (sched_verbose
>= 6)
2304 fprintf(sched_dump
,";;\t\tSecond chance\n");
2305 memcpy (temp_state
, curr_state
, dfa_state_size
);
2306 if (early_queue_to_ready (temp_state
, &ready
))
2307 ready_sort (&ready
);
2310 if (ready
.n_ready
== 0 || !can_issue_more
2311 || state_dead_lock_p (curr_state
)
2312 || !(*current_sched_info
->schedule_more_p
) ())
2315 /* Select and remove the insn from the ready list. */
2318 insn
= choose_ready (&ready
);
2323 insn
= ready_remove_first (&ready
);
2325 if (targetm
.sched
.dfa_new_cycle
2326 && targetm
.sched
.dfa_new_cycle (sched_dump
, sched_verbose
,
2327 insn
, last_clock_var
,
2328 clock_var
, &sort_p
))
2329 /* SORT_P is used by the target to override sorting
2330 of the ready list. This is needed when the target
2331 has modified its internal structures expecting that
2332 the insn will be issued next. As we need the insn
2333 to have the highest priority (so it will be returned by
2334 the ready_remove_first call above), we invoke
2335 ready_add (&ready, insn, true).
2336 But, still, there is one issue: INSN can be later
2337 discarded by scheduler's front end through
2338 current_sched_info->can_schedule_ready_p, hence, won't
2341 ready_add (&ready
, insn
, true);
2346 memcpy (temp_state
, curr_state
, dfa_state_size
);
2347 if (recog_memoized (insn
) < 0)
2349 asm_p
= (GET_CODE (PATTERN (insn
)) == ASM_INPUT
2350 || asm_noperands (PATTERN (insn
)) >= 0);
2351 if (!first_cycle_insn_p
&& asm_p
)
2352 /* This is asm insn which is tryed to be issued on the
2353 cycle not first. Issue it on the next cycle. */
2356 /* A USE insn, or something else we don't need to
2357 understand. We can't pass these directly to
2358 state_transition because it will trigger a
2359 fatal error for unrecognizable insns. */
2364 cost
= state_transition (temp_state
, insn
);
2373 queue_insn (insn
, cost
);
2374 if (SCHED_GROUP_P (insn
))
2383 if (current_sched_info
->can_schedule_ready_p
2384 && ! (*current_sched_info
->can_schedule_ready_p
) (insn
))
2385 /* We normally get here only if we don't want to move
2386 insn from the split block. */
2388 TODO_SPEC (insn
) = (TODO_SPEC (insn
) & ~SPECULATIVE
) | HARD_DEP
;
2392 /* DECISION is made. */
2394 if (TODO_SPEC (insn
) & SPECULATIVE
)
2395 generate_recovery_code (insn
);
2397 if (control_flow_insn_p (last_scheduled_insn
)
2398 /* This is used to to switch basic blocks by request
2399 from scheduler front-end (actually, sched-ebb.c only).
2400 This is used to process blocks with single fallthru
2401 edge. If succeeding block has jump, it [jump] will try
2402 move at the end of current bb, thus corrupting CFG. */
2403 || current_sched_info
->advance_target_bb (*target_bb
, insn
))
2405 *target_bb
= current_sched_info
->advance_target_bb
2412 x
= next_real_insn (last_scheduled_insn
);
2414 dump_new_block_header (1, *target_bb
, x
, tail
);
2417 last_scheduled_insn
= bb_note (*target_bb
);
2420 /* Update counters, etc in the scheduler's front end. */
2421 (*current_sched_info
->begin_schedule_ready
) (insn
,
2422 last_scheduled_insn
);
2425 last_scheduled_insn
= insn
;
2427 if (memcmp (curr_state
, temp_state
, dfa_state_size
) != 0)
2429 cycle_issued_insns
++;
2430 memcpy (curr_state
, temp_state
, dfa_state_size
);
2433 if (targetm
.sched
.variable_issue
)
2435 targetm
.sched
.variable_issue (sched_dump
, sched_verbose
,
2436 insn
, can_issue_more
);
2437 /* A naked CLOBBER or USE generates no instruction, so do
2438 not count them against the issue rate. */
2439 else if (GET_CODE (PATTERN (insn
)) != USE
2440 && GET_CODE (PATTERN (insn
)) != CLOBBER
)
2443 advance
= schedule_insn (insn
);
2445 /* After issuing an asm insn we should start a new cycle. */
2446 if (advance
== 0 && asm_p
)
2451 first_cycle_insn_p
= 0;
2453 /* Sort the ready list based on priority. This must be
2454 redone here, as schedule_insn may have readied additional
2455 insns that will not be sorted correctly. */
2456 if (ready
.n_ready
> 0)
2457 ready_sort (&ready
);
2459 if (targetm
.sched
.reorder2
2460 && (ready
.n_ready
== 0
2461 || !SCHED_GROUP_P (ready_element (&ready
, 0))))
2464 targetm
.sched
.reorder2 (sched_dump
, sched_verbose
,
2466 ? ready_lastpos (&ready
) : NULL
,
2467 &ready
.n_ready
, clock_var
);
2475 fprintf (sched_dump
, ";;\tReady list (final): ");
2476 debug_ready_list (&ready
);
2479 if (current_sched_info
->queue_must_finish_empty
)
2480 /* Sanity check -- queue must be empty now. Meaningless if region has
2482 gcc_assert (!q_size
&& !ready
.n_ready
);
2485 /* We must maintain QUEUE_INDEX between blocks in region. */
2486 for (i
= ready
.n_ready
- 1; i
>= 0; i
--)
2490 x
= ready_element (&ready
, i
);
2491 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
2492 TODO_SPEC (x
) = (TODO_SPEC (x
) & ~SPECULATIVE
) | HARD_DEP
;
2496 for (i
= 0; i
<= max_insn_queue_index
; i
++)
2499 for (link
= insn_queue
[i
]; link
; link
= XEXP (link
, 1))
2504 QUEUE_INDEX (x
) = QUEUE_NOWHERE
;
2505 TODO_SPEC (x
) = (TODO_SPEC (x
) & ~SPECULATIVE
) | HARD_DEP
;
2507 free_INSN_LIST_list (&insn_queue
[i
]);
2511 if (!current_sched_info
->queue_must_finish_empty
2512 || added_recovery_block_p
)
2514 /* INSN_TICK (minimum clock tick at which the insn becomes
2515 ready) may be not correct for the insn in the subsequent
2516 blocks of the region. We should use a correct value of
2517 `clock_var' or modify INSN_TICK. It is better to keep
2518 clock_var value equal to 0 at the start of a basic block.
2519 Therefore we modify INSN_TICK here. */
2520 fix_inter_tick (NEXT_INSN (prev_head
), last_scheduled_insn
);
2523 #ifdef ENABLE_CHECKING
2524 /* After the reload the ia64 backend doesn't maintain BB_END, so
2525 if we want to check anything, better do it now.
2526 And it already clobbered previously scheduled code. */
2527 if (reload_completed
)
2528 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head
)), 0);
2531 if (targetm
.sched
.md_finish
)
2532 targetm
.sched
.md_finish (sched_dump
, sched_verbose
);
2534 /* Update head/tail boundaries. */
2535 head
= NEXT_INSN (prev_head
);
2536 tail
= last_scheduled_insn
;
2538 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2539 previously found among the insns. Insert them at the beginning
2543 basic_block head_bb
= BLOCK_FOR_INSN (head
);
2544 rtx note_head
= note_list
;
2546 while (PREV_INSN (note_head
))
2548 set_block_for_insn (note_head
, head_bb
);
2549 note_head
= PREV_INSN (note_head
);
2551 /* In the above cycle we've missed this note: */
2552 set_block_for_insn (note_head
, head_bb
);
2554 PREV_INSN (note_head
) = PREV_INSN (head
);
2555 NEXT_INSN (PREV_INSN (head
)) = note_head
;
2556 PREV_INSN (head
) = note_list
;
2557 NEXT_INSN (note_list
) = head
;
2564 fprintf (sched_dump
, ";; total time = %d\n;; new head = %d\n",
2565 clock_var
, INSN_UID (head
));
2566 fprintf (sched_dump
, ";; new tail = %d\n\n",
2570 current_sched_info
->head
= head
;
2571 current_sched_info
->tail
= tail
;
2576 for (i
= 0; i
<= rgn_n_insns
; i
++)
2577 free (choice_stack
[i
].state
);
2578 free (choice_stack
);
2581 /* Set_priorities: compute priority of each insn in the block. */
/* NOTE(review): fragmentary extraction — braces, locals and the final
   return are missing (original line numbers jump).  Visible logic: walk
   insns backwards from TAIL to HEAD calling priority () on each, and track
   the maximum known priority into current_sched_info.  */
2584 set_priorities (rtx head
, rtx tail
)
2588 int sched_max_insns_priority
=
2589 current_sched_info
->sched_max_insns_priority
;
/* Early exit when the block is a single non-insn — presumably returns 0;
   the return statement itself is missing from this extraction.  */
2592 if (head
== tail
&& (! INSN_P (head
)))
2597 prev_head
= PREV_INSN (head
);
2598 for (insn
= tail
; insn
!= prev_head
; insn
= PREV_INSN (insn
))
2604 (void) priority (insn
);
2606 if (INSN_PRIORITY_KNOWN (insn
))
2607 sched_max_insns_priority
=
2608 MAX (sched_max_insns_priority
, INSN_PRIORITY (insn
));
2611 current_sched_info
->sched_max_insns_priority
= sched_max_insns_priority
;
2616 /* Next LUID to assign to an instruction. */
2619 /* Initialize some global state for the scheduler. */
/* NOTE(review): body of sched_init — the signature line is missing from
   this extraction, as are many braces/conditions.  Visible work: copy
   sched_info to a writable var, set up dumping/verbosity, speculation
   info, issue_rate, per-insn h_i_d defaults, DFA state, LUIDs, alias
   analysis, reg weights, and target hooks.  */
2628 /* Switch to working copy of sched_info. */
/* NOTE(review): "¤t_sched_info_var" is a mis-encoded
   "&current_sched_info_var" (HTML entity &curren; garbling) — restore the
   ampersand when repairing this file.  Same on the assignment below.  */
2629 memcpy (¤t_sched_info_var
, current_sched_info
,
2630 sizeof (current_sched_info_var
));
2631 current_sched_info
= ¤t_sched_info_var
;
2633 /* Disable speculative loads in their presence if cc0 defined. */
2635 flag_schedule_speculative_load
= 0;
2638 /* Set dump and sched_verbose for the desired debugging output. If no
2639 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2640 For -fsched-verbose=N, N>=10, print everything to stderr. */
2641 sched_verbose
= sched_verbose_param
;
2642 if (sched_verbose_param
== 0 && dump_file
)
2644 sched_dump
= ((sched_verbose_param
>= 10 || !dump_file
)
2645 ? stderr
: dump_file
);
2647 /* Initialize SPEC_INFO. */
2648 if (targetm
.sched
.set_sched_flags
)
2650 spec_info
= &spec_info_var
;
2651 targetm
.sched
.set_sched_flags (spec_info
);
2652 if (current_sched_info
->flags
& DO_SPECULATION
)
/* weakness_cutoff scales the --param sched-spec-prob-cutoff percentage
   into the MAX_DEP_WEAK fixed-point range used by dep_weak ().  */
2653 spec_info
->weakness_cutoff
=
2654 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF
) * MAX_DEP_WEAK
) / 100;
2656 /* So we won't read anything accidentally. */
2658 #ifdef ENABLE_CHECKING
2659 check_sched_flags ();
2663 /* So we won't read anything accidentally. */
2666 /* Initialize issue_rate. */
2667 if (targetm
.sched
.issue_rate
)
2668 issue_rate
= targetm
.sched
.issue_rate ();
2672 if (cached_issue_rate
!= issue_rate
)
2674 cached_issue_rate
= issue_rate
;
2675 /* To invalidate max_lookahead_tries: */
2676 cached_first_cycle_multipass_dfa_lookahead
= 0;
/* Reset per-insn scheduler bookkeeping to "unknown" defaults.  */
2683 for (i
= 0; i
< old_max_uid
; i
++)
2686 h_i_d
[i
].todo_spec
= HARD_DEP
;
2687 h_i_d
[i
].queue_index
= QUEUE_NOWHERE
;
2688 h_i_d
[i
].tick
= INVALID_TICK
;
2689 h_i_d
[i
].inter_tick
= INVALID_TICK
;
2692 if (targetm
.sched
.init_dfa_pre_cycle_insn
)
2693 targetm
.sched
.init_dfa_pre_cycle_insn ();
2695 if (targetm
.sched
.init_dfa_post_cycle_insn
)
2696 targetm
.sched
.init_dfa_post_cycle_insn ();
2699 dfa_state_size
= state_size ();
2700 curr_state
= xmalloc (dfa_state_size
);
/* Assign LUIDs in insn-stream order; loop over basic blocks is partly
   missing from this extraction.  */
2705 for (insn
= BB_HEAD (b
); ; insn
= NEXT_INSN (insn
))
2707 INSN_LUID (insn
) = luid
;
2709 /* Increment the next luid, unless this is a note. We don't
2710 really need separate IDs for notes and we don't want to
2711 schedule differently depending on whether or not there are
2712 line-number notes, i.e., depending on whether or not we're
2713 generating debugging information. */
2717 if (insn
== BB_END (b
))
2721 init_dependency_caches (luid
);
2723 init_alias_analysis ();
2726 old_last_basic_block
= 0;
2731 if (current_sched_info
->flags
& USE_GLAT
)
2734 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2735 removing death notes. */
2736 FOR_EACH_BB_REVERSE (b
)
2737 find_insn_reg_weight (b
);
2739 if (targetm
.sched
.md_init_global
)
2740 targetm
.sched
.md_init_global (sched_dump
, sched_verbose
, old_max_uid
);
/* Zero the speculation-statistics counters and recovery-block anchor.  */
2742 nr_begin_data
= nr_begin_control
= nr_be_in_data
= nr_be_in_control
= 0;
2743 before_recovery
= 0;
2745 #ifdef ENABLE_CHECKING
2746 /* This is used preferably for finding bugs in check_cfg () itself. */
2751 /* Free global data used during insn scheduling. */
/* NOTE(review): body of sched_finish — signature and several argument
   lines are missing from this extraction (e.g. the fprintf calls at
   2775/2778 have their c/nr_* argument lines dropped).  Visible work:
   free global scheduler state, call the target's md_finish_global hook,
   dump speculation statistics, and clear current_sched_info.  */
2759 free_dependency_caches ();
2760 end_alias_analysis ();
2761 free (line_note_head
);
2764 if (targetm
.sched
.md_finish_global
)
2765 targetm
.sched
.md_finish_global (sched_dump
, sched_verbose
);
2767 if (spec_info
&& spec_info
->dump
)
/* 'a' = after reload, 'b' = before reload — distinguishes the two
   scheduling passes in the dump labels below.  */
2769 char c
= reload_completed
? 'a' : 'b';
2771 fprintf (spec_info
->dump
,
2772 ";; %s:\n", current_function_name ());
2774 fprintf (spec_info
->dump
,
2775 ";; Procedure %cr-begin-data-spec motions == %d\n",
2777 fprintf (spec_info
->dump
,
2778 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2780 fprintf (spec_info
->dump
,
2781 ";; Procedure %cr-begin-control-spec motions == %d\n",
2782 c
, nr_begin_control
);
2783 fprintf (spec_info
->dump
,
2784 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2785 c
, nr_be_in_control
);
2788 #ifdef ENABLE_CHECKING
2789 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2790 if (!reload_completed
)
2794 current_sched_info
= NULL
;
2797 /* Fix INSN_TICKs of the instructions in the current block as well as
2798 INSN_TICKs of their dependents.
2799 HEAD and TAIL are the begin and the end of the current scheduled block. */
/* NOTE(review): fragmentary extraction — braces and some tick-adjustment
   arithmetic lines are missing.  Visible contract: normalize INSN_TICK of
   every insn in [HEAD, TAIL] (and of their dependents via INSN_DEPEND) so
   ticks stay consistent when clock_var restarts at 0 in the next block;
   PROCESSED marks insns already corrected.  */
2801 fix_inter_tick (rtx head
, rtx tail
)
2803 /* Set of instructions with corrected INSN_TICK. */
2804 bitmap_head processed
;
/* next_clock = clock_var + 1: the tick base used for the correction;
   the subtraction that applies it is missing from this extraction.  */
2805 int next_clock
= clock_var
+ 1;
2807 bitmap_initialize (&processed
, 0);
2809 /* Iterates over scheduled instructions and fix their INSN_TICKs and
2810 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2811 across different blocks. */
2812 for (tail
= NEXT_INSN (tail
); head
!= tail
; head
= NEXT_INSN (head
))
2819 tick
= INSN_TICK (head
);
2820 gcc_assert (tick
>= MIN_TICK
);
2822 /* Fix INSN_TICK of instruction from just scheduled block. */
2823 if (!bitmap_bit_p (&processed
, INSN_LUID (head
)))
2825 bitmap_set_bit (&processed
, INSN_LUID (head
));
/* Clamp to MIN_TICK — the adjustment that can push tick below it is in
   the missing lines (presumably tick -= next_clock).  */
2828 if (tick
< MIN_TICK
)
2831 INSN_TICK (head
) = tick
;
/* Propagate the same correction to forward dependents.  */
2834 for (link
= INSN_DEPEND (head
); link
; link
= XEXP (link
, 1))
2838 next
= XEXP (link
, 0);
2839 tick
= INSN_TICK (next
);
2841 if (tick
!= INVALID_TICK
2842 /* If NEXT has its INSN_TICK calculated, fix it.
2843 If not - it will be properly calculated from
2844 scratch later in fix_tick_ready. */
2845 && !bitmap_bit_p (&processed
, INSN_LUID (next
)))
2847 bitmap_set_bit (&processed
, INSN_LUID (next
));
2850 if (tick
< MIN_TICK
)
/* INTER_TICK keeps the maximum corrected tick seen for NEXT across
   blocks; INSN_TICK is then refreshed from it.  */
2853 if (tick
> INTER_TICK (next
))
2854 INTER_TICK (next
) = tick
;
2856 tick
= INTER_TICK (next
);
2858 INSN_TICK (next
) = tick
;
2863 bitmap_clear (&processed
);
2866 /* Check if NEXT is ready to be added to the ready or queue list.
2867 If "yes", add it to the proper list.
2869 -1 - is not ready yet,
2870 0 - added to the ready list,
2871 0 < N - queued for N cycles. */
/* NOTE(review): fragmentary extraction — braces, the old_ts declaration,
   and several branches are missing.  Visible contract: recompute
   TODO_SPEC (NEXT) from its remaining LOG_LINKS, apply speculation
   policy (weakness cutoff, speculate_insn, pattern changes), then hand
   NEXT to fix_tick_ready () which queues/readies it; returns its result
   (-1 not ready, 0 ready, N queued — per the header comment above).  */
2873 try_ready (rtx next
)
2878 ts
= &TODO_SPEC (next
);
/* old_ts must be only SPECULATIVE/HARD_DEP bits, and at least one set.  */
2881 gcc_assert (!(old_ts
& ~(SPECULATIVE
| HARD_DEP
))
2882 && ((old_ts
& HARD_DEP
)
2883 || (old_ts
& SPECULATIVE
)));
2885 if (!(current_sched_info
->flags
& DO_SPECULATION
))
2887 if (!LOG_LINKS (next
))
2892 *ts
&= ~SPECULATIVE
& ~HARD_DEP
;
2894 link
= LOG_LINKS (next
);
2897 /* LOG_LINKS are maintained sorted.
2898 So if DEP_STATUS of the first dep is SPECULATIVE,
2899 than all other deps are speculative too. */
2900 if (DEP_STATUS (link
) & SPECULATIVE
)
2902 /* Now we've got NEXT with speculative deps only.
2903 1. Look at the deps to see what we have to do.
2904 2. Check if we can do 'todo'. */
2905 *ts
= DEP_STATUS (link
) & SPECULATIVE
;
2906 while ((link
= XEXP (link
, 1)))
2907 *ts
= ds_merge (*ts
, DEP_STATUS (link
) & SPECULATIVE
);
/* Probability of the merged speculation below the configured cutoff:
   demote to a hard dependency.  */
2909 if (dep_weak (*ts
) < spec_info
->weakness_cutoff
)
2910 /* Too few points. */
2911 *ts
= (*ts
& ~SPECULATIVE
) | HARD_DEP
;
2919 gcc_assert (*ts
== old_ts
2920 && QUEUE_INDEX (next
) == QUEUE_NOWHERE
);
2921 else if (current_sched_info
->new_ready
)
2922 *ts
= current_sched_info
->new_ready (next
, *ts
);
2924 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
2925 have its original pattern or changed (speculative) one. This is due
2926 to changing ebb in region scheduling.
2927 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
2928 has speculative pattern.
2930 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
2931 control-speculative NEXT could have been discarded by sched-rgn.c
2932 (the same case as when discarded by can_schedule_ready_p ()). */
2934 if ((*ts
& SPECULATIVE
)
2935 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
2936 need to change anything. */
2942 gcc_assert ((*ts
& SPECULATIVE
) && !(*ts
& ~SPECULATIVE
));
/* Ask the target to produce a speculative form of NEXT's pattern.  */
2944 res
= speculate_insn (next
, *ts
, &new_pat
);
2949 /* It would be nice to change DEP_STATUS of all dependences,
2950 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
2951 so we won't reanalyze anything. */
2952 *ts
= (*ts
& ~SPECULATIVE
) | HARD_DEP
;
2956 /* We follow the rule, that every speculative insn
2957 has non-null ORIG_PAT. */
2958 if (!ORIG_PAT (next
))
2959 ORIG_PAT (next
) = PATTERN (next
);
2963 if (!ORIG_PAT (next
))
2964 /* If we gonna to overwrite the original pattern of insn,
2966 ORIG_PAT (next
) = PATTERN (next
);
2968 change_pattern (next
, new_pat
);
2976 /* We need to restore pattern only if (*ts == 0), because otherwise it is
2977 either correct (*ts & SPECULATIVE),
2978 or we simply don't care (*ts & HARD_DEP). */
2980 gcc_assert (!ORIG_PAT (next
)
2981 || !IS_SPECULATION_BRANCHY_CHECK_P (next
));
2985 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
2986 control-speculative NEXT could have been discarded by sched-rgn.c
2987 (the same case as when discarded by can_schedule_ready_p ()). */
2988 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
2990 change_queue_index (next
, QUEUE_NOWHERE
);
2993 else if (!(*ts
& BEGIN_SPEC
) && ORIG_PAT (next
) && !IS_SPECULATION_CHECK_P (next
))
2994 /* We should change pattern of every previously speculative
2995 instruction - and we determine if NEXT was speculative by using
2996 ORIG_PAT field. Except one case - speculation checks have ORIG_PAT
2997 pat too, so skip them. */
2999 change_pattern (next
, ORIG_PAT (next
));
3000 ORIG_PAT (next
) = 0;
3003 if (sched_verbose
>= 2)
3005 int s
= TODO_SPEC (next
);
3007 fprintf (sched_dump
, ";;\t\tdependencies resolved: insn %s",
3008 (*current_sched_info
->print_insn
) (next
, 0));
3010 if (spec_info
&& spec_info
->dump
)
3013 fprintf (spec_info
->dump
, "; data-spec;");
3014 if (s
& BEGIN_CONTROL
)
3015 fprintf (spec_info
->dump
, "; control-spec;");
3016 if (s
& BE_IN_CONTROL
)
3017 fprintf (spec_info
->dump
, "; in-control-spec;");
3020 fprintf (sched_dump
, "\n");
3023 adjust_priority (next
);
3025 return fix_tick_ready (next
);
3028 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
/* NOTE(review): fragmentary extraction — locals, the do-loop head, the
   MAX that folds tick1 into tick, and the final return are missing.
   Visible contract: compute NEXT's earliest issue tick from its
   RESOLVED_DEPS, store it, and move NEXT to ready/queue via
   change_queue_index (); delay relative to clock_var.  */
3030 fix_tick_ready (rtx next
)
3035 link
= RESOLVED_DEPS (next
);
3041 tick
= INSN_TICK (next
);
3042 /* if tick is not equal to INVALID_TICK, then update
3043 INSN_TICK of NEXT with the most recent resolved dependence
3044 cost. Otherwise, recalculate from scratch. */
3045 full_p
= tick
== INVALID_TICK
;
/* Loop body (do { ... } — opening missing): for each resolved producer
   PRO, candidate tick is producer's tick plus the dependence cost.  */
3051 pro
= XEXP (link
, 0);
3052 gcc_assert (INSN_TICK (pro
) >= MIN_TICK
);
3054 tick1
= INSN_TICK (pro
) + insn_cost (pro
, link
, next
);
3058 while ((link
= XEXP (link
, 1)) && full_p
);
3063 INSN_TICK (next
) = tick
;
3065 delay
= tick
- clock_var
;
/* Non-positive delay means NEXT is ready this cycle (guard condition is
   in the missing lines).  */
3067 delay
= QUEUE_READY
;
3069 change_queue_index (next
, delay
);
3074 /* Move NEXT to the proper queue list with (DELAY >= 1),
3075 or add it to the ready list (DELAY == QUEUE_READY),
3076 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
/* NOTE(review): fragmentary extraction — braces, an `else` before the
   queue_remove path, the early return, and the assert's closing arm are
   missing.  Visible contract (matches the header comment above): move
   NEXT to the DELAY-cycle queue, the ready list (QUEUE_READY), or
   nowhere (QUEUE_NOWHERE), after removing it from its current place.  */
3078 change_queue_index (rtx next
, int delay
)
3080 int i
= QUEUE_INDEX (next
);
3082 gcc_assert (QUEUE_NOWHERE
<= delay
&& delay
<= max_insn_queue_index
3084 gcc_assert (i
!= QUEUE_SCHEDULED
);
/* Already in the requested slot — nothing to do (early return missing
   from extraction).  */
3086 if ((delay
> 0 && NEXT_Q_AFTER (q_ptr
, delay
) == i
)
3087 || (delay
< 0 && delay
== i
))
3088 /* We have nothing to do. */
3091 /* Remove NEXT from wherever it is now. */
3092 if (i
== QUEUE_READY
)
3093 ready_remove_insn (next
);
/* presumably guarded by `else if (i >= 1)` in the missing line.  */
3095 queue_remove (next
);
3097 /* Add it to the proper place. */
3098 if (delay
== QUEUE_READY
)
3099 ready_add (readyp
, next
, false);
3100 else if (delay
>= 1)
3101 queue_insn (next
, delay
);
3103 if (sched_verbose
>= 2)
3105 fprintf (sched_dump
, ";;\t\ttick updated: insn %s",
3106 (*current_sched_info
->print_insn
) (next
, 0));
3108 if (delay
== QUEUE_READY
)
3109 fprintf (sched_dump
, " into ready\n");
3110 else if (delay
>= 1)
3111 fprintf (sched_dump
, " into queue with cost=%d\n", delay
);
3113 fprintf (sched_dump
, " removed from ready or queue lists\n");
3117 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
/* NOTE(review): braces/locals missing from extraction.  Visible logic:
   decrement NEXT's unresolved-dep count, unlink the INSN dependence from
   LOG_LINKS and push it onto RESOLVED_DEPS; the final assert checks the
   count and the list empty out together.  */
3119 resolve_dep (rtx next
, rtx insn
)
3123 INSN_DEP_COUNT (next
)--;
3125 dep
= remove_list_elem (insn
, &LOG_LINKS (next
));
3126 XEXP (dep
, 1) = RESOLVED_DEPS (next
);
3127 RESOLVED_DEPS (next
) = dep
;
3129 gcc_assert ((INSN_DEP_COUNT (next
) != 0 || !LOG_LINKS (next
))
3130 && (LOG_LINKS (next
) || INSN_DEP_COUNT (next
) == 0));
3133 /* Extend H_I_D data. */
/* NOTE(review): body of extend_h_i_d — the signature line is missing
   from this extraction.  Grows the per-insn h_i_d array (zero-filling the
   new tail via xrecalloc) to cover all current UIDs, then notifies the
   target hook.  */
3137 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3138 pseudos which do not cross calls. */
3139 int new_max_uid
= get_max_uid() + 1;
3141 h_i_d
= xrecalloc (h_i_d
, new_max_uid
, old_max_uid
, sizeof (*h_i_d
));
3142 old_max_uid
= new_max_uid
;
3144 if (targetm
.sched
.h_i_d_extended
)
3145 targetm
.sched
.h_i_d_extended ();
3148 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3149 N_NEW_INSNS is the number of additional elements to allocate. */
/* NOTE(review): fragmentary extraction — the third XRESIZEVEC argument
   (new choice_stack length) and braces are missing.  Grows the ready
   vector, ready_try flags, and choice_stack (allocating DFA state for
   each new entry) by N_NEW_INSNS; rgn_n_insns is bumped accordingly.  */
3151 extend_ready (int n_new_insns
)
/* veclen = insns + 1 + issue_rate: headroom matches the original sizing
   convention used elsewhere for the ready list.  */
3155 readyp
->veclen
= rgn_n_insns
+ n_new_insns
+ 1 + issue_rate
;
3156 readyp
->vec
= XRESIZEVEC (rtx
, readyp
->vec
, readyp
->veclen
);
3158 ready_try
= xrecalloc (ready_try
, rgn_n_insns
+ n_new_insns
+ 1,
3159 rgn_n_insns
+ 1, sizeof (char));
3161 rgn_n_insns
+= n_new_insns
;
3163 choice_stack
= XRESIZEVEC (struct choice_entry
, choice_stack
,
/* Allocate DFA state only for the newly added stack entries.  */
3166 for (i
= rgn_n_insns
; n_new_insns
--; i
--)
3167 choice_stack
[i
].state
= xmalloc (dfa_state_size
);
3170 /* Extend global scheduler structures (those, that live across calls to
3171 schedule_block) to include information about just emitted INSN. */
/* NOTE(review): braces and the extend_h_i_d () call (per the header
   comment, scheduler-scope structures) are missing from this extraction.
   Extends scheduler-lifetime structures for a newly emitted INSN.  */
3173 extend_global (rtx insn
)
3175 gcc_assert (INSN_P (insn
));
3176 /* These structures have scheduler scope. */
3180 extend_dependency_caches (1, 0);
3183 /* Extends global and local scheduler structures to include information
3184 about just emitted INSN. */
/* NOTE(review): braces missing from extraction.  Extends both
   scheduler-scope (extend_global) and block-scope structures for INSN,
   then registers it with the front end via add_remove_insn (insn, 0) —
   0 presumably meaning "add".  */
3186 extend_all (rtx insn
)
3188 extend_global (insn
);
3190 /* These structures have block scope. */
3193 (*current_sched_info
->add_remove_insn
) (insn
, 0);
3196 /* Initialize h_i_d entry of the new INSN with default values.
3197 Values, that are not explicitly initialized here, hold zero. */
/* NOTE(review): braces missing from extraction.  Fills the h_i_d entry
   of a new INSN with "unknown" defaults: fresh LUID, uncomputed cost,
   hard-dep TODO_SPEC, not queued, invalid ticks; then computes its
   register weight.  */
3199 init_h_i_d (rtx insn
)
3201 INSN_LUID (insn
) = luid
++;
3202 INSN_COST (insn
) = -1;
3203 TODO_SPEC (insn
) = HARD_DEP
;
3204 QUEUE_INDEX (insn
) = QUEUE_NOWHERE
;
3205 INSN_TICK (insn
) = INVALID_TICK
;
3206 INTER_TICK (insn
) = INVALID_TICK
;
3207 find_insn_reg_weight1 (insn
);
3210 /* Generates recovery code for INSN. */
/* NOTE(review): braces missing from extraction.  Dispatches recovery-code
   generation for a speculative INSN: BEGIN_SPEC bits go to
   begin_speculative_block (), BE_IN_SPEC bits to
   add_to_speculative_block ().  */
3212 generate_recovery_code (rtx insn
)
3214 if (TODO_SPEC (insn
) & BEGIN_SPEC
)
3215 begin_speculative_block (insn
);
3217 /* Here we have insn with no dependencies to
3218 instructions other then CHECK_SPEC ones. */
3220 if (TODO_SPEC (insn
) & BE_IN_SPEC
)
3221 add_to_speculative_block (insn
);
3225 Tries to add speculative dependencies of type FS between instructions
3226 in LINK list and TWIN. */
/* NOTE(review): fragmentary extraction — the actual conditions of the
   three `if (...)` tests survive only as their explanatory comments (the
   code lines carrying the ds/fs bit tests are missing), as are braces and
   the else-branch that merely marks the dep.  Visible contract: for each
   consumer on LINK, derive a speculative dependence status and attach a
   dependence from CONSUMER to TWIN.  */
3228 process_insn_depend_be_in_spec (rtx link
, rtx twin
, ds_t fs
)
3230 for (; link
; link
= XEXP (link
, 1))
3235 consumer
= XEXP (link
, 0);
3237 ds
= DEP_STATUS (link
);
3239 if (/* If we want to create speculative dep. */
3241 /* And we can do that because this is a true dep. */
3242 && (ds
& DEP_TYPES
) == DEP_TRUE
)
3244 gcc_assert (!(ds
& BE_IN_SPEC
));
3246 if (/* If this dep can be overcome with 'begin speculation'. */
3248 /* Then we have a choice: keep the dep 'begin speculative'
3249 or transform it into 'be in speculative'. */
3251 if (/* In try_ready we assert that if insn once became ready
3252 it can be removed from the ready (or queue) list only
3253 due to backend decision. Hence we can't let the
3254 probability of the speculative dep to decrease. */
3255 dep_weak (ds
) <= dep_weak (fs
))
3256 /* Transform it to be in speculative. */
3257 ds
= (ds
& ~BEGIN_SPEC
) | fs
;
3260 /* Mark the dep as 'be in speculative'. */
3264 add_back_forw_dep (consumer
, twin
, REG_NOTE_KIND (link
), ds
);
3268 /* Generates recovery code for BEGIN speculative INSN. */
/* NOTE(review): braces and the statistics increments (nr_begin_data /
   nr_begin_control, per the counters zeroed in sched_init) are missing
   from this extraction.  Creates the check/twin recovery structure for a
   BEGIN-speculative INSN and clears its BEGIN_SPEC bits.  */
3270 begin_speculative_block (rtx insn
)
3272 if (TODO_SPEC (insn
) & BEGIN_DATA
)
3274 if (TODO_SPEC (insn
) & BEGIN_CONTROL
)
3277 create_check_block_twin (insn
, false);
3279 TODO_SPEC (insn
) &= ~BEGIN_SPEC
;
3282 /* Generates recovery code for BE_IN speculative INSN. */
/* NOTE(review): fragmentary extraction — braces, loop heads (an outer
   do/while over recovery blocks is implied by `while (LOG_LINKS (insn))`
   at 3387), and several dependence-wiring lines are missing.  Visible
   contract: convert all simple speculation checks among INSN's producers
   to branchy ones, then for each recovery block REC emit a TWIN copy of
   INSN inside it, wire twin/check dependences, detach INSN's deps into
   that block, and finally make each twin output-depend on INSN.  */
3284 add_to_speculative_block (rtx insn
)
3287 rtx link
, twins
= NULL
;
3289 ts
= TODO_SPEC (insn
);
3290 gcc_assert (!(ts
& ~BE_IN_SPEC
));
/* Statistics increments for nr_be_in_data / nr_be_in_control are in the
   missing lines after these two tests.  */
3292 if (ts
& BE_IN_DATA
)
3294 if (ts
& BE_IN_CONTROL
)
3297 TODO_SPEC (insn
) &= ~BE_IN_SPEC
;
3298 gcc_assert (!TODO_SPEC (insn
));
3300 DONE_SPEC (insn
) |= ts
;
3302 /* First we convert all simple checks to branchy. */
3303 for (link
= LOG_LINKS (insn
); link
;)
3307 check
= XEXP (link
, 0);
3309 if (IS_SPECULATION_SIMPLE_CHECK_P (check
))
3311 create_check_block_twin (check
, true);
/* Restart the scan: create_check_block_twin may rewrite LOG_LINKS.  */
3312 link
= LOG_LINKS (insn
);
3315 link
= XEXP (link
, 1);
3318 clear_priorities (insn
);
3322 rtx link
, check
, twin
;
3325 link
= LOG_LINKS (insn
);
3326 gcc_assert (!(DEP_STATUS (link
) & BEGIN_SPEC
)
3327 && (DEP_STATUS (link
) & BE_IN_SPEC
)
3328 && (DEP_STATUS (link
) & DEP_TYPES
) == DEP_TRUE
);
3330 check
= XEXP (link
, 0);
3332 gcc_assert (!IS_SPECULATION_CHECK_P (check
) && !ORIG_PAT (check
)
3333 && QUEUE_INDEX (check
) == QUEUE_NOWHERE
);
3335 rec
= BLOCK_FOR_INSN (check
);
/* TWIN = copy of INSN emitted at the end of the recovery block.  */
3337 twin
= emit_insn_before (copy_rtx (PATTERN (insn
)), BB_END (rec
));
3338 extend_global (twin
);
3340 RESOLVED_DEPS (twin
) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn
));
3342 if (sched_verbose
&& spec_info
->dump
)
3343 /* INSN_BB (insn) isn't determined for twin insns yet.
3344 So we can't use current_sched_info->print_insn. */
3345 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
3346 INSN_UID (twin
), rec
->index
);
3348 twins
= alloc_INSN_LIST (twin
, twins
);
3350 /* Add dependences between TWIN and all appropriate
3351 instructions from REC. */
3354 add_back_forw_dep (twin
, check
, REG_DEP_TRUE
, DEP_TRUE
);
3358 link
= XEXP (link
, 1);
3361 check
= XEXP (link
, 0);
3362 if (BLOCK_FOR_INSN (check
) == rec
)
3372 process_insn_depend_be_in_spec (INSN_DEPEND (insn
), twin
, ts
);
/* Detach from INSN every dependence whose producer lives in REC —
   those now bind to TWIN instead.  */
3374 for (link
= LOG_LINKS (insn
); link
;)
3376 check
= XEXP (link
, 0);
3378 if (BLOCK_FOR_INSN (check
) == rec
)
3380 delete_back_forw_dep (insn
, check
);
3381 link
= LOG_LINKS (insn
);
3384 link
= XEXP (link
, 1);
3387 while (LOG_LINKS (insn
));
3389 /* We can't add the dependence between insn and twin earlier because
3390 that would make twin appear in the INSN_DEPEND (insn). */
3395 twin
= XEXP (twins
, 0);
3396 calc_priorities (twin
);
3397 add_back_forw_dep (twin
, insn
, REG_DEP_OUTPUT
, DEP_OUTPUT
);
3399 twin
= XEXP (twins
, 1);
3400 free_INSN_LIST_node (twins
);
3405 /* Extends and fills with zeros (only the new part) array pointed to by P. */
/* NOTE(review): braces and the trailing `return p;` are missing from this
   extraction (the header comment above says the function returns the
   grown array).  realloc-with-zero-fill: grow P from OLD_NMEMB to
   NEW_NMEMB elements of SIZE bytes, zeroing only the newly added tail.  */
3407 xrecalloc (void *p
, size_t new_nmemb
, size_t old_nmemb
, size_t size
)
3409 gcc_assert (new_nmemb
>= old_nmemb
);
3410 p
= XRESIZEVAR (void, p
, new_nmemb
* size
);
3411 memset (((char *) p
) + old_nmemb
* size
, 0, (new_nmemb
- old_nmemb
) * size
);
3415 /* Return the probability of speculation success for the speculation
/* NOTE(review): body of dep_weak — the signature, the res initialization,
   the loop head, and the speculation-bit test guarding the multiply are
   all missing from this extraction.  Visible logic: walk speculation
   types from FIRST_SPEC_TYPE to LAST_SPEC_TYPE (advancing by
   SPEC_TYPE_SHIFT), multiply the per-type weaknesses together, normalize
   by MAX_DEP_WEAK, clamp at MIN_DEP_WEAK, and assert the result stays
   within MAX_DEP_WEAK — i.e. a combined probability in fixed point.  */
3423 dt
= FIRST_SPEC_TYPE
;
3428 res
*= (ds_t
) get_dep_weak (ds
, dt
);
3432 if (dt
== LAST_SPEC_TYPE
)
3434 dt
<<= SPEC_TYPE_SHIFT
;
/* Each multiply scales by MAX_DEP_WEAK, so divide back once per factor
   (the loop structure around this is in the missing lines).  */
3440 res
/= MAX_DEP_WEAK
;
3442 if (res
< MIN_DEP_WEAK
)
3445 gcc_assert (res
<= MAX_DEP_WEAK
);
3451 Find fallthru edge from PRED. */
/* NOTE(review): fragmentary extraction — the `return e;` / `return NULL;`
   statements and braces are missing.  Finds the fallthru edge from PRED
   to its layout successor, scanning whichever edge list (PRED->succs or
   SUCC->preds) is shorter; the asserts check the found edge really links
   PRED to SUCC.  */
3453 find_fallthru_edge (basic_block pred
)
3459 succ
= pred
->next_bb
;
3460 gcc_assert (succ
->prev_bb
== pred
);
3462 if (EDGE_COUNT (pred
->succs
) <= EDGE_COUNT (succ
->preds
))
3464 FOR_EACH_EDGE (e
, ei
, pred
->succs
)
3465 if (e
->flags
& EDGE_FALLTHRU
)
3467 gcc_assert (e
->dest
== succ
);
3473 FOR_EACH_EDGE (e
, ei
, succ
->preds
)
3474 if (e
->flags
& EDGE_FALLTHRU
)
3476 gcc_assert (e
->src
== pred
);
3484 /* Initialize BEFORE_RECOVERY variable. */
/* NOTE(review): fragmentary extraction — locals, braces, and the
   `if (e)` guard around the two-block construction are missing.  Sets up
   BEFORE_RECOVERY: if the last BB falls through to EXIT, splice in a
   jump block SINGLE and an empty landing block EMPTY so recovery blocks
   can be emitted between them; otherwise anchor at the existing last BB.  */
3486 init_before_recovery (void)
3491 last
= EXIT_BLOCK_PTR
->prev_bb
;
3492 e
= find_fallthru_edge (last
);
3496 /* We create two basic blocks:
3497 1. Single instruction block is inserted right after E->SRC
3499 2. Empty block right before EXIT_BLOCK.
3500 Between these two blocks recovery blocks will be emitted. */
3502 basic_block single
, empty
;
3505 single
= create_empty_bb (last
);
3506 empty
= create_empty_bb (single
);
/* New blocks inherit LAST's profile counts/frequency and partition so
   the CFG profile stays consistent.  */
3508 single
->count
= last
->count
;
3509 empty
->count
= last
->count
;
3510 single
->frequency
= last
->frequency
;
3511 empty
->frequency
= last
->frequency
;
3512 BB_COPY_PARTITION (single
, last
);
3513 BB_COPY_PARTITION (empty
, last
);
3515 redirect_edge_succ (e
, single
);
3516 make_single_succ_edge (single
, empty
, 0);
3517 make_single_succ_edge (empty
, EXIT_BLOCK_PTR
,
3518 EDGE_FALLTHRU
| EDGE_CAN_FALLTHRU
);
/* SINGLE's only insn: an explicit jump to EMPTY's label (so the edge
   SINGLE->EMPTY is not a fallthru the emitter could disturb).  */
3520 label
= block_label (empty
);
3521 x
= emit_jump_insn_after (gen_jump (label
), BB_END (single
));
3522 JUMP_LABEL (x
) = label
;
3523 LABEL_NUSES (label
)++;
3526 emit_barrier_after (x
);
3528 add_block (empty
, 0);
3529 add_block (single
, 0);
3531 before_recovery
= single
;
3533 if (sched_verbose
>= 2 && spec_info
->dump
)
3534 fprintf (spec_info
->dump
,
3535 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3536 last
->index
, single
->index
, empty
->index
);
/* else-branch: no fallthru to EXIT — anchor recovery emission at LAST.  */
3539 before_recovery
= last
;
3542 /* Returns new recovery block. */
/* NOTE(review): fragmentary extraction — locals, the fprintf's rec->index
   argument line, and the final `return rec;` are missing (the header
   comment above says a new recovery block is returned).  Lazily creates
   BEFORE_RECOVERY, then emits a labeled, barrier-terminated basic block
   after it, placed in the cold partition when partitioning is active.  */
3544 create_recovery_block (void)
3549 added_recovery_block_p
= true;
3551 if (!before_recovery
)
3552 init_before_recovery ();
3554 label
= gen_label_rtx ();
/* New block goes right after BEFORE_RECOVERY's trailing barrier.  */
3555 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery
))));
3556 label
= emit_label_after (label
, NEXT_INSN (BB_END (before_recovery
)));
3558 rec
= create_basic_block (label
, label
, before_recovery
);
3559 emit_barrier_after (BB_END (rec
));
3561 if (BB_PARTITION (before_recovery
) != BB_UNPARTITIONED
)
3562 BB_SET_PARTITION (rec
, BB_COLD_PARTITION
);
3564 if (sched_verbose
&& spec_info
->dump
)
3565 fprintf (spec_info
->dump
, ";;\t\tGenerated recovery block rec%d\n",
/* Chain subsequent recovery blocks after this one.  */
3568 before_recovery
= rec
;
3573 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3574 INSN is a simple check, that should be converted to branchy one. */
3576 create_check_block_twin (rtx insn
, bool mutate_p
)
3579 rtx label
, check
, twin
, link
;
3582 gcc_assert (ORIG_PAT (insn
)
3584 || (IS_SPECULATION_SIMPLE_CHECK_P (insn
)
3585 && !(TODO_SPEC (insn
) & SPECULATIVE
))));
3587 /* Create recovery block. */
3588 if (mutate_p
|| targetm
.sched
.needs_block_p (insn
))
3590 rec
= create_recovery_block ();
3591 label
= BB_HEAD (rec
);
3595 rec
= EXIT_BLOCK_PTR
;
3600 check
= targetm
.sched
.gen_check (insn
, label
, mutate_p
);
3602 if (rec
!= EXIT_BLOCK_PTR
)
3604 /* To have mem_reg alive at the beginning of second_bb,
3605 we emit check BEFORE insn, so insn after splitting
3606 insn will be at the beginning of second_bb, which will
3607 provide us with the correct life information. */
3608 check
= emit_jump_insn_before (check
, insn
);
3609 JUMP_LABEL (check
) = label
;
3610 LABEL_NUSES (label
)++;
3613 check
= emit_insn_before (check
, insn
);
3615 /* Extend data structures. */
3617 RECOVERY_BLOCK (check
) = rec
;
3619 if (sched_verbose
&& spec_info
->dump
)
3620 fprintf (spec_info
->dump
, ";;\t\tGenerated check insn : %s\n",
3621 (*current_sched_info
->print_insn
) (check
, 0));
3623 gcc_assert (ORIG_PAT (insn
));
3625 /* Initialize TWIN (twin is a duplicate of original instruction
3626 in the recovery block). */
3627 if (rec
!= EXIT_BLOCK_PTR
)
3631 for (link
= RESOLVED_DEPS (insn
); link
; link
= XEXP (link
, 1))
3632 if (DEP_STATUS (link
) & DEP_OUTPUT
)
3634 RESOLVED_DEPS (check
) =
3635 alloc_DEPS_LIST (XEXP (link
, 0), RESOLVED_DEPS (check
), DEP_TRUE
);
3636 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check
), REG_DEP_TRUE
);
3639 twin
= emit_insn_after (ORIG_PAT (insn
), BB_END (rec
));
3640 extend_global (twin
);
3642 if (sched_verbose
&& spec_info
->dump
)
3643 /* INSN_BB (insn) isn't determined for twin insns yet.
3644 So we can't use current_sched_info->print_insn. */
3645 fprintf (spec_info
->dump
, ";;\t\tGenerated twin insn : %d/rec%d\n",
3646 INSN_UID (twin
), rec
->index
);
3650 ORIG_PAT (check
) = ORIG_PAT (insn
);
3651 HAS_INTERNAL_DEP (check
) = 1;
3653 /* ??? We probably should change all OUTPUT dependencies to
3657 RESOLVED_DEPS (twin
) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn
));
3659 if (rec
!= EXIT_BLOCK_PTR
)
3660 /* In case of branchy check, fix CFG. */
3662 basic_block first_bb
, second_bb
;
3667 first_bb
= BLOCK_FOR_INSN (check
);
3668 e
= split_block (first_bb
, check
);
3669 /* split_block emits note if *check == BB_END. Probably it
3670 is better to rip that note off. */
3671 gcc_assert (e
->src
== first_bb
);
3672 second_bb
= e
->dest
;
3674 /* This is fixing of incoming edge. */
3675 /* ??? Which other flags should be specified? */
3676 if (BB_PARTITION (first_bb
) != BB_PARTITION (rec
))
3677 /* Partition type is the same, if it is "unpartitioned". */
3678 edge_flags
= EDGE_CROSSING
;
3682 e
= make_edge (first_bb
, rec
, edge_flags
);
3684 add_block (second_bb
, first_bb
);
3686 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb
)));
3687 label
= block_label (second_bb
);
3688 jump
= emit_jump_insn_after (gen_jump (label
), BB_END (rec
));
3689 JUMP_LABEL (jump
) = label
;
3690 LABEL_NUSES (label
)++;
3691 extend_global (jump
);
3693 if (BB_PARTITION (second_bb
) != BB_PARTITION (rec
))
3694 /* Partition type is the same, if it is "unpartitioned". */
3696 /* Rewritten from cfgrtl.c. */
3697 if (flag_reorder_blocks_and_partition
3698 && targetm
.have_named_sections
3699 /*&& !any_condjump_p (jump)*/)
3700 /* any_condjump_p (jump) == false.
3701 We don't need the same note for the check because
3702 any_condjump_p (check) == true. */
3704 REG_NOTES (jump
) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP
,
3708 edge_flags
= EDGE_CROSSING
;
3713 make_single_succ_edge (rec
, second_bb
, edge_flags
);
3715 add_block (rec
, EXIT_BLOCK_PTR
);
3718 /* Move backward dependences from INSN to CHECK and
3719 move forward dependences from INSN to TWIN. */
3720 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
3724 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3725 check --TRUE--> producer ??? or ANTI ???
3726 twin --TRUE--> producer
3727 twin --ANTI--> check
3729 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3730 check --ANTI--> producer
3731 twin --ANTI--> producer
3732 twin --ANTI--> check
3734 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3735 check ~~TRUE~~> producer
3736 twin ~~TRUE~~> producer
3737 twin --ANTI--> check */
3739 ds
= DEP_STATUS (link
);
3741 if (ds
& BEGIN_SPEC
)
3743 gcc_assert (!mutate_p
);
3747 if (rec
!= EXIT_BLOCK_PTR
)
3749 add_back_forw_dep (check
, XEXP (link
, 0), REG_NOTE_KIND (link
), ds
);
3750 add_back_forw_dep (twin
, XEXP (link
, 0), REG_NOTE_KIND (link
), ds
);
3753 add_back_forw_dep (check
, XEXP (link
, 0), REG_NOTE_KIND (link
), ds
);
3756 for (link
= LOG_LINKS (insn
); link
;)
3757 if ((DEP_STATUS (link
) & BEGIN_SPEC
)
3759 /* We can delete this dep only if we totally overcome it with
3760 BEGIN_SPECULATION. */
3762 delete_back_forw_dep (insn
, XEXP (link
, 0));
3763 link
= LOG_LINKS (insn
);
3766 link
= XEXP (link
, 1);
3770 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3773 gcc_assert (!DONE_SPEC (insn
));
3777 ds_t ts
= TODO_SPEC (insn
);
3779 DONE_SPEC (insn
) = ts
& BEGIN_SPEC
;
3780 CHECK_SPEC (check
) = ts
& BEGIN_SPEC
;
3782 if (ts
& BEGIN_DATA
)
3783 fs
= set_dep_weak (fs
, BE_IN_DATA
, get_dep_weak (ts
, BEGIN_DATA
));
3784 if (ts
& BEGIN_CONTROL
)
3785 fs
= set_dep_weak (fs
, BE_IN_CONTROL
, get_dep_weak (ts
, BEGIN_CONTROL
));
3788 CHECK_SPEC (check
) = CHECK_SPEC (insn
);
3790 /* Future speculations: call the helper. */
3791 process_insn_depend_be_in_spec (INSN_DEPEND (insn
), twin
, fs
);
3793 if (rec
!= EXIT_BLOCK_PTR
)
3795 /* Which types of dependencies should we use here is,
3796 generally, machine-dependent question... But, for now,
3801 add_back_forw_dep (check
, insn
, REG_DEP_TRUE
, DEP_TRUE
);
3802 add_back_forw_dep (twin
, insn
, REG_DEP_OUTPUT
, DEP_OUTPUT
);
3806 if (spec_info
->dump
)
3807 fprintf (spec_info
->dump
, ";;\t\tRemoved simple check : %s\n",
3808 (*current_sched_info
->print_insn
) (insn
, 0));
3810 for (link
= INSN_DEPEND (insn
); link
; link
= INSN_DEPEND (insn
))
3811 delete_back_forw_dep (XEXP (link
, 0), insn
);
3813 if (QUEUE_INDEX (insn
) != QUEUE_NOWHERE
)
3816 sched_remove_insn (insn
);
3819 add_back_forw_dep (twin
, check
, REG_DEP_ANTI
, DEP_ANTI
);
3822 add_back_forw_dep (check
, insn
, REG_DEP_TRUE
, DEP_TRUE
| DEP_OUTPUT
);
3825 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3826 because it'll be done later in add_to_speculative_block. */
3828 clear_priorities (twin
);
3829 calc_priorities (twin
);
3833 /* Removes dependency between instructions in the recovery block REC
3834 and usual region instructions. It keeps inner dependences so it
3835 won't be necessary to recompute them. */
3837 fix_recovery_deps (basic_block rec
)
3839 rtx note
, insn
, link
, jump
, ready_list
= 0;
3840 bitmap_head in_ready
;
3842 bitmap_initialize (&in_ready
, 0);
3844 /* NOTE - a basic block note. */
3845 note
= NEXT_INSN (BB_HEAD (rec
));
3846 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
3847 insn
= BB_END (rec
);
3848 gcc_assert (JUMP_P (insn
));
3849 insn
= PREV_INSN (insn
);
3853 for (link
= INSN_DEPEND (insn
); link
;)
3857 consumer
= XEXP (link
, 0);
3859 if (BLOCK_FOR_INSN (consumer
) != rec
)
3861 delete_back_forw_dep (consumer
, insn
);
3863 if (!bitmap_bit_p (&in_ready
, INSN_LUID (consumer
)))
3865 ready_list
= alloc_INSN_LIST (consumer
, ready_list
);
3866 bitmap_set_bit (&in_ready
, INSN_LUID (consumer
));
3869 link
= INSN_DEPEND (insn
);
3873 gcc_assert ((DEP_STATUS (link
) & DEP_TYPES
) == DEP_TRUE
);
3875 link
= XEXP (link
, 1);
3879 insn
= PREV_INSN (insn
);
3881 while (insn
!= note
);
3883 bitmap_clear (&in_ready
);
3885 /* Try to add instructions to the ready or queue list. */
3886 for (link
= ready_list
; link
; link
= XEXP (link
, 1))
3887 try_ready (XEXP (link
, 0));
3888 free_INSN_LIST_list (&ready_list
);
3890 /* Fixing jump's dependences. */
3891 insn
= BB_HEAD (rec
);
3892 jump
= BB_END (rec
);
3894 gcc_assert (LABEL_P (insn
));
3895 insn
= NEXT_INSN (insn
);
3897 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn
));
3898 add_jump_dependencies (insn
, jump
);
3901 /* The function saves line notes at the beginning of block B. */
3903 associate_line_notes_with_blocks (basic_block b
)
3907 for (line
= BB_HEAD (b
); line
; line
= PREV_INSN (line
))
3908 if (NOTE_P (line
) && NOTE_LINE_NUMBER (line
) > 0)
3910 line_note_head
[b
->index
] = line
;
3913 /* Do a forward search as well, since we won't get to see the first
3914 notes in a basic block. */
3915 for (line
= BB_HEAD (b
); line
; line
= NEXT_INSN (line
))
3919 if (NOTE_P (line
) && NOTE_LINE_NUMBER (line
) > 0)
3920 line_note_head
[b
->index
] = line
;
3924 /* Changes pattern of the INSN to NEW_PAT. */
3926 change_pattern (rtx insn
, rtx new_pat
)
3930 t
= validate_change (insn
, &PATTERN (insn
), new_pat
, 0);
3932 /* Invalidate INSN_COST, so it'll be recalculated. */
3933 INSN_COST (insn
) = -1;
3934 /* Invalidate INSN_TICK, so it'll be recalculated. */
3935 INSN_TICK (insn
) = INVALID_TICK
;
3936 dfa_clear_single_insn_cache (insn
);
3940 /* -1 - can't speculate,
3941 0 - for speculation with REQUEST mode it is OK to use
3942 current instruction pattern,
3943 1 - need to change pattern for *NEW_PAT to be speculative. */
3945 speculate_insn (rtx insn
, ds_t request
, rtx
*new_pat
)
3947 gcc_assert (current_sched_info
->flags
& DO_SPECULATION
3948 && (request
& SPECULATIVE
));
3950 if (!NONJUMP_INSN_P (insn
)
3951 || HAS_INTERNAL_DEP (insn
)
3952 || SCHED_GROUP_P (insn
)
3953 || side_effects_p (PATTERN (insn
))
3954 || (request
& spec_info
->mask
) != request
)
3957 gcc_assert (!IS_SPECULATION_CHECK_P (insn
));
3959 if (request
& BE_IN_SPEC
)
3961 if (may_trap_p (PATTERN (insn
)))
3964 if (!(request
& BEGIN_SPEC
))
3968 return targetm
.sched
.speculate_insn (insn
, request
& BEGIN_SPEC
, new_pat
);
3971 /* Print some information about block BB, which starts with HEAD and
3972 ends with TAIL, before scheduling it.
3973 I is zero, if scheduler is about to start with the fresh ebb. */
3975 dump_new_block_header (int i
, basic_block bb
, rtx head
, rtx tail
)
3978 fprintf (sched_dump
,
3979 ";; ======================================================\n");
3981 fprintf (sched_dump
,
3982 ";; =====================ADVANCING TO=====================\n");
3983 fprintf (sched_dump
,
3984 ";; -- basic block %d from %d to %d -- %s reload\n",
3985 bb
->index
, INSN_UID (head
), INSN_UID (tail
),
3986 (reload_completed
? "after" : "before"));
3987 fprintf (sched_dump
,
3988 ";; ======================================================\n");
3989 fprintf (sched_dump
, "\n");
3992 /* Unlink basic block notes and labels and saves them, so they
3993 can be easily restored. We unlink basic block notes in EBB to
3994 provide back-compatibility with the previous code, as target backends
3995 assume, that there'll be only instructions between
3996 current_sched_info->{head and tail}. We restore these notes as soon
3998 FIRST (LAST) is the first (last) basic block in the ebb.
3999 NB: In usual case (FIRST == LAST) nothing is really done. */
4001 unlink_bb_notes (basic_block first
, basic_block last
)
4003 /* We DON'T unlink basic block notes of the first block in the ebb. */
4007 bb_header
= xmalloc (last_basic_block
* sizeof (*bb_header
));
4009 /* Make a sentinel. */
4010 if (last
->next_bb
!= EXIT_BLOCK_PTR
)
4011 bb_header
[last
->next_bb
->index
] = 0;
4013 first
= first
->next_bb
;
4016 rtx prev
, label
, note
, next
;
4018 label
= BB_HEAD (last
);
4019 if (LABEL_P (label
))
4020 note
= NEXT_INSN (label
);
4023 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
4025 prev
= PREV_INSN (label
);
4026 next
= NEXT_INSN (note
);
4027 gcc_assert (prev
&& next
);
4029 NEXT_INSN (prev
) = next
;
4030 PREV_INSN (next
) = prev
;
4032 bb_header
[last
->index
] = label
;
4037 last
= last
->prev_bb
;
4042 /* Restore basic block notes.
4043 FIRST is the first basic block in the ebb. */
4045 restore_bb_notes (basic_block first
)
4050 /* We DON'T unlink basic block notes of the first block in the ebb. */
4051 first
= first
->next_bb
;
4052 /* Remember: FIRST is actually a second basic block in the ebb. */
4054 while (first
!= EXIT_BLOCK_PTR
4055 && bb_header
[first
->index
])
4057 rtx prev
, label
, note
, next
;
4059 label
= bb_header
[first
->index
];
4060 prev
= PREV_INSN (label
);
4061 next
= NEXT_INSN (prev
);
4063 if (LABEL_P (label
))
4064 note
= NEXT_INSN (label
);
4067 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
4069 bb_header
[first
->index
] = 0;
4071 NEXT_INSN (prev
) = label
;
4072 NEXT_INSN (note
) = next
;
4073 PREV_INSN (next
) = note
;
4075 first
= first
->next_bb
;
4082 /* Extend per basic block data structures of the scheduler.
4083 If BB is NULL, initialize structures for the whole CFG.
4084 Otherwise, initialize them for the just created BB. */
4086 extend_bb (basic_block bb
)
4090 if (write_symbols
!= NO_DEBUG
)
4092 /* Save-line-note-head:
4093 Determine the line-number at the start of each basic block.
4094 This must be computed and saved now, because after a basic block's
4095 predecessor has been scheduled, it is impossible to accurately
4096 determine the correct line number for the first insn of the block. */
4097 line_note_head
= xrecalloc (line_note_head
, last_basic_block
,
4098 old_last_basic_block
,
4099 sizeof (*line_note_head
));
4102 associate_line_notes_with_blocks (bb
);
4105 associate_line_notes_with_blocks (bb
);
4108 old_last_basic_block
= last_basic_block
;
4110 if (current_sched_info
->flags
& USE_GLAT
)
4112 glat_start
= xrealloc (glat_start
,
4113 last_basic_block
* sizeof (*glat_start
));
4114 glat_end
= xrealloc (glat_end
, last_basic_block
* sizeof (*glat_end
));
4117 /* The following is done to keep current_sched_info->next_tail non null. */
4119 insn
= BB_END (EXIT_BLOCK_PTR
->prev_bb
);
4120 if (NEXT_INSN (insn
) == 0
4123 /* Don't emit a NOTE if it would end up before a BARRIER. */
4124 && !BARRIER_P (NEXT_INSN (insn
))))
4126 emit_note_after (NOTE_INSN_DELETED
, insn
);
4127 /* Make insn to appear outside BB. */
4128 BB_END (EXIT_BLOCK_PTR
->prev_bb
) = insn
;
4132 /* Add a basic block BB to extended basic block EBB.
4133 If EBB is EXIT_BLOCK_PTR, then BB is recovery block.
4134 If EBB is NULL, then BB should be a new region. */
4136 add_block (basic_block bb
, basic_block ebb
)
4138 gcc_assert (current_sched_info
->flags
& DETACH_LIFE_INFO
4139 && bb
->il
.rtl
->global_live_at_start
== 0
4140 && bb
->il
.rtl
->global_live_at_end
== 0);
4144 glat_start
[bb
->index
] = 0;
4145 glat_end
[bb
->index
] = 0;
4147 if (current_sched_info
->add_block
)
4148 /* This changes only data structures of the front-end. */
4149 current_sched_info
->add_block (bb
, ebb
);
4153 Fix CFG after both in- and inter-block movement of
4154 control_flow_insn_p JUMP. */
4156 fix_jump_move (rtx jump
)
4158 basic_block bb
, jump_bb
, jump_bb_next
;
4160 bb
= BLOCK_FOR_INSN (PREV_INSN (jump
));
4161 jump_bb
= BLOCK_FOR_INSN (jump
);
4162 jump_bb_next
= jump_bb
->next_bb
;
4164 gcc_assert (current_sched_info
->flags
& SCHED_EBB
4165 || IS_SPECULATION_BRANCHY_CHECK_P (jump
));
4167 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next
)))
4168 /* if jump_bb_next is not empty. */
4169 BB_END (jump_bb
) = BB_END (jump_bb_next
);
4171 if (BB_END (bb
) != PREV_INSN (jump
))
4172 /* Then there are instruction after jump that should be placed
4174 BB_END (jump_bb_next
) = BB_END (bb
);
4176 /* Otherwise jump_bb_next is empty. */
4177 BB_END (jump_bb_next
) = NEXT_INSN (BB_HEAD (jump_bb_next
));
4179 /* To make assertion in move_insn happy. */
4180 BB_END (bb
) = PREV_INSN (jump
);
4182 update_bb_for_insn (jump_bb_next
);
4185 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4187 move_block_after_check (rtx jump
)
4189 basic_block bb
, jump_bb
, jump_bb_next
;
4192 bb
= BLOCK_FOR_INSN (PREV_INSN (jump
));
4193 jump_bb
= BLOCK_FOR_INSN (jump
);
4194 jump_bb_next
= jump_bb
->next_bb
;
4196 update_bb_for_insn (jump_bb
);
4198 gcc_assert (IS_SPECULATION_CHECK_P (jump
)
4199 || IS_SPECULATION_CHECK_P (BB_END (jump_bb_next
)));
4201 unlink_block (jump_bb_next
);
4202 link_block (jump_bb_next
, bb
);
4206 move_succs (&(jump_bb
->succs
), bb
);
4207 move_succs (&(jump_bb_next
->succs
), jump_bb
);
4208 move_succs (&t
, jump_bb_next
);
4210 if (current_sched_info
->fix_recovery_cfg
)
4211 current_sched_info
->fix_recovery_cfg
4212 (bb
->index
, jump_bb
->index
, jump_bb_next
->index
);
4215 /* Helper function for move_block_after_check.
4216 This functions attaches edge vector pointed to by SUCCSP to
4219 move_succs (VEC(edge
,gc
) **succsp
, basic_block to
)
4224 gcc_assert (to
->succs
== 0);
4226 to
->succs
= *succsp
;
4228 FOR_EACH_EDGE (e
, ei
, to
->succs
)
4234 /* Initialize GLAT (global_live_at_{start, end}) structures.
4235 GLAT structures are used to substitute global_live_{start, end}
4236 regsets during scheduling. This is necessary to use such functions as
4237 split_block (), as they assume consistency of register live information. */
4247 /* Helper function for init_glat. */
4249 init_glat1 (basic_block bb
)
4251 gcc_assert (bb
->il
.rtl
->global_live_at_start
!= 0
4252 && bb
->il
.rtl
->global_live_at_end
!= 0);
4254 glat_start
[bb
->index
] = bb
->il
.rtl
->global_live_at_start
;
4255 glat_end
[bb
->index
] = bb
->il
.rtl
->global_live_at_end
;
4257 if (current_sched_info
->flags
& DETACH_LIFE_INFO
)
4259 bb
->il
.rtl
->global_live_at_start
= 0;
4260 bb
->il
.rtl
->global_live_at_end
= 0;
4264 /* Attach reg_live_info back to basic blocks.
4265 Also save regsets, that should not have been changed during scheduling,
4266 for checking purposes (see check_reg_live). */
4268 attach_life_info (void)
4273 attach_life_info1 (bb
);
4276 /* Helper function for attach_life_info. */
4278 attach_life_info1 (basic_block bb
)
4280 gcc_assert (bb
->il
.rtl
->global_live_at_start
== 0
4281 && bb
->il
.rtl
->global_live_at_end
== 0);
4283 if (glat_start
[bb
->index
])
4285 gcc_assert (glat_end
[bb
->index
]);
4287 bb
->il
.rtl
->global_live_at_start
= glat_start
[bb
->index
];
4288 bb
->il
.rtl
->global_live_at_end
= glat_end
[bb
->index
];
4290 /* Make them NULL, so they won't be freed in free_glat. */
4291 glat_start
[bb
->index
] = 0;
4292 glat_end
[bb
->index
] = 0;
4294 #ifdef ENABLE_CHECKING
4295 if (bb
->index
< NUM_FIXED_BLOCKS
4296 || current_sched_info
->region_head_or_leaf_p (bb
, 0))
4298 glat_start
[bb
->index
] = ALLOC_REG_SET (®_obstack
);
4299 COPY_REG_SET (glat_start
[bb
->index
],
4300 bb
->il
.rtl
->global_live_at_start
);
4303 if (bb
->index
< NUM_FIXED_BLOCKS
4304 || current_sched_info
->region_head_or_leaf_p (bb
, 1))
4306 glat_end
[bb
->index
] = ALLOC_REG_SET (®_obstack
);
4307 COPY_REG_SET (glat_end
[bb
->index
], bb
->il
.rtl
->global_live_at_end
);
4313 gcc_assert (!glat_end
[bb
->index
]);
4315 bb
->il
.rtl
->global_live_at_start
= ALLOC_REG_SET (®_obstack
);
4316 bb
->il
.rtl
->global_live_at_end
= ALLOC_REG_SET (®_obstack
);
4320 /* Free GLAT information. */
4324 #ifdef ENABLE_CHECKING
4325 if (current_sched_info
->flags
& DETACH_LIFE_INFO
)
4331 if (glat_start
[bb
->index
])
4332 FREE_REG_SET (glat_start
[bb
->index
]);
4333 if (glat_end
[bb
->index
])
4334 FREE_REG_SET (glat_end
[bb
->index
]);
4343 /* Remove INSN from the instruction stream.
4344 INSN should have any dependencies. */
4346 sched_remove_insn (rtx insn
)
4348 change_queue_index (insn
, QUEUE_NOWHERE
);
4349 current_sched_info
->add_remove_insn (insn
, 1);
4353 /* Clear priorities of all instructions, that are
4354 forward dependent on INSN. */
4356 clear_priorities (rtx insn
)
4360 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
4364 pro
= XEXP (link
, 0);
4365 if (INSN_PRIORITY_KNOWN (pro
))
4367 INSN_PRIORITY_KNOWN (pro
) = 0;
4368 clear_priorities (pro
);
4373 /* Recompute priorities of instructions, whose priorities might have been
4374 changed due to changes in INSN. */
4376 calc_priorities (rtx insn
)
4380 for (link
= LOG_LINKS (insn
); link
; link
= XEXP (link
, 1))
4384 pro
= XEXP (link
, 0);
4385 if (!INSN_PRIORITY_KNOWN (pro
))
4388 calc_priorities (pro
);
4394 /* Add dependences between JUMP and other instructions in the recovery
4395 block. INSN is the first insn the recovery block. */
4397 add_jump_dependencies (rtx insn
, rtx jump
)
4401 insn
= NEXT_INSN (insn
);
4405 if (!INSN_DEPEND (insn
))
4406 add_back_forw_dep (jump
, insn
, REG_DEP_ANTI
, DEP_ANTI
);
4409 gcc_assert (LOG_LINKS (jump
));
4412 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4414 bb_note (basic_block bb
)
4418 note
= BB_HEAD (bb
);
4420 note
= NEXT_INSN (note
);
4422 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note
));
4426 #ifdef ENABLE_CHECKING
4427 extern void debug_spec_status (ds_t
);
4429 /* Dump information about the dependence status S. */
4431 debug_spec_status (ds_t s
)
4436 fprintf (f
, "BEGIN_DATA: %d; ", get_dep_weak (s
, BEGIN_DATA
));
4438 fprintf (f
, "BE_IN_DATA: %d; ", get_dep_weak (s
, BE_IN_DATA
));
4439 if (s
& BEGIN_CONTROL
)
4440 fprintf (f
, "BEGIN_CONTROL: %d; ", get_dep_weak (s
, BEGIN_CONTROL
));
4441 if (s
& BE_IN_CONTROL
)
4442 fprintf (f
, "BE_IN_CONTROL: %d; ", get_dep_weak (s
, BE_IN_CONTROL
));
4445 fprintf (f
, "HARD_DEP; ");
4448 fprintf (f
, "DEP_TRUE; ");
4450 fprintf (f
, "DEP_ANTI; ");
4452 fprintf (f
, "DEP_OUTPUT; ");
4457 /* Helper function for check_cfg.
4458 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
4461 has_edge_p (VEC(edge
,gc
) *el
, int type
)
4466 FOR_EACH_EDGE (e
, ei
, el
)
4467 if (e
->flags
& type
)
4472 /* Check few properties of CFG between HEAD and TAIL.
4473 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
4474 instruction stream. */
4476 check_cfg (rtx head
, rtx tail
)
4480 int not_first
= 0, not_last
;
4483 head
= get_insns ();
4485 tail
= get_last_insn ();
4486 next_tail
= NEXT_INSN (tail
);
4490 not_last
= head
!= tail
;
4493 gcc_assert (NEXT_INSN (PREV_INSN (head
)) == head
);
4495 gcc_assert (PREV_INSN (NEXT_INSN (head
)) == head
);
4498 || (NOTE_INSN_BASIC_BLOCK_P (head
)
4500 || (not_first
&& !LABEL_P (PREV_INSN (head
))))))
4502 gcc_assert (bb
== 0);
4503 bb
= BLOCK_FOR_INSN (head
);
4505 gcc_assert (BB_HEAD (bb
) == head
);
4507 /* This is the case of jump table. See inside_basic_block_p (). */
4508 gcc_assert (LABEL_P (head
) && !inside_basic_block_p (head
));
4513 gcc_assert (!inside_basic_block_p (head
));
4514 head
= NEXT_INSN (head
);
4518 gcc_assert (inside_basic_block_p (head
)
4520 gcc_assert (BLOCK_FOR_INSN (head
) == bb
);
4524 head
= NEXT_INSN (head
);
4525 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head
));
4529 if (control_flow_insn_p (head
))
4531 gcc_assert (BB_END (bb
) == head
);
4533 if (any_uncondjump_p (head
))
4534 gcc_assert (EDGE_COUNT (bb
->succs
) == 1
4535 && BARRIER_P (NEXT_INSN (head
)));
4536 else if (any_condjump_p (head
))
4537 gcc_assert (/* Usual case. */
4538 (EDGE_COUNT (bb
->succs
) > 1
4539 && !BARRIER_P (NEXT_INSN (head
)))
4540 /* Or jump to the next instruction. */
4541 || (EDGE_COUNT (bb
->succs
) == 1
4542 && (BB_HEAD (EDGE_I (bb
->succs
, 0)->dest
)
4543 == JUMP_LABEL (head
))));
4545 if (BB_END (bb
) == head
)
4547 if (EDGE_COUNT (bb
->succs
) > 1)
4548 gcc_assert (control_flow_insn_p (head
)
4549 || has_edge_p (bb
->succs
, EDGE_COMPLEX
));
4553 head
= NEXT_INSN (head
);
4559 while (head
!= next_tail
);
4561 gcc_assert (bb
== 0);
4564 /* Perform a few consistency checks of flags in different data structures. */
4566 check_sched_flags (void)
4568 unsigned int f
= current_sched_info
->flags
;
4570 if (flag_sched_stalled_insns
)
4571 gcc_assert (!(f
& DO_SPECULATION
));
4572 if (f
& DO_SPECULATION
)
4573 gcc_assert (!flag_sched_stalled_insns
4574 && (f
& DETACH_LIFE_INFO
)
4576 && spec_info
->mask
);
4577 if (f
& DETACH_LIFE_INFO
)
4578 gcc_assert (f
& USE_GLAT
);
4581 /* Check global_live_at_{start, end} regsets.
4582 If FATAL_P is TRUE, then abort execution at the first failure.
4583 Otherwise, print diagnostics to STDERR (this mode is for calling
4586 check_reg_live (bool fatal_p
)
4598 bool b
= bitmap_equal_p (bb
->il
.rtl
->global_live_at_start
,
4603 gcc_assert (!fatal_p
);
4605 fprintf (stderr
, ";; check_reg_live_at_start (%d) failed.\n", i
);
4611 bool b
= bitmap_equal_p (bb
->il
.rtl
->global_live_at_end
,
4616 gcc_assert (!fatal_p
);
4618 fprintf (stderr
, ";; check_reg_live_at_end (%d) failed.\n", i
);
4623 #endif /* ENABLE_CHECKING */
4625 #endif /* INSN_SCHEDULING */