2006-09-26 Jack Howarth <howarth@bromo.med.uc.edu>
[official-gcc.git] / gcc / haifa-sched.c
blob78adee5723ab97534cb970c0b91f52579bf692ed
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
22 02110-1301, USA. */
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point is found for
26 the normal instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
57 The following list shows the order in which we want to break ties
58 among insns in the ready list:
60 1. choose insn with the longest path to end of bb, ties
61 broken by
62 2. choose insn with least contribution to register pressure,
63 ties broken by
64 3. prefer in-block upon interblock motion, ties broken by
65 4. prefer useful upon speculative motion, ties broken by
66 5. choose insn with largest control flow probability, ties
67 broken by
68 6. choose insn with the least dependences upon the previously
69 scheduled insn, or finally
70 7 choose the insn which has the most insns dependent on it.
71 8. choose insn with lowest UID.
73 Memory references complicate matters. Only if we can be certain
74 that memory references are not part of the data dependency graph
75 (via true, anti, or output dependence), can we move operations past
76 memory references. To first approximation, reads can be done
77 independently, while writes introduce dependencies. Better
78 approximations will yield fewer dependencies.
80 Before reload, an extended analysis of interblock data dependences
81 is required for interblock scheduling. This is performed in
82 compute_block_backward_dependences ().
84 Dependencies set up by memory references are treated in exactly the
85 same way as other dependencies, by using LOG_LINKS backward
86 dependences. LOG_LINKS are translated into INSN_DEPEND forward
87 dependences for the purpose of forward list scheduling.
89 Having optimized the critical path, we may have also unduly
90 extended the lifetimes of some registers. If an operation requires
91 that constants be loaded into registers, it is certainly desirable
92 to load those constants as early as necessary, but no earlier.
93 I.e., it will not do to load up a bunch of registers at the
94 beginning of a basic block only to use them at the end, if they
95 could be loaded later, since this may result in excessive register
96 utilization.
98 Note that since branches are never in basic blocks, but only end
99 basic blocks, this pass will not move branches. But that is ok,
100 since we can use GNU's delayed branch scheduling pass to take care
101 of this case.
103 Also note that no further optimizations based on algebraic
104 identities are performed, so this pass would be a good one to
105 perform instruction splitting, such as breaking up a multiply
106 instruction into shifts and adds where that is profitable.
108 Given the memory aliasing analysis that this pass should perform,
109 it should be possible to remove redundant stores to memory, and to
110 load values from registers instead of hitting memory.
112 Before reload, speculative insns are moved only if a 'proof' exists
113 that no exception will be caused by this, and if no live registers
114 exist that inhibit the motion (live registers constraints are not
115 represented by data dependence edges).
117 This pass must update information that subsequent passes expect to
118 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
119 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
121 The information in the line number notes is carefully retained by
122 this pass. Notes that refer to the starting and ending of
123 exception regions are also carefully retained by this pass. All
124 other NOTE insns are grouped in their same relative order at the
125 beginning of basic blocks and regions that have been scheduled. */
127 #include "config.h"
128 #include "system.h"
129 #include "coretypes.h"
130 #include "tm.h"
131 #include "toplev.h"
132 #include "rtl.h"
133 #include "tm_p.h"
134 #include "hard-reg-set.h"
135 #include "regs.h"
136 #include "function.h"
137 #include "flags.h"
138 #include "insn-config.h"
139 #include "insn-attr.h"
140 #include "except.h"
141 #include "toplev.h"
142 #include "recog.h"
143 #include "sched-int.h"
144 #include "target.h"
145 #include "output.h"
146 #include "params.h"
148 #ifdef INSN_SCHEDULING
150 /* issue_rate is the number of insns that can be scheduled in the same
151 machine cycle. It can be defined in the config/mach/mach.h file,
152 otherwise we set it to 1. */
154 static int issue_rate;
156 /* sched-verbose controls the amount of debugging output the
157 scheduler prints. It is controlled by -fsched-verbose=N:
158 N>0 and no -DSR : the output is directed to stderr.
159 N>=10 will direct the printouts to stderr (regardless of -dSR).
160 N=1: same as -dSR.
161 N=2: bb's probabilities, detailed ready list info, unit/insn info.
162 N=3: rtl at abort point, control-flow, regions info.
163 N=5: dependences info. */
165 static int sched_verbose_param = 0;
166 int sched_verbose = 0;
168 /* Debugging file. All printouts are sent to dump, which is always set,
169 either to stderr, or to the dump listing file (-dRS). */
170 FILE *sched_dump = 0;
172 /* Highest uid before scheduling. */
173 static int old_max_uid;
175 /* fix_sched_param() is called from toplev.c upon detection
176 of the -fsched-verbose=N option. */
178 void
179 fix_sched_param (const char *param, const char *val)
181 if (!strcmp (param, "verbose"))
182 sched_verbose_param = atoi (val);
183 else
184 warning (0, "fix_sched_param: unknown param: %s", param);
187 struct haifa_insn_data *h_i_d;
189 #define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
190 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
191 #define INTER_TICK(INSN) (h_i_d[INSN_UID (INSN)].inter_tick)
193 /* If INSN_TICK of an instruction is equal to INVALID_TICK,
194 then it should be recalculated from scratch. */
195 #define INVALID_TICK (-(max_insn_queue_index + 1))
196 /* The minimal value of the INSN_TICK of an instruction. */
197 #define MIN_TICK (-max_insn_queue_index)
199 /* Issue points are used to distinguish between instructions in max_issue ().
200 For now, all instructions are equally good. */
201 #define ISSUE_POINTS(INSN) 1
203 /* Vector indexed by basic block number giving the starting line-number
204 for each basic block. */
205 static rtx *line_note_head;
207 /* List of important notes we must keep around. This is a pointer to the
208 last element in the list. */
209 static rtx note_list;
211 static struct spec_info_def spec_info_var;
212 /* Description of the speculative part of the scheduling.
213 If NULL - no speculation. */
214 static spec_info_t spec_info;
216 /* True, if recovery block was added during scheduling of current block.
217 Used to determine, if we need to fix INSN_TICKs. */
218 static bool added_recovery_block_p;
220 /* Counters of different types of speculative instructions. */
221 static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
223 /* Pointers to GLAT data. See init_glat for more information. */
224 regset *glat_start, *glat_end;
226 /* Array used in {unlink, restore}_bb_notes. */
227 static rtx *bb_header = 0;
229 /* Number of basic_blocks. */
230 static int old_last_basic_block;
232 /* Basic block after which recovery blocks will be created. */
233 static basic_block before_recovery;
235 /* Queues, etc. */
237 /* An instruction is ready to be scheduled when all insns preceding it
238 have already been scheduled. It is important to ensure that all
239 insns which use its result will not be executed until its result
240 has been computed. An insn is maintained in one of four structures:
242 (P) the "Pending" set of insns which cannot be scheduled until
243 their dependencies have been satisfied.
244 (Q) the "Queued" set of insns that can be scheduled when sufficient
245 time has passed.
246 (R) the "Ready" list of unscheduled, uncommitted insns.
247 (S) the "Scheduled" list of insns.
249 Initially, all insns are either "Pending" or "Ready" depending on
250 whether their dependencies are satisfied.
252 Insns move from the "Ready" list to the "Scheduled" list as they
253 are committed to the schedule. As this occurs, the insns in the
254 "Pending" list have their dependencies satisfied and move to either
255 the "Ready" list or the "Queued" set depending on whether
256 sufficient time has passed to make them ready. As time passes,
257 insns move from the "Queued" set to the "Ready" list.
259 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
260 insns, i.e., those that are ready, queued, and pending.
261 The "Queued" set (Q) is implemented by the variable `insn_queue'.
262 The "Ready" list (R) is implemented by the variables `ready' and
263 `n_ready'.
264 The "Scheduled" list (S) is the new insn chain built by this pass.
266 The transition (R->S) is implemented in the scheduling loop in
267 `schedule_block' when the best insn to schedule is chosen.
268 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
269 insns move from the ready list to the scheduled list.
270 The transition (Q->R) is implemented in 'queue_to_insn' as time
271 passes or stalls are introduced. */
273 /* Implement a circular buffer to delay instructions until sufficient
274 time has passed. For the new pipeline description interface,
275 MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
276 than maximal time of instruction execution computed by genattr.c on
277 the base maximal time of functional unit reservations and getting a
278 result. This is the longest time an insn may be queued. */
280 static rtx *insn_queue;
281 static int q_ptr = 0;
282 static int q_size = 0;
283 #define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
284 #define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
286 #define QUEUE_SCHEDULED (-3)
287 #define QUEUE_NOWHERE (-2)
288 #define QUEUE_READY (-1)
289 /* QUEUE_SCHEDULED - INSN is scheduled.
290 QUEUE_NOWHERE - INSN isn't scheduled yet and is neither in
291 queue or ready list.
292 QUEUE_READY - INSN is in ready list.
293 N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles. */
295 #define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)
297 /* The following variable value refers for all current and future
298 reservations of the processor units. */
299 state_t curr_state;
301 /* The following variable value is size of memory representing all
302 current and future reservations of the processor units. */
303 static size_t dfa_state_size;
305 /* The following array is used to find the best insn from ready when
306 the automaton pipeline interface is used. */
307 static char *ready_try;
309 /* Describe the ready list of the scheduler.
310 VEC holds space enough for all insns in the current region. VECLEN
311 says how many exactly.
312 FIRST is the index of the element with the highest priority; i.e. the
313 last one in the ready list, since elements are ordered by ascending
314 priority.
315 N_READY determines how many insns are on the ready list. */
317 struct ready_list
319 rtx *vec;
320 int veclen;
321 int first;
322 int n_ready;
325 /* The pointer to the ready list. */
326 static struct ready_list *readyp;
328 /* Scheduling clock. */
329 static int clock_var;
331 /* Number of instructions in current scheduling region. */
332 static int rgn_n_insns;
334 static int may_trap_exp (rtx, int);
336 /* Nonzero iff the address is comprised from at most 1 register. */
337 #define CONST_BASED_ADDRESS_P(x) \
338 (REG_P (x) \
339 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
340 || (GET_CODE (x) == LO_SUM)) \
341 && (CONSTANT_P (XEXP (x, 0)) \
342 || CONSTANT_P (XEXP (x, 1)))))
344 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
345 as found by analyzing insn's expression. */
347 static int
348 may_trap_exp (rtx x, int is_store)
350 enum rtx_code code;
352 if (x == 0)
353 return TRAP_FREE;
354 code = GET_CODE (x);
355 if (is_store)
357 if (code == MEM && may_trap_p (x))
358 return TRAP_RISKY;
359 else
360 return TRAP_FREE;
362 if (code == MEM)
364 /* The insn uses memory: a volatile load. */
365 if (MEM_VOLATILE_P (x))
366 return IRISKY;
367 /* An exception-free load. */
368 if (!may_trap_p (x))
369 return IFREE;
370 /* A load with 1 base register, to be further checked. */
371 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
372 return PFREE_CANDIDATE;
373 /* No info on the load, to be further checked. */
374 return PRISKY_CANDIDATE;
376 else
378 const char *fmt;
379 int i, insn_class = TRAP_FREE;
381 /* Neither store nor load, check if it may cause a trap. */
382 if (may_trap_p (x))
383 return TRAP_RISKY;
384 /* Recursive step: walk the insn... */
385 fmt = GET_RTX_FORMAT (code);
386 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
388 if (fmt[i] == 'e')
390 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
391 insn_class = WORST_CLASS (insn_class, tmp_class);
393 else if (fmt[i] == 'E')
395 int j;
396 for (j = 0; j < XVECLEN (x, i); j++)
398 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
399 insn_class = WORST_CLASS (insn_class, tmp_class);
400 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
401 break;
404 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
405 break;
407 return insn_class;
411 /* Classifies insn for the purpose of verifying that it can be
412 moved speculatively, by examining it's patterns, returning:
413 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
414 TRAP_FREE: non-load insn.
415 IFREE: load from a globally safe location.
416 IRISKY: volatile load.
417 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
418 being either PFREE or PRISKY. */
421 haifa_classify_insn (rtx insn)
423 rtx pat = PATTERN (insn);
424 int tmp_class = TRAP_FREE;
425 int insn_class = TRAP_FREE;
426 enum rtx_code code;
428 if (GET_CODE (pat) == PARALLEL)
430 int i, len = XVECLEN (pat, 0);
432 for (i = len - 1; i >= 0; i--)
434 code = GET_CODE (XVECEXP (pat, 0, i));
435 switch (code)
437 case CLOBBER:
438 /* Test if it is a 'store'. */
439 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
440 break;
441 case SET:
442 /* Test if it is a store. */
443 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
444 if (tmp_class == TRAP_RISKY)
445 break;
446 /* Test if it is a load. */
447 tmp_class
448 = WORST_CLASS (tmp_class,
449 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
450 0));
451 break;
452 case COND_EXEC:
453 case TRAP_IF:
454 tmp_class = TRAP_RISKY;
455 break;
456 default:
459 insn_class = WORST_CLASS (insn_class, tmp_class);
460 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
461 break;
464 else
466 code = GET_CODE (pat);
467 switch (code)
469 case CLOBBER:
470 /* Test if it is a 'store'. */
471 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
472 break;
473 case SET:
474 /* Test if it is a store. */
475 tmp_class = may_trap_exp (SET_DEST (pat), 1);
476 if (tmp_class == TRAP_RISKY)
477 break;
478 /* Test if it is a load. */
479 tmp_class =
480 WORST_CLASS (tmp_class,
481 may_trap_exp (SET_SRC (pat), 0));
482 break;
483 case COND_EXEC:
484 case TRAP_IF:
485 tmp_class = TRAP_RISKY;
486 break;
487 default:;
489 insn_class = tmp_class;
492 return insn_class;
495 /* Forward declarations. */
497 HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
498 static int priority (rtx);
499 static int rank_for_schedule (const void *, const void *);
500 static void swap_sort (rtx *, int);
501 static void queue_insn (rtx, int);
502 static int schedule_insn (rtx);
503 static int find_set_reg_weight (rtx);
504 static void find_insn_reg_weight (basic_block);
505 static void find_insn_reg_weight1 (rtx);
506 static void adjust_priority (rtx);
507 static void advance_one_cycle (void);
509 /* Notes handling mechanism:
510 =========================
511 Generally, NOTES are saved before scheduling and restored after scheduling.
512 The scheduler distinguishes between three types of notes:
514 (1) LINE_NUMBER notes, generated and used for debugging. Here,
515 before scheduling a region, a pointer to the LINE_NUMBER note is
516 added to the insn following it (in save_line_notes()), and the note
517 is removed (in rm_line_notes() and unlink_line_notes()). After
518 scheduling the region, this pointer is used for regeneration of
519 the LINE_NUMBER note (in restore_line_notes()).
521 (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
522 Before scheduling a region, a pointer to the note is added to the insn
523 that follows or precedes it. (This happens as part of the data dependence
524 computation). After scheduling an insn, the pointer contained in it is
525 used for regenerating the corresponding note (in reemit_notes).
527 (3) All other notes (e.g. INSN_DELETED): Before scheduling a block,
528 these notes are put in a list (in rm_other_notes() and
529 unlink_other_notes ()). After scheduling the block, these notes are
530 inserted at the beginning of the block (in schedule_block()). */
532 static rtx unlink_other_notes (rtx, rtx);
533 static rtx unlink_line_notes (rtx, rtx);
534 static void reemit_notes (rtx);
536 static rtx *ready_lastpos (struct ready_list *);
537 static void ready_add (struct ready_list *, rtx, bool);
538 static void ready_sort (struct ready_list *);
539 static rtx ready_remove_first (struct ready_list *);
541 static void queue_to_ready (struct ready_list *);
542 static int early_queue_to_ready (state_t, struct ready_list *);
544 static void debug_ready_list (struct ready_list *);
546 static void move_insn (rtx);
548 /* The following functions are used to implement multi-pass scheduling
549 on the first cycle. */
550 static rtx ready_element (struct ready_list *, int);
551 static rtx ready_remove (struct ready_list *, int);
552 static void ready_remove_insn (rtx);
553 static int max_issue (struct ready_list *, int *, int);
555 static rtx choose_ready (struct ready_list *);
557 static void fix_inter_tick (rtx, rtx);
558 static int fix_tick_ready (rtx);
559 static void change_queue_index (rtx, int);
560 static void resolve_dep (rtx, rtx);
562 /* The following functions are used to implement scheduling of data/control
563 speculative instructions. */
565 static void extend_h_i_d (void);
566 static void extend_ready (int);
567 static void extend_global (rtx);
568 static void extend_all (rtx);
569 static void init_h_i_d (rtx);
570 static void generate_recovery_code (rtx);
571 static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
572 static void begin_speculative_block (rtx);
573 static void add_to_speculative_block (rtx);
574 static dw_t dep_weak (ds_t);
575 static edge find_fallthru_edge (basic_block);
576 static void init_before_recovery (void);
577 static basic_block create_recovery_block (void);
578 static void create_check_block_twin (rtx, bool);
579 static void fix_recovery_deps (basic_block);
580 static void associate_line_notes_with_blocks (basic_block);
581 static void change_pattern (rtx, rtx);
582 static int speculate_insn (rtx, ds_t, rtx *);
583 static void dump_new_block_header (int, basic_block, rtx, rtx);
584 static void restore_bb_notes (basic_block);
585 static void extend_bb (basic_block);
586 static void fix_jump_move (rtx);
587 static void move_block_after_check (rtx);
588 static void move_succs (VEC(edge,gc) **, basic_block);
589 static void init_glat (void);
590 static void init_glat1 (basic_block);
591 static void attach_life_info1 (basic_block);
592 static void free_glat (void);
593 static void sched_remove_insn (rtx);
594 static void clear_priorities (rtx);
595 static void add_jump_dependencies (rtx, rtx);
596 static void calc_priorities (rtx);
597 #ifdef ENABLE_CHECKING
598 static int has_edge_p (VEC(edge,gc) *, int);
599 static void check_cfg (rtx, rtx);
600 static void check_sched_flags (void);
601 #endif
603 #endif /* INSN_SCHEDULING */
605 /* Point to state used for the current scheduling pass. */
606 struct sched_info *current_sched_info;
608 #ifndef INSN_SCHEDULING
609 void
610 schedule_insns (void)
613 #else
615 /* Working copy of frontend's sched_info variable. */
616 static struct sched_info current_sched_info_var;
618 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
619 so that insns independent of the last scheduled insn will be preferred
620 over dependent instructions. */
622 static rtx last_scheduled_insn;
624 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
625 This is the number of cycles between instruction issue and
626 instruction results. */
628 HAIFA_INLINE int
629 insn_cost (rtx insn, rtx link, rtx used)
631 return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
632 link, used);
635 /* Compute cost of executing INSN given the dependence on the insn USED.
636 If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
637 Otherwise, dependence between INSN and USED is assumed to be of type
638 DEP_TYPE. This function was introduced as a workaround for
639 targetm.adjust_cost hook.
640 This is the number of cycles between instruction issue and
641 instruction results. */
643 HAIFA_INLINE static int
644 insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
646 int cost = INSN_COST (insn);
648 if (cost < 0)
650 /* A USE insn, or something else we don't need to
651 understand. We can't pass these directly to
652 result_ready_cost or insn_default_latency because it will
653 trigger a fatal error for unrecognizable insns. */
654 if (recog_memoized (insn) < 0)
656 INSN_COST (insn) = 0;
657 return 0;
659 else
661 cost = insn_default_latency (insn);
662 if (cost < 0)
663 cost = 0;
665 INSN_COST (insn) = cost;
669 /* In this case estimate cost without caring how insn is used. */
670 if (used == 0)
671 return cost;
673 /* A USE insn should never require the value used to be computed.
674 This allows the computation of a function's result and parameter
675 values to overlap the return and call. */
676 if (recog_memoized (used) < 0)
677 cost = 0;
678 else
680 gcc_assert (!link || dep_type == REG_NOTE_KIND (link));
682 if (INSN_CODE (insn) >= 0)
684 if (dep_type == REG_DEP_ANTI)
685 cost = 0;
686 else if (dep_type == REG_DEP_OUTPUT)
688 cost = (insn_default_latency (insn)
689 - insn_default_latency (used));
690 if (cost <= 0)
691 cost = 1;
693 else if (bypass_p (insn))
694 cost = insn_latency (insn, used);
697 if (targetm.sched.adjust_cost_2)
698 cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
699 else
701 gcc_assert (link);
702 if (targetm.sched.adjust_cost)
703 cost = targetm.sched.adjust_cost (used, link, insn, cost);
706 if (cost < 0)
707 cost = 0;
710 return cost;
713 /* Compute the priority number for INSN. */
715 static int
716 priority (rtx insn)
718 rtx link;
720 if (! INSN_P (insn))
721 return 0;
723 if (! INSN_PRIORITY_KNOWN (insn))
725 int this_priority = 0;
727 if (INSN_DEPEND (insn) == 0)
728 this_priority = insn_cost (insn, 0, 0);
729 else
731 rtx prev_first, twin;
732 basic_block rec;
734 /* For recovery check instructions we calculate priority slightly
735 different than that of normal instructions. Instead of walking
736 through INSN_DEPEND (check) list, we walk through INSN_DEPEND list
737 of each instruction in the corresponding recovery block. */
739 rec = RECOVERY_BLOCK (insn);
740 if (!rec || rec == EXIT_BLOCK_PTR)
742 prev_first = PREV_INSN (insn);
743 twin = insn;
745 else
747 prev_first = NEXT_INSN (BB_HEAD (rec));
748 twin = PREV_INSN (BB_END (rec));
753 for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
755 rtx next;
756 int next_priority;
758 next = XEXP (link, 0);
760 if (BLOCK_FOR_INSN (next) != rec)
762 /* Critical path is meaningful in block boundaries
763 only. */
764 if (! (*current_sched_info->contributes_to_priority)
765 (next, insn)
766 /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
767 then speculative instructions will less likely be
768 scheduled. That is because the priority of
769 their producers will increase, and, thus, the
770 producers will more likely be scheduled, thus,
771 resolving the dependence. */
772 || ((current_sched_info->flags & DO_SPECULATION)
773 && (DEP_STATUS (link) & SPECULATIVE)
774 && !(spec_info->flags
775 & COUNT_SPEC_IN_CRITICAL_PATH)))
776 continue;
778 next_priority = insn_cost1 (insn,
779 twin == insn ?
780 REG_NOTE_KIND (link) :
781 REG_DEP_ANTI,
782 twin == insn ? link : 0,
783 next) + priority (next);
785 if (next_priority > this_priority)
786 this_priority = next_priority;
790 twin = PREV_INSN (twin);
792 while (twin != prev_first);
794 INSN_PRIORITY (insn) = this_priority;
795 INSN_PRIORITY_KNOWN (insn) = 1;
798 return INSN_PRIORITY (insn);
801 /* Macros and functions for keeping the priority queue sorted, and
802 dealing with queuing and dequeuing of instructions. */
804 #define SCHED_SORT(READY, N_READY) \
805 do { if ((N_READY) == 2) \
806 swap_sort (READY, N_READY); \
807 else if ((N_READY) > 2) \
808 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
809 while (0)
811 /* Returns a positive value if x is preferred; returns a negative value if
812 y is preferred. Should never return 0, since that will make the sort
813 unstable. */
815 static int
816 rank_for_schedule (const void *x, const void *y)
818 rtx tmp = *(const rtx *) y;
819 rtx tmp2 = *(const rtx *) x;
820 rtx link;
821 int tmp_class, tmp2_class, depend_count1, depend_count2;
822 int val, priority_val, weight_val, info_val;
824 /* The insn in a schedule group should be issued the first. */
825 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
826 return SCHED_GROUP_P (tmp2) ? 1 : -1;
828 /* Prefer insn with higher priority. */
829 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
831 if (priority_val)
832 return priority_val;
834 /* Prefer speculative insn with greater dependencies weakness. */
835 if (spec_info)
837 ds_t ds1, ds2;
838 dw_t dw1, dw2;
839 int dw;
841 ds1 = TODO_SPEC (tmp) & SPECULATIVE;
842 if (ds1)
843 dw1 = dep_weak (ds1);
844 else
845 dw1 = NO_DEP_WEAK;
847 ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
848 if (ds2)
849 dw2 = dep_weak (ds2);
850 else
851 dw2 = NO_DEP_WEAK;
853 dw = dw2 - dw1;
854 if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
855 return dw;
858 /* Prefer an insn with smaller contribution to registers-pressure. */
859 if (!reload_completed &&
860 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
861 return weight_val;
863 info_val = (*current_sched_info->rank) (tmp, tmp2);
864 if (info_val)
865 return info_val;
867 /* Compare insns based on their relation to the last-scheduled-insn. */
868 if (INSN_P (last_scheduled_insn))
870 /* Classify the instructions into three classes:
871 1) Data dependent on last schedule insn.
872 2) Anti/Output dependent on last scheduled insn.
873 3) Independent of last scheduled insn, or has latency of one.
874 Choose the insn from the highest numbered class if different. */
875 link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
876 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
877 tmp_class = 3;
878 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
879 tmp_class = 1;
880 else
881 tmp_class = 2;
883 link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
884 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
885 tmp2_class = 3;
886 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
887 tmp2_class = 1;
888 else
889 tmp2_class = 2;
891 if ((val = tmp2_class - tmp_class))
892 return val;
895 /* Prefer the insn which has more later insns that depend on it.
896 This gives the scheduler more freedom when scheduling later
897 instructions at the expense of added register pressure. */
898 depend_count1 = 0;
899 for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
900 depend_count1++;
902 depend_count2 = 0;
903 for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
904 depend_count2++;
906 val = depend_count2 - depend_count1;
907 if (val)
908 return val;
910 /* If insns are equally good, sort by INSN_LUID (original insn order),
911 so that we make the sort stable. This minimizes instruction movement,
912 thus minimizing sched's effect on debugging and cross-jumping. */
913 return INSN_LUID (tmp) - INSN_LUID (tmp2);
916 /* Resort the array A in which only element at index N may be out of order. */
918 HAIFA_INLINE static void
919 swap_sort (rtx *a, int n)
921 rtx insn = a[n - 1];
922 int i = n - 2;
924 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
926 a[i + 1] = a[i];
927 i -= 1;
929 a[i + 1] = insn;
932 /* Add INSN to the insn queue so that it can be executed at least
933 N_CYCLES after the currently executing insn. Preserve insns
934 chain for debugging purposes. */
936 HAIFA_INLINE static void
937 queue_insn (rtx insn, int n_cycles)
939 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
940 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
942 gcc_assert (n_cycles <= max_insn_queue_index);
944 insn_queue[next_q] = link;
945 q_size += 1;
947 if (sched_verbose >= 2)
949 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
950 (*current_sched_info->print_insn) (insn, 0));
952 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
955 QUEUE_INDEX (insn) = next_q;
958 /* Remove INSN from queue. */
959 static void
960 queue_remove (rtx insn)
962 gcc_assert (QUEUE_INDEX (insn) >= 0);
963 remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
964 q_size--;
965 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
968 /* Return a pointer to the bottom of the ready list, i.e. the insn
969 with the lowest priority. */
971 HAIFA_INLINE static rtx *
972 ready_lastpos (struct ready_list *ready)
974 gcc_assert (ready->n_ready >= 1);
975 return ready->vec + ready->first - ready->n_ready + 1;
978 /* Add an element INSN to the ready list so that it ends up with the
979 lowest/highest priority depending on FIRST_P. */
981 HAIFA_INLINE static void
982 ready_add (struct ready_list *ready, rtx insn, bool first_p)
984 if (!first_p)
986 if (ready->first == ready->n_ready)
988 memmove (ready->vec + ready->veclen - ready->n_ready,
989 ready_lastpos (ready),
990 ready->n_ready * sizeof (rtx));
991 ready->first = ready->veclen - 1;
993 ready->vec[ready->first - ready->n_ready] = insn;
995 else
997 if (ready->first == ready->veclen - 1)
999 if (ready->n_ready)
1000 /* ready_lastpos() fails when called with (ready->n_ready == 0). */
1001 memmove (ready->vec + ready->veclen - ready->n_ready - 1,
1002 ready_lastpos (ready),
1003 ready->n_ready * sizeof (rtx));
1004 ready->first = ready->veclen - 2;
1006 ready->vec[++(ready->first)] = insn;
1009 ready->n_ready++;
1011 gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
1012 QUEUE_INDEX (insn) = QUEUE_READY;
1015 /* Remove the element with the highest priority from the ready list and
1016 return it. */
1018 HAIFA_INLINE static rtx
1019 ready_remove_first (struct ready_list *ready)
1021 rtx t;
1023 gcc_assert (ready->n_ready);
1024 t = ready->vec[ready->first--];
1025 ready->n_ready--;
1026 /* If the queue becomes empty, reset it. */
1027 if (ready->n_ready == 0)
1028 ready->first = ready->veclen - 1;
1030 gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
1031 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1033 return t;
1036 /* The following code implements multi-pass scheduling for the first
1037 cycle. In other words, we will try to choose ready insn which
1038 permits to start maximum number of insns on the same cycle. */
1040 /* Return a pointer to the element INDEX from the ready. INDEX for
1041 insn with the highest priority is 0, and the lowest priority has
1042 N_READY - 1. */
1044 HAIFA_INLINE static rtx
1045 ready_element (struct ready_list *ready, int index)
1047 gcc_assert (ready->n_ready && index < ready->n_ready);
1049 return ready->vec[ready->first - index];
1052 /* Remove the element INDEX from the ready list and return it. INDEX
1053 for insn with the highest priority is 0, and the lowest priority
1054 has N_READY - 1. */
1056 HAIFA_INLINE static rtx
1057 ready_remove (struct ready_list *ready, int index)
1059 rtx t;
1060 int i;
1062 if (index == 0)
1063 return ready_remove_first (ready);
1064 gcc_assert (ready->n_ready && index < ready->n_ready);
1065 t = ready->vec[ready->first - index];
1066 ready->n_ready--;
1067 for (i = index; i < ready->n_ready; i++)
1068 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1069 QUEUE_INDEX (t) = QUEUE_NOWHERE;
1070 return t;
1073 /* Remove INSN from the ready list. */
1074 static void
1075 ready_remove_insn (rtx insn)
1077 int i;
1079 for (i = 0; i < readyp->n_ready; i++)
1080 if (ready_element (readyp, i) == insn)
1082 ready_remove (readyp, i);
1083 return;
1085 gcc_unreachable ();
1088 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1089 macro. */
1091 HAIFA_INLINE static void
1092 ready_sort (struct ready_list *ready)
1094 rtx *first = ready_lastpos (ready);
1095 SCHED_SORT (first, ready->n_ready);
1098 /* PREV is an insn that is ready to execute. Adjust its priority if that
1099 will help shorten or lengthen register lifetimes as appropriate. Also
1100 provide a hook for the target to tweek itself. */
1102 HAIFA_INLINE static void
1103 adjust_priority (rtx prev)
1105 /* ??? There used to be code here to try and estimate how an insn
1106 affected register lifetimes, but it did it by looking at REG_DEAD
1107 notes, which we removed in schedule_region. Nor did it try to
1108 take into account register pressure or anything useful like that.
1110 Revisit when we have a machine model to work with and not before. */
1112 if (targetm.sched.adjust_priority)
1113 INSN_PRIORITY (prev) =
1114 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1117 /* Advance time on one cycle. */
1118 HAIFA_INLINE static void
1119 advance_one_cycle (void)
1121 if (targetm.sched.dfa_pre_cycle_insn)
1122 state_transition (curr_state,
1123 targetm.sched.dfa_pre_cycle_insn ());
1125 state_transition (curr_state, NULL);
1127 if (targetm.sched.dfa_post_cycle_insn)
1128 state_transition (curr_state,
1129 targetm.sched.dfa_post_cycle_insn ());
1132 /* Clock at which the previous instruction was issued. */
1133 static int last_clock_var;
1135 /* INSN is the "currently executing insn". Launch each insn which was
1136 waiting on INSN. READY is the ready list which contains the insns
1137 that are ready to fire. CLOCK is the current cycle. The function
1138 returns necessary cycle advance after issuing the insn (it is not
1139 zero for insns in a schedule group). */
1141 static int
1142 schedule_insn (rtx insn)
1144 rtx link;
1145 int advance = 0;
1147 if (sched_verbose >= 1)
1149 char buf[2048];
1151 print_insn (buf, insn, 0);
1152 buf[40] = 0;
1153 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);
1155 if (recog_memoized (insn) < 0)
1156 fprintf (sched_dump, "nothing");
1157 else
1158 print_reservation (sched_dump, insn);
1159 fputc ('\n', sched_dump);
1162 /* Scheduling instruction should have all its dependencies resolved and
1163 should have been removed from the ready list. */
1164 gcc_assert (INSN_DEP_COUNT (insn) == 0);
1165 gcc_assert (!LOG_LINKS (insn));
1166 gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
1168 QUEUE_INDEX (insn) = QUEUE_SCHEDULED;
1170 /* Now we can free RESOLVED_DEPS list. */
1171 if (current_sched_info->flags & USE_DEPS_LIST)
1172 free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
1173 else
1174 free_INSN_LIST_list (&RESOLVED_DEPS (insn));
1176 gcc_assert (INSN_TICK (insn) >= MIN_TICK);
1177 if (INSN_TICK (insn) > clock_var)
1178 /* INSN has been prematurely moved from the queue to the ready list.
1179 This is possible only if following flag is set. */
1180 gcc_assert (flag_sched_stalled_insns);
1182 /* ??? Probably, if INSN is scheduled prematurely, we should leave
1183 INSN_TICK untouched. This is a machine-dependent issue, actually. */
1184 INSN_TICK (insn) = clock_var;
1186 /* Update dependent instructions. */
1187 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
1189 rtx next = XEXP (link, 0);
1191 resolve_dep (next, insn);
1193 if (!RECOVERY_BLOCK (insn)
1194 || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
1196 int effective_cost;
1198 effective_cost = try_ready (next);
1200 if (effective_cost >= 0
1201 && SCHED_GROUP_P (next)
1202 && advance < effective_cost)
1203 advance = effective_cost;
1205 else
1206 /* Check always has only one forward dependence (to the first insn in
1207 the recovery block), therefore, this will be executed only once. */
1209 gcc_assert (XEXP (link, 1) == 0);
1210 fix_recovery_deps (RECOVERY_BLOCK (insn));
1214 /* Annotate the instruction with issue information -- TImode
1215 indicates that the instruction is expected not to be able
1216 to issue on the same cycle as the previous insn. A machine
1217 may use this information to decide how the instruction should
1218 be aligned. */
1219 if (issue_rate > 1
1220 && GET_CODE (PATTERN (insn)) != USE
1221 && GET_CODE (PATTERN (insn)) != CLOBBER)
1223 if (reload_completed)
1224 PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
1225 last_clock_var = clock_var;
1228 return advance;
1231 /* Functions for handling of notes. */
1233 /* Delete notes beginning with INSN and put them in the chain
1234 of notes ended by NOTE_LIST.
1235 Returns the insn following the notes. */
1237 static rtx
1238 unlink_other_notes (rtx insn, rtx tail)
1240 rtx prev = PREV_INSN (insn);
1242 while (insn != tail && NOTE_NOT_BB_P (insn))
1244 rtx next = NEXT_INSN (insn);
1245 basic_block bb = BLOCK_FOR_INSN (insn);
1247 /* Delete the note from its current position. */
1248 if (prev)
1249 NEXT_INSN (prev) = next;
1250 if (next)
1251 PREV_INSN (next) = prev;
1253 if (bb)
1255 /* Basic block can begin with either LABEL or
1256 NOTE_INSN_BASIC_BLOCK. */
1257 gcc_assert (BB_HEAD (bb) != insn);
1259 /* Check if we are removing last insn in the BB. */
1260 if (BB_END (bb) == insn)
1261 BB_END (bb) = prev;
1264 /* See sched_analyze to see how these are handled. */
1265 if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1266 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
1268 /* Insert the note at the end of the notes list. */
1269 PREV_INSN (insn) = note_list;
1270 if (note_list)
1271 NEXT_INSN (note_list) = insn;
1272 note_list = insn;
1275 insn = next;
1277 return insn;
1280 /* Delete line notes beginning with INSN. Record line-number notes so
1281 they can be reused. Returns the insn following the notes. */
1283 static rtx
1284 unlink_line_notes (rtx insn, rtx tail)
1286 rtx prev = PREV_INSN (insn);
1288 while (insn != tail && NOTE_NOT_BB_P (insn))
1290 rtx next = NEXT_INSN (insn);
1292 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
1294 basic_block bb = BLOCK_FOR_INSN (insn);
1296 /* Delete the note from its current position. */
1297 if (prev)
1298 NEXT_INSN (prev) = next;
1299 if (next)
1300 PREV_INSN (next) = prev;
1302 if (bb)
1304 /* Basic block can begin with either LABEL or
1305 NOTE_INSN_BASIC_BLOCK. */
1306 gcc_assert (BB_HEAD (bb) != insn);
1308 /* Check if we are removing last insn in the BB. */
1309 if (BB_END (bb) == insn)
1310 BB_END (bb) = prev;
1313 /* Record line-number notes so they can be reused. */
1314 LINE_NOTE (insn) = insn;
1316 else
1317 prev = insn;
1319 insn = next;
1321 return insn;
1324 /* Return the head and tail pointers of ebb starting at BEG and ending
1325 at END. */
1327 void
1328 get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
1330 rtx beg_head = BB_HEAD (beg);
1331 rtx beg_tail = BB_END (beg);
1332 rtx end_head = BB_HEAD (end);
1333 rtx end_tail = BB_END (end);
1335 /* Don't include any notes or labels at the beginning of the BEG
1336 basic block, or notes at the end of the END basic blocks. */
1338 if (LABEL_P (beg_head))
1339 beg_head = NEXT_INSN (beg_head);
1341 while (beg_head != beg_tail)
1342 if (NOTE_P (beg_head))
1343 beg_head = NEXT_INSN (beg_head);
1344 else
1345 break;
1347 *headp = beg_head;
1349 if (beg == end)
1350 end_head = beg_head;
1351 else if (LABEL_P (end_head))
1352 end_head = NEXT_INSN (end_head);
1354 while (end_head != end_tail)
1355 if (NOTE_P (end_tail))
1356 end_tail = PREV_INSN (end_tail);
1357 else
1358 break;
1360 *tailp = end_tail;
1363 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1366 no_real_insns_p (rtx head, rtx tail)
1368 while (head != NEXT_INSN (tail))
1370 if (!NOTE_P (head) && !LABEL_P (head))
1371 return 0;
1372 head = NEXT_INSN (head);
1374 return 1;
1377 /* Delete line notes from one block. Save them so they can be later restored
1378 (in restore_line_notes). HEAD and TAIL are the boundaries of the
1379 block in which notes should be processed. */
1381 void
1382 rm_line_notes (rtx head, rtx tail)
1384 rtx next_tail;
1385 rtx insn;
1387 next_tail = NEXT_INSN (tail);
1388 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1390 rtx prev;
1392 /* Farm out notes, and maybe save them in NOTE_LIST.
1393 This is needed to keep the debugger from
1394 getting completely deranged. */
1395 if (NOTE_NOT_BB_P (insn))
1397 prev = insn;
1398 insn = unlink_line_notes (insn, next_tail);
1400 gcc_assert (prev != tail && prev != head && insn != next_tail);
1405 /* Save line number notes for each insn in block B. HEAD and TAIL are
1406 the boundaries of the block in which notes should be processed. */
1408 void
1409 save_line_notes (int b, rtx head, rtx tail)
1411 rtx next_tail;
1413 /* We must use the true line number for the first insn in the block
1414 that was computed and saved at the start of this pass. We can't
1415 use the current line number, because scheduling of the previous
1416 block may have changed the current line number. */
1418 rtx line = line_note_head[b];
1419 rtx insn;
1421 next_tail = NEXT_INSN (tail);
1423 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1424 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1425 line = insn;
1426 else
1427 LINE_NOTE (insn) = line;
1430 /* After a block was scheduled, insert line notes into the insns list.
1431 HEAD and TAIL are the boundaries of the block in which notes should
1432 be processed. */
1434 void
1435 restore_line_notes (rtx head, rtx tail)
1437 rtx line, note, prev, new;
1438 int added_notes = 0;
1439 rtx next_tail, insn;
1441 head = head;
1442 next_tail = NEXT_INSN (tail);
1444 /* Determine the current line-number. We want to know the current
1445 line number of the first insn of the block here, in case it is
1446 different from the true line number that was saved earlier. If
1447 different, then we need a line number note before the first insn
1448 of this block. If it happens to be the same, then we don't want to
1449 emit another line number note here. */
1450 for (line = head; line; line = PREV_INSN (line))
1451 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
1452 break;
1454 /* Walk the insns keeping track of the current line-number and inserting
1455 the line-number notes as needed. */
1456 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1457 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1458 line = insn;
1459 /* This used to emit line number notes before every non-deleted note.
1460 However, this confuses a debugger, because line notes not separated
1461 by real instructions all end up at the same address. I can find no
1462 use for line number notes before other notes, so none are emitted. */
1463 else if (!NOTE_P (insn)
1464 && INSN_UID (insn) < old_max_uid
1465 && (note = LINE_NOTE (insn)) != 0
1466 && note != line
1467 && (line == 0
1468 #ifdef USE_MAPPED_LOCATION
1469 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
1470 #else
1471 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
1472 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
1473 #endif
1476 line = note;
1477 prev = PREV_INSN (insn);
1478 if (LINE_NOTE (note))
1480 /* Re-use the original line-number note. */
1481 LINE_NOTE (note) = 0;
1482 PREV_INSN (note) = prev;
1483 NEXT_INSN (prev) = note;
1484 PREV_INSN (insn) = note;
1485 NEXT_INSN (note) = insn;
1486 set_block_for_insn (note, BLOCK_FOR_INSN (insn));
1488 else
1490 added_notes++;
1491 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
1492 #ifndef USE_MAPPED_LOCATION
1493 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
1494 #endif
1497 if (sched_verbose && added_notes)
1498 fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
1501 /* After scheduling the function, delete redundant line notes from the
1502 insns list. */
1504 void
1505 rm_redundant_line_notes (void)
1507 rtx line = 0;
1508 rtx insn = get_insns ();
1509 int active_insn = 0;
1510 int notes = 0;
1512 /* Walk the insns deleting redundant line-number notes. Many of these
1513 are already present. The remainder tend to occur at basic
1514 block boundaries. */
1515 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1516 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1518 /* If there are no active insns following, INSN is redundant. */
1519 if (active_insn == 0)
1521 notes++;
1522 SET_INSN_DELETED (insn);
1524 /* If the line number is unchanged, LINE is redundant. */
1525 else if (line
1526 #ifdef USE_MAPPED_LOCATION
1527 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
1528 #else
1529 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
1530 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
1531 #endif
1534 notes++;
1535 SET_INSN_DELETED (line);
1536 line = insn;
1538 else
1539 line = insn;
1540 active_insn = 0;
1542 else if (!((NOTE_P (insn)
1543 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
1544 || (NONJUMP_INSN_P (insn)
1545 && (GET_CODE (PATTERN (insn)) == USE
1546 || GET_CODE (PATTERN (insn)) == CLOBBER))))
1547 active_insn++;
1549 if (sched_verbose && notes)
1550 fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
1553 /* Delete notes between HEAD and TAIL and put them in the chain
1554 of notes ended by NOTE_LIST. */
1556 void
1557 rm_other_notes (rtx head, rtx tail)
1559 rtx next_tail;
1560 rtx insn;
1562 note_list = 0;
1563 if (head == tail && (! INSN_P (head)))
1564 return;
1566 next_tail = NEXT_INSN (tail);
1567 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1569 rtx prev;
1571 /* Farm out notes, and maybe save them in NOTE_LIST.
1572 This is needed to keep the debugger from
1573 getting completely deranged. */
1574 if (NOTE_NOT_BB_P (insn))
1576 prev = insn;
1578 insn = unlink_other_notes (insn, next_tail);
1580 gcc_assert (prev != tail && prev != head && insn != next_tail);
1585 /* Functions for computation of registers live/usage info. */
1587 /* This function looks for a new register being defined.
1588 If the destination register is already used by the source,
1589 a new register is not needed. */
1591 static int
1592 find_set_reg_weight (rtx x)
1594 if (GET_CODE (x) == CLOBBER
1595 && register_operand (SET_DEST (x), VOIDmode))
1596 return 1;
1597 if (GET_CODE (x) == SET
1598 && register_operand (SET_DEST (x), VOIDmode))
1600 if (REG_P (SET_DEST (x)))
1602 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1603 return 1;
1604 else
1605 return 0;
1607 return 1;
1609 return 0;
1612 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
1614 static void
1615 find_insn_reg_weight (basic_block bb)
1617 rtx insn, next_tail, head, tail;
1619 get_ebb_head_tail (bb, bb, &head, &tail);
1620 next_tail = NEXT_INSN (tail);
1622 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1623 find_insn_reg_weight1 (insn);
1626 /* Calculate INSN_REG_WEIGHT for single instruction.
1627 Separated from find_insn_reg_weight because of need
1628 to initialize new instruction in generate_recovery_code. */
1629 static void
1630 find_insn_reg_weight1 (rtx insn)
1632 int reg_weight = 0;
1633 rtx x;
1635 /* Handle register life information. */
1636 if (! INSN_P (insn))
1637 return;
1639 /* Increment weight for each register born here. */
1640 x = PATTERN (insn);
1641 reg_weight += find_set_reg_weight (x);
1642 if (GET_CODE (x) == PARALLEL)
1644 int j;
1645 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1647 x = XVECEXP (PATTERN (insn), 0, j);
1648 reg_weight += find_set_reg_weight (x);
1651 /* Decrement weight for each register that dies here. */
1652 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1654 if (REG_NOTE_KIND (x) == REG_DEAD
1655 || REG_NOTE_KIND (x) == REG_UNUSED)
1656 reg_weight--;
1659 INSN_REG_WEIGHT (insn) = reg_weight;
1662 /* Move insns that became ready to fire from queue to ready list. */
1664 static void
1665 queue_to_ready (struct ready_list *ready)
1667 rtx insn;
1668 rtx link;
1670 q_ptr = NEXT_Q (q_ptr);
1672 /* Add all pending insns that can be scheduled without stalls to the
1673 ready list. */
1674 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1676 insn = XEXP (link, 0);
1677 q_size -= 1;
1679 if (sched_verbose >= 2)
1680 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1681 (*current_sched_info->print_insn) (insn, 0));
1683 /* If the ready list is full, delay the insn for 1 cycle.
1684 See the comment in schedule_block for the rationale. */
1685 if (!reload_completed
1686 && ready->n_ready > MAX_SCHED_READY_INSNS
1687 && !SCHED_GROUP_P (insn))
1689 if (sched_verbose >= 2)
1690 fprintf (sched_dump, "requeued because ready full\n");
1691 queue_insn (insn, 1);
1693 else
1695 ready_add (ready, insn, false);
1696 if (sched_verbose >= 2)
1697 fprintf (sched_dump, "moving to ready without stalls\n");
1700 free_INSN_LIST_list (&insn_queue[q_ptr]);
1702 /* If there are no ready insns, stall until one is ready and add all
1703 of the pending insns at that point to the ready list. */
1704 if (ready->n_ready == 0)
1706 int stalls;
1708 for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
1710 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1712 for (; link; link = XEXP (link, 1))
1714 insn = XEXP (link, 0);
1715 q_size -= 1;
1717 if (sched_verbose >= 2)
1718 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1719 (*current_sched_info->print_insn) (insn, 0));
1721 ready_add (ready, insn, false);
1722 if (sched_verbose >= 2)
1723 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1725 free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);
1727 advance_one_cycle ();
1729 break;
1732 advance_one_cycle ();
1735 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1736 clock_var += stalls;
1740 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1741 prematurely move INSN from the queue to the ready list. Currently,
1742 if a target defines the hook 'is_costly_dependence', this function
1743 uses the hook to check whether there exist any dependences which are
1744 considered costly by the target, between INSN and other insns that
1745 have already been scheduled. Dependences are checked up to Y cycles
1746 back, with default Y=1; The flag -fsched-stalled-insns-dep=Y allows
1747 controlling this value.
1748 (Other considerations could be taken into account instead (or in
1749 addition) depending on user flags and target hooks. */
1751 static bool
1752 ok_for_early_queue_removal (rtx insn)
1754 int n_cycles;
1755 rtx prev_insn = last_scheduled_insn;
1757 if (targetm.sched.is_costly_dependence)
1759 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1761 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1763 rtx dep_link = 0;
1764 int dep_cost;
1766 if (!NOTE_P (prev_insn))
1768 dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
1769 if (dep_link)
1771 dep_cost = insn_cost (prev_insn, dep_link, insn) ;
1772 if (targetm.sched.is_costly_dependence (prev_insn, insn,
1773 dep_link, dep_cost,
1774 flag_sched_stalled_insns_dep - n_cycles))
1775 return false;
1779 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1780 break;
1783 if (!prev_insn)
1784 break;
1785 prev_insn = PREV_INSN (prev_insn);
1789 return true;
1793 /* Remove insns from the queue, before they become "ready" with respect
1794 to FU latency considerations. */
1796 static int
1797 early_queue_to_ready (state_t state, struct ready_list *ready)
1799 rtx insn;
1800 rtx link;
1801 rtx next_link;
1802 rtx prev_link;
1803 bool move_to_ready;
1804 int cost;
1805 state_t temp_state = alloca (dfa_state_size);
1806 int stalls;
1807 int insns_removed = 0;
1810 Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1811 function:
1813 X == 0: There is no limit on how many queued insns can be removed
1814 prematurely. (flag_sched_stalled_insns = -1).
1816 X >= 1: Only X queued insns can be removed prematurely in each
1817 invocation. (flag_sched_stalled_insns = X).
1819 Otherwise: Early queue removal is disabled.
1820 (flag_sched_stalled_insns = 0)
1823 if (! flag_sched_stalled_insns)
1824 return 0;
1826 for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
1828 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1830 if (sched_verbose > 6)
1831 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1833 prev_link = 0;
1834 while (link)
1836 next_link = XEXP (link, 1);
1837 insn = XEXP (link, 0);
1838 if (insn && sched_verbose > 6)
1839 print_rtl_single (sched_dump, insn);
1841 memcpy (temp_state, state, dfa_state_size);
1842 if (recog_memoized (insn) < 0)
1843 /* non-negative to indicate that it's not ready
1844 to avoid infinite Q->R->Q->R... */
1845 cost = 0;
1846 else
1847 cost = state_transition (temp_state, insn);
1849 if (sched_verbose >= 6)
1850 fprintf (sched_dump, "transition cost = %d\n", cost);
1852 move_to_ready = false;
1853 if (cost < 0)
1855 move_to_ready = ok_for_early_queue_removal (insn);
1856 if (move_to_ready == true)
1858 /* move from Q to R */
1859 q_size -= 1;
1860 ready_add (ready, insn, false);
1862 if (prev_link)
1863 XEXP (prev_link, 1) = next_link;
1864 else
1865 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1867 free_INSN_LIST_node (link);
1869 if (sched_verbose >= 2)
1870 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1871 (*current_sched_info->print_insn) (insn, 0));
1873 insns_removed++;
1874 if (insns_removed == flag_sched_stalled_insns)
1875 /* Remove no more than flag_sched_stalled_insns insns
1876 from Q at a time. */
1877 return insns_removed;
1881 if (move_to_ready == false)
1882 prev_link = link;
1884 link = next_link;
1885 } /* while link */
1886 } /* if link */
1888 } /* for stalls.. */
1890 return insns_removed;
1894 /* Print the ready list for debugging purposes. Callable from debugger. */
1896 static void
1897 debug_ready_list (struct ready_list *ready)
1899 rtx *p;
1900 int i;
1902 if (ready->n_ready == 0)
1904 fprintf (sched_dump, "\n");
1905 return;
1908 p = ready_lastpos (ready);
1909 for (i = 0; i < ready->n_ready; i++)
1910 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1911 fprintf (sched_dump, "\n");
1914 /* Search INSN for REG_SAVE_NOTE note pairs for
1915 NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
1916 NOTEs. The REG_SAVE_NOTE note following first one is contains the
1917 saved value for NOTE_BLOCK_NUMBER which is useful for
1918 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
1920 static void
1921 reemit_notes (rtx insn)
1923 rtx note, last = insn;
1925 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1927 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
1929 enum insn_note note_type = INTVAL (XEXP (note, 0));
1931 last = emit_note_before (note_type, last);
1932 remove_note (insn, note);
1937 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1938 static void
1939 move_insn (rtx insn)
1941 rtx last = last_scheduled_insn;
1943 if (PREV_INSN (insn) != last)
1945 basic_block bb;
1946 rtx note;
1947 int jump_p = 0;
1949 bb = BLOCK_FOR_INSN (insn);
1951 /* BB_HEAD is either LABEL or NOTE. */
1952 gcc_assert (BB_HEAD (bb) != insn);
1954 if (BB_END (bb) == insn)
1955 /* If this is last instruction in BB, move end marker one
1956 instruction up. */
1958 /* Jumps are always placed at the end of basic block. */
1959 jump_p = control_flow_insn_p (insn);
1961 gcc_assert (!jump_p
1962 || ((current_sched_info->flags & SCHED_RGN)
1963 && RECOVERY_BLOCK (insn)
1964 && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
1965 || (current_sched_info->flags & SCHED_EBB));
1967 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1969 BB_END (bb) = PREV_INSN (insn);
1972 gcc_assert (BB_END (bb) != last);
1974 if (jump_p)
1975 /* We move the block note along with jump. */
1977 /* NT is needed for assertion below. */
1978 rtx nt = current_sched_info->next_tail;
1980 note = NEXT_INSN (insn);
1981 while (NOTE_NOT_BB_P (note) && note != nt)
1982 note = NEXT_INSN (note);
1984 if (note != nt
1985 && (LABEL_P (note)
1986 || BARRIER_P (note)))
1987 note = NEXT_INSN (note);
1989 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1991 else
1992 note = insn;
1994 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1995 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1997 NEXT_INSN (note) = NEXT_INSN (last);
1998 PREV_INSN (NEXT_INSN (last)) = note;
2000 NEXT_INSN (last) = insn;
2001 PREV_INSN (insn) = last;
2003 bb = BLOCK_FOR_INSN (last);
2005 if (jump_p)
2007 fix_jump_move (insn);
2009 if (BLOCK_FOR_INSN (insn) != bb)
2010 move_block_after_check (insn);
2012 gcc_assert (BB_END (bb) == last);
2015 set_block_for_insn (insn, bb);
2017 /* Update BB_END, if needed. */
2018 if (BB_END (bb) == last)
2019 BB_END (bb) = insn;
2022 reemit_notes (insn);
2024 SCHED_GROUP_P (insn) = 0;
2027 /* The following structure describe an entry of the stack of choices. */
2028 struct choice_entry
2030 /* Ordinal number of the issued insn in the ready queue. */
2031 int index;
2032 /* The number of the rest insns whose issues we should try. */
2033 int rest;
2034 /* The number of issued essential insns. */
2035 int n;
2036 /* State after issuing the insn. */
2037 state_t state;
2040 /* The following array is used to implement a stack of choices used in
2041 function max_issue. */
2042 static struct choice_entry *choice_stack;
2044 /* The following variable value is number of essential insns issued on
2045 the current cycle. An insn is essential one if it changes the
2046 processors state. */
2047 static int cycle_issued_insns;
2049 /* The following variable value is maximal number of tries of issuing
2050 insns for the first cycle multipass insn scheduling. We define
2051 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2052 need this constraint if all real insns (with non-negative codes)
2053 had reservations because in this case the algorithm complexity is
2054 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2055 might be incomplete and such insn might occur. For such
2056 descriptions, the complexity of algorithm (without the constraint)
2057 could achieve DFA_LOOKAHEAD ** N , where N is the queue length. */
2058 static int max_lookahead_tries;
2060 /* The following value is value of hook
2061 `first_cycle_multipass_dfa_lookahead' at the last call of
2062 `max_issue'. */
2063 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2065 /* The following value is value of `issue_rate' at the last call of
2066 `sched_init'. */
2067 static int cached_issue_rate = 0;
2069 /* The following function returns maximal (or close to maximal) number
2070 of insns which can be issued on the same cycle and one of which
2071 insns is insns with the best rank (the first insn in READY). To
2072 make this function tries different samples of ready insns. READY
2073 is current queue `ready'. Global array READY_TRY reflects what
2074 insns are already issued in this try. MAX_POINTS is the sum of points
2075 of all instructions in READY. The function stops immediately,
2076 if it reached the such a solution, that all instruction can be issued.
2077 INDEX will contain index of the best insn in READY. The following
2078 function is used only for first cycle multipass scheduling. */
2079 static int
2080 max_issue (struct ready_list *ready, int *index, int max_points)
2082 int n, i, all, n_ready, best, delay, tries_num, points = -1;
2083 struct choice_entry *top;
2084 rtx insn;
2086 best = 0;
2087 memcpy (choice_stack->state, curr_state, dfa_state_size);
2088 top = choice_stack;
2089 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2090 top->n = 0;
2091 n_ready = ready->n_ready;
2092 for (all = i = 0; i < n_ready; i++)
2093 if (!ready_try [i])
2094 all++;
2095 i = 0;
2096 tries_num = 0;
2097 for (;;)
2099 if (top->rest == 0 || i >= n_ready)
2101 if (top == choice_stack)
2102 break;
2103 if (best < top - choice_stack && ready_try [0])
2105 best = top - choice_stack;
2106 *index = choice_stack [1].index;
2107 points = top->n;
2108 if (top->n == max_points || best == all)
2109 break;
2111 i = top->index;
2112 ready_try [i] = 0;
2113 top--;
2114 memcpy (curr_state, top->state, dfa_state_size);
2116 else if (!ready_try [i])
2118 tries_num++;
2119 if (tries_num > max_lookahead_tries)
2120 break;
2121 insn = ready_element (ready, i);
2122 delay = state_transition (curr_state, insn);
2123 if (delay < 0)
2125 if (state_dead_lock_p (curr_state))
2126 top->rest = 0;
2127 else
2128 top->rest--;
2129 n = top->n;
2130 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2131 n += ISSUE_POINTS (insn);
2132 top++;
2133 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2134 top->index = i;
2135 top->n = n;
2136 memcpy (top->state, curr_state, dfa_state_size);
2137 ready_try [i] = 1;
2138 i = -1;
2141 i++;
2143 while (top != choice_stack)
2145 ready_try [top->index] = 0;
2146 top--;
2148 memcpy (curr_state, choice_stack->state, dfa_state_size);
2150 if (sched_verbose >= 4)
2151 fprintf (sched_dump, ";;\t\tChoosed insn : %s; points: %d/%d\n",
2152 (*current_sched_info->print_insn) (ready_element (ready, *index),
2153 0),
2154 points, max_points);
2156 return best;
2159 /* The following function chooses insn from READY and modifies
2160 *N_READY and READY. The following function is used only for first
2161 cycle multipass scheduling. */
2163 static rtx
2164 choose_ready (struct ready_list *ready)
2166 int lookahead = 0;
2168 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2169 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2170 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2171 return ready_remove_first (ready);
2172 else
2174 /* Try to choose the better insn. */
2175 int index = 0, i, n;
2176 rtx insn;
2177 int more_issue, max_points, try_data = 1, try_control = 1;
2179 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2181 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2182 max_lookahead_tries = 100;
2183 for (i = 0; i < issue_rate; i++)
2184 max_lookahead_tries *= lookahead;
2186 insn = ready_element (ready, 0);
2187 if (INSN_CODE (insn) < 0)
2188 return ready_remove_first (ready);
2190 if (spec_info
2191 && spec_info->flags & (PREFER_NON_DATA_SPEC
2192 | PREFER_NON_CONTROL_SPEC))
2194 for (i = 0, n = ready->n_ready; i < n; i++)
2196 rtx x;
2197 ds_t s;
2199 x = ready_element (ready, i);
2200 s = TODO_SPEC (x);
2202 if (spec_info->flags & PREFER_NON_DATA_SPEC
2203 && !(s & DATA_SPEC))
2205 try_data = 0;
2206 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2207 || !try_control)
2208 break;
2211 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2212 && !(s & CONTROL_SPEC))
2214 try_control = 0;
2215 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2216 break;
2221 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2222 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2223 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2224 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2225 (insn)))
2226 /* Discard speculative instruction that stands first in the ready
2227 list. */
2229 change_queue_index (insn, 1);
2230 return 0;
2233 max_points = ISSUE_POINTS (insn);
2234 more_issue = issue_rate - cycle_issued_insns - 1;
2236 for (i = 1; i < ready->n_ready; i++)
2238 insn = ready_element (ready, i);
2239 ready_try [i]
2240 = (INSN_CODE (insn) < 0
2241 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2242 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2243 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2244 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2245 (insn)));
2247 if (!ready_try [i] && more_issue-- > 0)
2248 max_points += ISSUE_POINTS (insn);
2251 if (max_issue (ready, &index, max_points) == 0)
2252 return ready_remove_first (ready);
2253 else
2254 return ready_remove (ready, index);
2258 /* Use forward list scheduling to rearrange insns of block pointed to by
2259 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2260 region. */
2262 void
2263 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2265 struct ready_list ready;
2266 int i, first_cycle_insn_p;
2267 int can_issue_more;
2268 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2269 int sort_p, advance, start_clock_var;
2271 /* Head/tail info for this block. */
2272 rtx prev_head = current_sched_info->prev_head;
2273 rtx next_tail = current_sched_info->next_tail;
2274 rtx head = NEXT_INSN (prev_head);
2275 rtx tail = PREV_INSN (next_tail);
2277 /* We used to have code to avoid getting parameters moved from hard
2278 argument registers into pseudos.
2280 However, it was removed when it proved to be of marginal benefit
2281 and caused problems because schedule_block and compute_forward_dependences
2282 had different notions of what the "head" insn was. */
2284 gcc_assert (head != tail || INSN_P (head));
2286 added_recovery_block_p = false;
2288 /* Debug info. */
2289 if (sched_verbose)
2290 dump_new_block_header (0, *target_bb, head, tail);
2292 state_reset (curr_state);
2294 /* Allocate the ready list. */
2295 readyp = &ready;
2296 ready.vec = NULL;
2297 ready_try = NULL;
2298 choice_stack = NULL;
2300 rgn_n_insns = -1;
2301 extend_ready (rgn_n_insns1 + 1);
2303 ready.first = ready.veclen - 1;
2304 ready.n_ready = 0;
2306 /* It is used for first cycle multipass scheduling. */
2307 temp_state = alloca (dfa_state_size);
2309 if (targetm.sched.md_init)
2310 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2312 /* We start inserting insns after PREV_HEAD. */
2313 last_scheduled_insn = prev_head;
2315 gcc_assert (NOTE_P (last_scheduled_insn)
2316 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2318 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2319 queue. */
2320 q_ptr = 0;
2321 q_size = 0;
2323 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2324 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2326 /* Start just before the beginning of time. */
2327 clock_var = -1;
2329 /* We need queue and ready lists and clock_var be initialized
2330 in try_ready () (which is called through init_ready_list ()). */
2331 (*current_sched_info->init_ready_list) ();
2333 /* The algorithm is O(n^2) in the number of ready insns at any given
2334 time in the worst case. Before reload we are more likely to have
2335 big lists so truncate them to a reasonable size. */
2336 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2338 ready_sort (&ready);
2340 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2341 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2342 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2343 break;
2345 if (sched_verbose >= 2)
2347 fprintf (sched_dump,
2348 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2349 fprintf (sched_dump,
2350 ";;\t\t before reload => truncated to %d insns\n", i);
2353 /* Delay all insns past it for 1 cycle. */
2354 while (i < ready.n_ready)
2355 queue_insn (ready_remove (&ready, i), 1);
2358 /* Now we can restore basic block notes and maintain precise cfg. */
2359 restore_bb_notes (*target_bb);
2361 last_clock_var = -1;
2363 advance = 0;
2365 sort_p = TRUE;
2366 /* Loop until all the insns in BB are scheduled. */
2367 while ((*current_sched_info->schedule_more_p) ())
2371 start_clock_var = clock_var;
2373 clock_var++;
2375 advance_one_cycle ();
2377 /* Add to the ready list all pending insns that can be issued now.
2378 If there are no ready insns, increment clock until one
2379 is ready and add all pending insns at that point to the ready
2380 list. */
2381 queue_to_ready (&ready);
2383 gcc_assert (ready.n_ready);
2385 if (sched_verbose >= 2)
2387 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2388 debug_ready_list (&ready);
2390 advance -= clock_var - start_clock_var;
2392 while (advance > 0);
2394 if (sort_p)
2396 /* Sort the ready list based on priority. */
2397 ready_sort (&ready);
2399 if (sched_verbose >= 2)
2401 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2402 debug_ready_list (&ready);
2406 /* Allow the target to reorder the list, typically for
2407 better instruction bundling. */
2408 if (sort_p && targetm.sched.reorder
2409 && (ready.n_ready == 0
2410 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2411 can_issue_more =
2412 targetm.sched.reorder (sched_dump, sched_verbose,
2413 ready_lastpos (&ready),
2414 &ready.n_ready, clock_var);
2415 else
2416 can_issue_more = issue_rate;
2418 first_cycle_insn_p = 1;
2419 cycle_issued_insns = 0;
2420 for (;;)
2422 rtx insn;
2423 int cost;
2424 bool asm_p = false;
2426 if (sched_verbose >= 2)
2428 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2429 clock_var);
2430 debug_ready_list (&ready);
2433 if (ready.n_ready == 0
2434 && can_issue_more
2435 && reload_completed)
2437 /* Allow scheduling insns directly from the queue in case
2438 there's nothing better to do (ready list is empty) but
2439 there are still vacant dispatch slots in the current cycle. */
2440 if (sched_verbose >= 6)
2441 fprintf(sched_dump,";;\t\tSecond chance\n");
2442 memcpy (temp_state, curr_state, dfa_state_size);
2443 if (early_queue_to_ready (temp_state, &ready))
2444 ready_sort (&ready);
2447 if (ready.n_ready == 0 || !can_issue_more
2448 || state_dead_lock_p (curr_state)
2449 || !(*current_sched_info->schedule_more_p) ())
2450 break;
2452 /* Select and remove the insn from the ready list. */
2453 if (sort_p)
2455 insn = choose_ready (&ready);
2456 if (!insn)
2457 continue;
2459 else
2460 insn = ready_remove_first (&ready);
2462 if (targetm.sched.dfa_new_cycle
2463 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2464 insn, last_clock_var,
2465 clock_var, &sort_p))
2466 /* SORT_P is used by the target to override sorting
2467 of the ready list. This is needed when the target
2468 has modified its internal structures expecting that
2469 the insn will be issued next. As we need the insn
2470 to have the highest priority (so it will be returned by
2471 the ready_remove_first call above), we invoke
2472 ready_add (&ready, insn, true).
2473 But, still, there is one issue: INSN can be later
2474 discarded by scheduler's front end through
2475 current_sched_info->can_schedule_ready_p, hence, won't
2476 be issued next. */
2478 ready_add (&ready, insn, true);
2479 break;
2482 sort_p = TRUE;
2483 memcpy (temp_state, curr_state, dfa_state_size);
2484 if (recog_memoized (insn) < 0)
2486 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2487 || asm_noperands (PATTERN (insn)) >= 0);
2488 if (!first_cycle_insn_p && asm_p)
2489 /* This is asm insn which is tryed to be issued on the
2490 cycle not first. Issue it on the next cycle. */
2491 cost = 1;
2492 else
2493 /* A USE insn, or something else we don't need to
2494 understand. We can't pass these directly to
2495 state_transition because it will trigger a
2496 fatal error for unrecognizable insns. */
2497 cost = 0;
2499 else
2501 cost = state_transition (temp_state, insn);
2502 if (cost < 0)
2503 cost = 0;
2504 else if (cost == 0)
2505 cost = 1;
2508 if (cost >= 1)
2510 queue_insn (insn, cost);
2511 if (SCHED_GROUP_P (insn))
2513 advance = cost;
2514 break;
2517 continue;
2520 if (current_sched_info->can_schedule_ready_p
2521 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2522 /* We normally get here only if we don't want to move
2523 insn from the split block. */
2525 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2526 continue;
2529 /* DECISION is made. */
2531 if (TODO_SPEC (insn) & SPECULATIVE)
2532 generate_recovery_code (insn);
2534 if (control_flow_insn_p (last_scheduled_insn)
2535 /* This is used to to switch basic blocks by request
2536 from scheduler front-end (actually, sched-ebb.c only).
2537 This is used to process blocks with single fallthru
2538 edge. If succeeding block has jump, it [jump] will try
2539 move at the end of current bb, thus corrupting CFG. */
2540 || current_sched_info->advance_target_bb (*target_bb, insn))
2542 *target_bb = current_sched_info->advance_target_bb
2543 (*target_bb, 0);
2545 if (sched_verbose)
2547 rtx x;
2549 x = next_real_insn (last_scheduled_insn);
2550 gcc_assert (x);
2551 dump_new_block_header (1, *target_bb, x, tail);
2554 last_scheduled_insn = bb_note (*target_bb);
2557 /* Update counters, etc in the scheduler's front end. */
2558 (*current_sched_info->begin_schedule_ready) (insn,
2559 last_scheduled_insn);
2561 move_insn (insn);
2562 last_scheduled_insn = insn;
2564 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2566 cycle_issued_insns++;
2567 memcpy (curr_state, temp_state, dfa_state_size);
2570 if (targetm.sched.variable_issue)
2571 can_issue_more =
2572 targetm.sched.variable_issue (sched_dump, sched_verbose,
2573 insn, can_issue_more);
2574 /* A naked CLOBBER or USE generates no instruction, so do
2575 not count them against the issue rate. */
2576 else if (GET_CODE (PATTERN (insn)) != USE
2577 && GET_CODE (PATTERN (insn)) != CLOBBER)
2578 can_issue_more--;
2580 advance = schedule_insn (insn);
2582 /* After issuing an asm insn we should start a new cycle. */
2583 if (advance == 0 && asm_p)
2584 advance = 1;
2585 if (advance != 0)
2586 break;
2588 first_cycle_insn_p = 0;
2590 /* Sort the ready list based on priority. This must be
2591 redone here, as schedule_insn may have readied additional
2592 insns that will not be sorted correctly. */
2593 if (ready.n_ready > 0)
2594 ready_sort (&ready);
2596 if (targetm.sched.reorder2
2597 && (ready.n_ready == 0
2598 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2600 can_issue_more =
2601 targetm.sched.reorder2 (sched_dump, sched_verbose,
2602 ready.n_ready
2603 ? ready_lastpos (&ready) : NULL,
2604 &ready.n_ready, clock_var);
2609 /* Debug info. */
2610 if (sched_verbose)
2612 fprintf (sched_dump, ";;\tReady list (final): ");
2613 debug_ready_list (&ready);
2616 if (current_sched_info->queue_must_finish_empty)
2617 /* Sanity check -- queue must be empty now. Meaningless if region has
2618 multiple bbs. */
2619 gcc_assert (!q_size && !ready.n_ready);
2620 else
2622 /* We must maintain QUEUE_INDEX between blocks in region. */
2623 for (i = ready.n_ready - 1; i >= 0; i--)
2625 rtx x;
2627 x = ready_element (&ready, i);
2628 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2629 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2632 if (q_size)
2633 for (i = 0; i <= max_insn_queue_index; i++)
2635 rtx link;
2636 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2638 rtx x;
2640 x = XEXP (link, 0);
2641 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2642 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2644 free_INSN_LIST_list (&insn_queue[i]);
2648 if (!current_sched_info->queue_must_finish_empty
2649 || added_recovery_block_p)
2651 /* INSN_TICK (minimum clock tick at which the insn becomes
2652 ready) may be not correct for the insn in the subsequent
2653 blocks of the region. We should use a correct value of
2654 `clock_var' or modify INSN_TICK. It is better to keep
2655 clock_var value equal to 0 at the start of a basic block.
2656 Therefore we modify INSN_TICK here. */
2657 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2660 #ifdef ENABLE_CHECKING
2661 /* After the reload the ia64 backend doesn't maintain BB_END, so
2662 if we want to check anything, better do it now.
2663 And it already clobbered previously scheduled code. */
2664 if (reload_completed)
2665 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
2666 #endif
2668 if (targetm.sched.md_finish)
2669 targetm.sched.md_finish (sched_dump, sched_verbose);
2671 /* Update head/tail boundaries. */
2672 head = NEXT_INSN (prev_head);
2673 tail = last_scheduled_insn;
2675 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2676 previously found among the insns. Insert them at the beginning
2677 of the insns. */
2678 if (note_list != 0)
2680 basic_block head_bb = BLOCK_FOR_INSN (head);
2681 rtx note_head = note_list;
2683 while (PREV_INSN (note_head))
2685 set_block_for_insn (note_head, head_bb);
2686 note_head = PREV_INSN (note_head);
2688 /* In the above cycle we've missed this note: */
2689 set_block_for_insn (note_head, head_bb);
2691 PREV_INSN (note_head) = PREV_INSN (head);
2692 NEXT_INSN (PREV_INSN (head)) = note_head;
2693 PREV_INSN (head) = note_list;
2694 NEXT_INSN (note_list) = head;
2695 head = note_head;
2698 /* Debugging. */
2699 if (sched_verbose)
2701 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2702 clock_var, INSN_UID (head));
2703 fprintf (sched_dump, ";; new tail = %d\n\n",
2704 INSN_UID (tail));
2707 current_sched_info->head = head;
2708 current_sched_info->tail = tail;
2710 free (ready.vec);
2712 free (ready_try);
2713 for (i = 0; i <= rgn_n_insns; i++)
2714 free (choice_stack [i].state);
2715 free (choice_stack);
2718 /* Set_priorities: compute priority of each insn in the block. */
2721 set_priorities (rtx head, rtx tail)
2723 rtx insn;
2724 int n_insn;
2725 int sched_max_insns_priority =
2726 current_sched_info->sched_max_insns_priority;
2727 rtx prev_head;
2729 if (head == tail && (! INSN_P (head)))
2730 return 0;
2732 n_insn = 0;
2734 prev_head = PREV_INSN (head);
2735 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2737 if (!INSN_P (insn))
2738 continue;
2740 n_insn++;
2741 (void) priority (insn);
2743 if (INSN_PRIORITY_KNOWN (insn))
2744 sched_max_insns_priority =
2745 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2748 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2750 return n_insn;
2753 /* Next LUID to assign to an instruction. */
2754 static int luid;
2756 /* Initialize some global state for the scheduler. */
2758 void
2759 sched_init (void)
2761 basic_block b;
2762 rtx insn;
2763 int i;
2765 /* Switch to working copy of sched_info. */
2766 memcpy (&current_sched_info_var, current_sched_info,
2767 sizeof (current_sched_info_var));
2768 current_sched_info = &current_sched_info_var;
2770 /* Disable speculative loads in their presence if cc0 defined. */
2771 #ifdef HAVE_cc0
2772 flag_schedule_speculative_load = 0;
2773 #endif
2775 /* Set dump and sched_verbose for the desired debugging output. If no
2776 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2777 For -fsched-verbose=N, N>=10, print everything to stderr. */
2778 sched_verbose = sched_verbose_param;
2779 if (sched_verbose_param == 0 && dump_file)
2780 sched_verbose = 1;
2781 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2782 ? stderr : dump_file);
2784 /* Initialize SPEC_INFO. */
2785 if (targetm.sched.set_sched_flags)
2787 spec_info = &spec_info_var;
2788 targetm.sched.set_sched_flags (spec_info);
2789 if (current_sched_info->flags & DO_SPECULATION)
2790 spec_info->weakness_cutoff =
2791 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2792 else
2793 /* So we won't read anything accidentally. */
2794 spec_info = 0;
2795 #ifdef ENABLE_CHECKING
2796 check_sched_flags ();
2797 #endif
2799 else
2800 /* So we won't read anything accidentally. */
2801 spec_info = 0;
2803 /* Initialize issue_rate. */
2804 if (targetm.sched.issue_rate)
2805 issue_rate = targetm.sched.issue_rate ();
2806 else
2807 issue_rate = 1;
2809 if (cached_issue_rate != issue_rate)
2811 cached_issue_rate = issue_rate;
2812 /* To invalidate max_lookahead_tries: */
2813 cached_first_cycle_multipass_dfa_lookahead = 0;
2816 old_max_uid = 0;
2817 h_i_d = 0;
2818 extend_h_i_d ();
2820 for (i = 0; i < old_max_uid; i++)
2822 h_i_d[i].cost = -1;
2823 h_i_d[i].todo_spec = HARD_DEP;
2824 h_i_d[i].queue_index = QUEUE_NOWHERE;
2825 h_i_d[i].tick = INVALID_TICK;
2826 h_i_d[i].inter_tick = INVALID_TICK;
2829 if (targetm.sched.init_dfa_pre_cycle_insn)
2830 targetm.sched.init_dfa_pre_cycle_insn ();
2832 if (targetm.sched.init_dfa_post_cycle_insn)
2833 targetm.sched.init_dfa_post_cycle_insn ();
2835 dfa_start ();
2836 dfa_state_size = state_size ();
2837 curr_state = xmalloc (dfa_state_size);
2839 h_i_d[0].luid = 0;
2840 luid = 1;
2841 FOR_EACH_BB (b)
2842 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2844 INSN_LUID (insn) = luid;
2846 /* Increment the next luid, unless this is a note. We don't
2847 really need separate IDs for notes and we don't want to
2848 schedule differently depending on whether or not there are
2849 line-number notes, i.e., depending on whether or not we're
2850 generating debugging information. */
2851 if (!NOTE_P (insn))
2852 ++luid;
2854 if (insn == BB_END (b))
2855 break;
2858 init_dependency_caches (luid);
2860 init_alias_analysis ();
2862 line_note_head = 0;
2863 old_last_basic_block = 0;
2864 glat_start = 0;
2865 glat_end = 0;
2866 extend_bb (0);
2868 if (current_sched_info->flags & USE_GLAT)
2869 init_glat ();
2871 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2872 removing death notes. */
2873 FOR_EACH_BB_REVERSE (b)
2874 find_insn_reg_weight (b);
2876 if (targetm.sched.md_init_global)
2877 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2879 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2880 before_recovery = 0;
2882 #ifdef ENABLE_CHECKING
2883 /* This is used preferably for finding bugs in check_cfg () itself. */
2884 check_cfg (0, 0);
2885 #endif
2888 /* Free global data used during insn scheduling. */
2890 void
2891 sched_finish (void)
2893 free (h_i_d);
2894 free (curr_state);
2895 dfa_finish ();
2896 free_dependency_caches ();
2897 end_alias_analysis ();
2898 free (line_note_head);
2899 free_glat ();
2901 if (targetm.sched.md_finish_global)
2902 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2904 if (spec_info && spec_info->dump)
2906 char c = reload_completed ? 'a' : 'b';
2908 fprintf (spec_info->dump,
2909 ";; %s:\n", current_function_name ());
2911 fprintf (spec_info->dump,
2912 ";; Procedure %cr-begin-data-spec motions == %d\n",
2913 c, nr_begin_data);
2914 fprintf (spec_info->dump,
2915 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2916 c, nr_be_in_data);
2917 fprintf (spec_info->dump,
2918 ";; Procedure %cr-begin-control-spec motions == %d\n",
2919 c, nr_begin_control);
2920 fprintf (spec_info->dump,
2921 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2922 c, nr_be_in_control);
2925 #ifdef ENABLE_CHECKING
2926 /* After reload ia64 backend clobbers CFG, so can't check anything. */
2927 if (!reload_completed)
2928 check_cfg (0, 0);
2929 #endif
2931 current_sched_info = NULL;
2934 /* Fix INSN_TICKs of the instructions in the current block as well as
2935 INSN_TICKs of their dependents.
2936 HEAD and TAIL are the begin and the end of the current scheduled block. */
2937 static void
2938 fix_inter_tick (rtx head, rtx tail)
2940 /* Set of instructions with corrected INSN_TICK. */
2941 bitmap_head processed;
2942 int next_clock = clock_var + 1;
2944 bitmap_initialize (&processed, 0);
2946 /* Iterates over scheduled instructions and fix their INSN_TICKs and
2947 INSN_TICKs of dependent instructions, so that INSN_TICKs are consistent
2948 across different blocks. */
2949 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2951 if (INSN_P (head))
2953 int tick;
2954 rtx link;
2956 tick = INSN_TICK (head);
2957 gcc_assert (tick >= MIN_TICK);
2959 /* Fix INSN_TICK of instruction from just scheduled block. */
2960 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2962 bitmap_set_bit (&processed, INSN_LUID (head));
2963 tick -= next_clock;
2965 if (tick < MIN_TICK)
2966 tick = MIN_TICK;
2968 INSN_TICK (head) = tick;
2971 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2973 rtx next;
2975 next = XEXP (link, 0);
2976 tick = INSN_TICK (next);
2978 if (tick != INVALID_TICK
2979 /* If NEXT has its INSN_TICK calculated, fix it.
2980 If not - it will be properly calculated from
2981 scratch later in fix_tick_ready. */
2982 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2984 bitmap_set_bit (&processed, INSN_LUID (next));
2985 tick -= next_clock;
2987 if (tick < MIN_TICK)
2988 tick = MIN_TICK;
2990 if (tick > INTER_TICK (next))
2991 INTER_TICK (next) = tick;
2992 else
2993 tick = INTER_TICK (next);
2995 INSN_TICK (next) = tick;
3000 bitmap_clear (&processed);
3003 /* Check if NEXT is ready to be added to the ready or queue list.
3004 If "yes", add it to the proper list.
3005 Returns:
3006 -1 - is not ready yet,
3007 0 - added to the ready list,
3008 0 < N - queued for N cycles. */
3010 try_ready (rtx next)
3012 ds_t old_ts, *ts;
3013 rtx link;
3015 ts = &TODO_SPEC (next);
3016 old_ts = *ts;
3018 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
3019 && ((old_ts & HARD_DEP)
3020 || (old_ts & SPECULATIVE)));
3022 if (!(current_sched_info->flags & DO_SPECULATION))
3024 if (!LOG_LINKS (next))
3025 *ts &= ~HARD_DEP;
3027 else
3029 *ts &= ~SPECULATIVE & ~HARD_DEP;
3031 link = LOG_LINKS (next);
3032 if (link)
3034 /* LOG_LINKS are maintained sorted.
3035 So if DEP_STATUS of the first dep is SPECULATIVE,
3036 than all other deps are speculative too. */
3037 if (DEP_STATUS (link) & SPECULATIVE)
3039 /* Now we've got NEXT with speculative deps only.
3040 1. Look at the deps to see what we have to do.
3041 2. Check if we can do 'todo'. */
3042 *ts = DEP_STATUS (link) & SPECULATIVE;
3043 while ((link = XEXP (link, 1)))
3044 *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3046 if (dep_weak (*ts) < spec_info->weakness_cutoff)
3047 /* Too few points. */
3048 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3050 else
3051 *ts |= HARD_DEP;
3055 if (*ts & HARD_DEP)
3056 gcc_assert (*ts == old_ts
3057 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3058 else if (current_sched_info->new_ready)
3059 *ts = current_sched_info->new_ready (next, *ts);
3061 /* * if !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
3062 have its original pattern or changed (speculative) one. This is due
3063 to changing ebb in region scheduling.
3064 * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
3065 has speculative pattern.
3067 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3068 control-speculative NEXT could have been discarded by sched-rgn.c
3069 (the same case as when discarded by can_schedule_ready_p ()). */
3071 if ((*ts & SPECULATIVE)
3072 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3073 need to change anything. */
3074 && *ts != old_ts)
3076 int res;
3077 rtx new_pat;
3079 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3081 res = speculate_insn (next, *ts, &new_pat);
3083 switch (res)
3085 case -1:
3086 /* It would be nice to change DEP_STATUS of all dependences,
3087 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3088 so we won't reanalyze anything. */
3089 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3090 break;
3092 case 0:
3093 /* We follow the rule, that every speculative insn
3094 has non-null ORIG_PAT. */
3095 if (!ORIG_PAT (next))
3096 ORIG_PAT (next) = PATTERN (next);
3097 break;
3099 case 1:
3100 if (!ORIG_PAT (next))
3101 /* If we gonna to overwrite the original pattern of insn,
3102 save it. */
3103 ORIG_PAT (next) = PATTERN (next);
3105 change_pattern (next, new_pat);
3106 break;
3108 default:
3109 gcc_unreachable ();
3113 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3114 either correct (*ts & SPECULATIVE),
3115 or we simply don't care (*ts & HARD_DEP). */
3117 gcc_assert (!ORIG_PAT (next)
3118 || !RECOVERY_BLOCK (next)
3119 || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);
3121 if (*ts & HARD_DEP)
3123 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3124 control-speculative NEXT could have been discarded by sched-rgn.c
3125 (the same case as when discarded by can_schedule_ready_p ()). */
3126 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3128 change_queue_index (next, QUEUE_NOWHERE);
3129 return -1;
3131 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
3132 /* We should change pattern of every previously speculative
3133 instruction - and we determine if NEXT was speculative by using
3134 ORIG_PAT field. Except one case - simple checks have ORIG_PAT
3135 pat too, hence we also check for the RECOVERY_BLOCK. */
3137 change_pattern (next, ORIG_PAT (next));
3138 ORIG_PAT (next) = 0;
3141 if (sched_verbose >= 2)
3143 int s = TODO_SPEC (next);
3145 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3146 (*current_sched_info->print_insn) (next, 0));
3148 if (spec_info && spec_info->dump)
3150 if (s & BEGIN_DATA)
3151 fprintf (spec_info->dump, "; data-spec;");
3152 if (s & BEGIN_CONTROL)
3153 fprintf (spec_info->dump, "; control-spec;");
3154 if (s & BE_IN_CONTROL)
3155 fprintf (spec_info->dump, "; in-control-spec;");
3158 fprintf (sched_dump, "\n");
3161 adjust_priority (next);
3163 return fix_tick_ready (next);
3166 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3167 static int
3168 fix_tick_ready (rtx next)
3170 rtx link;
3171 int tick, delay;
3173 link = RESOLVED_DEPS (next);
3175 if (link)
3177 int full_p;
3179 tick = INSN_TICK (next);
3180 /* if tick is not equal to INVALID_TICK, then update
3181 INSN_TICK of NEXT with the most recent resolved dependence
3182 cost. Otherwise, recalculate from scratch. */
3183 full_p = tick == INVALID_TICK;
3186 rtx pro;
3187 int tick1;
3189 pro = XEXP (link, 0);
3190 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3192 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3193 if (tick1 > tick)
3194 tick = tick1;
3196 while ((link = XEXP (link, 1)) && full_p);
3198 else
3199 tick = -1;
3201 INSN_TICK (next) = tick;
3203 delay = tick - clock_var;
3204 if (delay <= 0)
3205 delay = QUEUE_READY;
3207 change_queue_index (next, delay);
3209 return delay;
3212 /* Move NEXT to the proper queue list with (DELAY >= 1),
3213 or add it to the ready list (DELAY == QUEUE_READY),
3214 or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
3215 static void
3216 change_queue_index (rtx next, int delay)
3218 int i = QUEUE_INDEX (next);
3220 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3221 && delay != 0);
3222 gcc_assert (i != QUEUE_SCHEDULED);
3224 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3225 || (delay < 0 && delay == i))
3226 /* We have nothing to do. */
3227 return;
3229 /* Remove NEXT from wherever it is now. */
3230 if (i == QUEUE_READY)
3231 ready_remove_insn (next);
3232 else if (i >= 0)
3233 queue_remove (next);
3235 /* Add it to the proper place. */
3236 if (delay == QUEUE_READY)
3237 ready_add (readyp, next, false);
3238 else if (delay >= 1)
3239 queue_insn (next, delay);
3241 if (sched_verbose >= 2)
3243 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3244 (*current_sched_info->print_insn) (next, 0));
3246 if (delay == QUEUE_READY)
3247 fprintf (sched_dump, " into ready\n");
3248 else if (delay >= 1)
3249 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3250 else
3251 fprintf (sched_dump, " removed from ready or queue lists\n");
3255 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3256 static void
3257 resolve_dep (rtx next, rtx insn)
3259 rtx dep;
3261 INSN_DEP_COUNT (next)--;
3263 dep = remove_list_elem (insn, &LOG_LINKS (next));
3264 XEXP (dep, 1) = RESOLVED_DEPS (next);
3265 RESOLVED_DEPS (next) = dep;
3267 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3268 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
3271 /* Extend H_I_D data. */
3272 static void
3273 extend_h_i_d (void)
3275 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3276 pseudos which do not cross calls. */
3277 int new_max_uid = get_max_uid() + 1;
3279 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3280 old_max_uid = new_max_uid;
3282 if (targetm.sched.h_i_d_extended)
3283 targetm.sched.h_i_d_extended ();
3286 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3287 N_NEW_INSNS is the number of additional elements to allocate. */
3288 static void
3289 extend_ready (int n_new_insns)
3291 int i;
3293 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3294 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3296 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3297 rgn_n_insns + 1, sizeof (char));
3299 rgn_n_insns += n_new_insns;
3301 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3302 rgn_n_insns + 1);
3304 for (i = rgn_n_insns; n_new_insns--; i--)
3305 choice_stack[i].state = xmalloc (dfa_state_size);
3308 /* Extend global scheduler structures (those, that live across calls to
3309 schedule_block) to include information about just emitted INSN. */
3310 static void
3311 extend_global (rtx insn)
3313 gcc_assert (INSN_P (insn));
3314 /* These structures have scheduler scope. */
3315 extend_h_i_d ();
3316 init_h_i_d (insn);
3318 extend_dependency_caches (1, 0);
3321 /* Extends global and local scheduler structures to include information
3322 about just emitted INSN. */
3323 static void
3324 extend_all (rtx insn)
3326 extend_global (insn);
3328 /* These structures have block scope. */
3329 extend_ready (1);
3331 (*current_sched_info->add_remove_insn) (insn, 0);
3334 /* Initialize h_i_d entry of the new INSN with default values.
3335 Values, that are not explicitly initialized here, hold zero. */
3336 static void
3337 init_h_i_d (rtx insn)
3339 INSN_LUID (insn) = luid++;
3340 INSN_COST (insn) = -1;
3341 TODO_SPEC (insn) = HARD_DEP;
3342 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3343 INSN_TICK (insn) = INVALID_TICK;
3344 INTER_TICK (insn) = INVALID_TICK;
3345 find_insn_reg_weight1 (insn);
3348 /* Generates recovery code for INSN. */
3349 static void
3350 generate_recovery_code (rtx insn)
3352 if (TODO_SPEC (insn) & BEGIN_SPEC)
3353 begin_speculative_block (insn);
3355 /* Here we have insn with no dependencies to
3356 instructions other then CHECK_SPEC ones. */
3358 if (TODO_SPEC (insn) & BE_IN_SPEC)
3359 add_to_speculative_block (insn);
3362 /* Helper function.
3363 Tries to add speculative dependencies of type FS between instructions
3364 in LINK list and TWIN. */
3365 static void
3366 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3368 for (; link; link = XEXP (link, 1))
3370 ds_t ds;
3371 rtx consumer;
3373 consumer = XEXP (link, 0);
3375 ds = DEP_STATUS (link);
3377 if (/* If we want to create speculative dep. */
3379 /* And we can do that because this is a true dep. */
3380 && (ds & DEP_TYPES) == DEP_TRUE)
3382 gcc_assert (!(ds & BE_IN_SPEC));
3384 if (/* If this dep can be overcome with 'begin speculation'. */
3385 ds & BEGIN_SPEC)
3386 /* Then we have a choice: keep the dep 'begin speculative'
3387 or transform it into 'be in speculative'. */
3389 if (/* In try_ready we assert that if insn once became ready
3390 it can be removed from the ready (or queue) list only
3391 due to backend decision. Hence we can't let the
3392 probability of the speculative dep to decrease. */
3393 dep_weak (ds) <= dep_weak (fs))
3394 /* Transform it to be in speculative. */
3395 ds = (ds & ~BEGIN_SPEC) | fs;
3397 else
3398 /* Mark the dep as 'be in speculative'. */
3399 ds |= fs;
3402 add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3406 /* Generates recovery code for BEGIN speculative INSN. */
3407 static void
3408 begin_speculative_block (rtx insn)
3410 if (TODO_SPEC (insn) & BEGIN_DATA)
3411 nr_begin_data++;
3412 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3413 nr_begin_control++;
3415 create_check_block_twin (insn, false);
3417 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3420 /* Generates recovery code for BE_IN speculative INSN. */
3421 static void
3422 add_to_speculative_block (rtx insn)
3424 ds_t ts;
3425 rtx link, twins = NULL;
3427 ts = TODO_SPEC (insn);
3428 gcc_assert (!(ts & ~BE_IN_SPEC));
3430 if (ts & BE_IN_DATA)
3431 nr_be_in_data++;
3432 if (ts & BE_IN_CONTROL)
3433 nr_be_in_control++;
3435 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3436 gcc_assert (!TODO_SPEC (insn));
3438 DONE_SPEC (insn) |= ts;
3440 /* First we convert all simple checks to branchy. */
3441 for (link = LOG_LINKS (insn); link;)
3443 rtx check;
3445 check = XEXP (link, 0);
3447 if (RECOVERY_BLOCK (check))
3449 create_check_block_twin (check, true);
3450 link = LOG_LINKS (insn);
3452 else
3453 link = XEXP (link, 1);
3456 clear_priorities (insn);
3460 rtx link, check, twin;
3461 basic_block rec;
3463 link = LOG_LINKS (insn);
3464 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3465 && (DEP_STATUS (link) & BE_IN_SPEC)
3466 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3468 check = XEXP (link, 0);
3469 gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
3470 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3472 rec = BLOCK_FOR_INSN (check);
3474 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3475 extend_global (twin);
3477 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3479 if (sched_verbose && spec_info->dump)
3480 /* INSN_BB (insn) isn't determined for twin insns yet.
3481 So we can't use current_sched_info->print_insn. */
3482 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3483 INSN_UID (twin), rec->index);
3485 twins = alloc_INSN_LIST (twin, twins);
3487 /* Add dependences between TWIN and all appropriate
3488 instructions from REC. */
3491 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3495 link = XEXP (link, 1);
3496 if (link)
3498 check = XEXP (link, 0);
3499 if (BLOCK_FOR_INSN (check) == rec)
3500 break;
3502 else
3503 break;
3505 while (1);
3507 while (link);
3509 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3511 for (link = LOG_LINKS (insn); link;)
3513 check = XEXP (link, 0);
3515 if (BLOCK_FOR_INSN (check) == rec)
3517 delete_back_forw_dep (insn, check);
3518 link = LOG_LINKS (insn);
3520 else
3521 link = XEXP (link, 1);
3524 while (LOG_LINKS (insn));
3526 /* We can't add the dependence between insn and twin earlier because
3527 that would make twin appear in the INSN_DEPEND (insn). */
3528 while (twins)
3530 rtx twin;
3532 twin = XEXP (twins, 0);
3533 calc_priorities (twin);
3534 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3536 twin = XEXP (twins, 1);
3537 free_INSN_LIST_node (twins);
3538 twins = twin;
3542 /* Extends and fills with zeros (only the new part) array pointed to by P. */
3543 void *
3544 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3546 gcc_assert (new_nmemb >= old_nmemb);
3547 p = XRESIZEVAR (void, p, new_nmemb * size);
3548 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3549 return p;
3552 /* Return the probability of speculation success for the speculation
3553 status DS. */
3554 static dw_t
3555 dep_weak (ds_t ds)
3557 ds_t res = 1, dt;
3558 int n = 0;
3560 dt = FIRST_SPEC_TYPE;
3563 if (ds & dt)
3565 res *= (ds_t) get_dep_weak (ds, dt);
3566 n++;
3569 if (dt == LAST_SPEC_TYPE)
3570 break;
3571 dt <<= SPEC_TYPE_SHIFT;
3573 while (1);
3575 gcc_assert (n);
3576 while (--n)
3577 res /= MAX_DEP_WEAK;
3579 if (res < MIN_DEP_WEAK)
3580 res = MIN_DEP_WEAK;
3582 gcc_assert (res <= MAX_DEP_WEAK);
3584 return (dw_t) res;
3587 /* Helper function.
3588 Find fallthru edge from PRED. */
3589 static edge
3590 find_fallthru_edge (basic_block pred)
3592 edge e;
3593 edge_iterator ei;
3594 basic_block succ;
3596 succ = pred->next_bb;
3597 gcc_assert (succ->prev_bb == pred);
3599 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3601 FOR_EACH_EDGE (e, ei, pred->succs)
3602 if (e->flags & EDGE_FALLTHRU)
3604 gcc_assert (e->dest == succ);
3605 return e;
3608 else
3610 FOR_EACH_EDGE (e, ei, succ->preds)
3611 if (e->flags & EDGE_FALLTHRU)
3613 gcc_assert (e->src == pred);
3614 return e;
3618 return NULL;
3621 /* Initialize BEFORE_RECOVERY variable. */
3622 static void
3623 init_before_recovery (void)
3625 basic_block last;
3626 edge e;
3628 last = EXIT_BLOCK_PTR->prev_bb;
3629 e = find_fallthru_edge (last);
3631 if (e)
3633 /* We create two basic blocks:
3634 1. Single instruction block is inserted right after E->SRC
3635 and has jump to
3636 2. Empty block right before EXIT_BLOCK.
3637 Between these two blocks recovery blocks will be emitted. */
3639 basic_block single, empty;
3640 rtx x, label;
3642 single = create_empty_bb (last);
3643 empty = create_empty_bb (single);
3645 single->count = last->count;
3646 empty->count = last->count;
3647 single->frequency = last->frequency;
3648 empty->frequency = last->frequency;
3649 BB_COPY_PARTITION (single, last);
3650 BB_COPY_PARTITION (empty, last);
3652 redirect_edge_succ (e, single);
3653 make_single_succ_edge (single, empty, 0);
3654 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3655 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3657 label = block_label (empty);
3658 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3659 JUMP_LABEL (x) = label;
3660 LABEL_NUSES (label)++;
3661 extend_global (x);
3663 emit_barrier_after (x);
3665 add_block (empty, 0);
3666 add_block (single, 0);
3668 before_recovery = single;
3670 if (sched_verbose >= 2 && spec_info->dump)
3671 fprintf (spec_info->dump,
3672 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3673 last->index, single->index, empty->index);
3675 else
3676 before_recovery = last;
3679 /* Returns new recovery block. */
3680 static basic_block
3681 create_recovery_block (void)
3683 rtx label;
3684 basic_block rec;
3686 added_recovery_block_p = true;
3688 if (!before_recovery)
3689 init_before_recovery ();
3691 label = gen_label_rtx ();
3692 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
3693 label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
3695 rec = create_basic_block (label, label, before_recovery);
3696 emit_barrier_after (BB_END (rec));
3698 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3699 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3701 if (sched_verbose && spec_info->dump)
3702 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3703 rec->index);
3705 before_recovery = rec;
3707 return rec;
3710 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3711 INSN is a simple check, that should be converted to branchy one. */
3712 static void
3713 create_check_block_twin (rtx insn, bool mutate_p)
3715 basic_block rec;
3716 rtx label, check, twin, link;
3717 ds_t fs;
3719 gcc_assert (ORIG_PAT (insn)
3720 && (!mutate_p
3721 || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
3722 && !(TODO_SPEC (insn) & SPECULATIVE))));
3724 /* Create recovery block. */
3725 if (mutate_p || targetm.sched.needs_block_p (insn))
3727 rec = create_recovery_block ();
3728 label = BB_HEAD (rec);
3730 else
3732 rec = EXIT_BLOCK_PTR;
3733 label = 0;
3736 /* Emit CHECK. */
3737 check = targetm.sched.gen_check (insn, label, mutate_p);
3739 if (rec != EXIT_BLOCK_PTR)
3741 /* To have mem_reg alive at the beginning of second_bb,
3742 we emit check BEFORE insn, so insn after splitting
3743 insn will be at the beginning of second_bb, which will
3744 provide us with the correct life information. */
3745 check = emit_jump_insn_before (check, insn);
3746 JUMP_LABEL (check) = label;
3747 LABEL_NUSES (label)++;
3749 else
3750 check = emit_insn_before (check, insn);
3752 /* Extend data structures. */
3753 extend_all (check);
3754 RECOVERY_BLOCK (check) = rec;
3756 if (sched_verbose && spec_info->dump)
3757 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3758 (*current_sched_info->print_insn) (check, 0));
3760 gcc_assert (ORIG_PAT (insn));
3762 /* Initialize TWIN (twin is a duplicate of original instruction
3763 in the recovery block). */
3764 if (rec != EXIT_BLOCK_PTR)
3766 rtx link;
3768 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3769 if (DEP_STATUS (link) & DEP_OUTPUT)
3771 RESOLVED_DEPS (check) =
3772 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3773 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3776 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3777 extend_global (twin);
3779 if (sched_verbose && spec_info->dump)
3780 /* INSN_BB (insn) isn't determined for twin insns yet.
3781 So we can't use current_sched_info->print_insn. */
3782 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3783 INSN_UID (twin), rec->index);
3785 else
3787 ORIG_PAT (check) = ORIG_PAT (insn);
3788 HAS_INTERNAL_DEP (check) = 1;
3789 twin = check;
3790 /* ??? We probably should change all OUTPUT dependencies to
3791 (TRUE | OUTPUT). */
3794 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3796 if (rec != EXIT_BLOCK_PTR)
3797 /* In case of branchy check, fix CFG. */
3799 basic_block first_bb, second_bb;
3800 rtx jump;
3801 edge e;
3802 int edge_flags;
3804 first_bb = BLOCK_FOR_INSN (check);
3805 e = split_block (first_bb, check);
3806 /* split_block emits note if *check == BB_END. Probably it
3807 is better to rip that note off. */
3808 gcc_assert (e->src == first_bb);
3809 second_bb = e->dest;
3811 /* This is fixing of incoming edge. */
3812 /* ??? Which other flags should be specified? */
3813 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3814 /* Partition type is the same, if it is "unpartitioned". */
3815 edge_flags = EDGE_CROSSING;
3816 else
3817 edge_flags = 0;
3819 e = make_edge (first_bb, rec, edge_flags);
3821 add_block (second_bb, first_bb);
3823 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3824 label = block_label (second_bb);
3825 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3826 JUMP_LABEL (jump) = label;
3827 LABEL_NUSES (label)++;
3828 extend_global (jump);
3830 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3831 /* Partition type is the same, if it is "unpartitioned". */
3833 /* Rewritten from cfgrtl.c. */
3834 if (flag_reorder_blocks_and_partition
3835 && targetm.have_named_sections
3836 /*&& !any_condjump_p (jump)*/)
3837 /* any_condjump_p (jump) == false.
3838 We don't need the same note for the check because
3839 any_condjump_p (check) == true. */
3841 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3842 NULL_RTX,
3843 REG_NOTES (jump));
3845 edge_flags = EDGE_CROSSING;
3847 else
3848 edge_flags = 0;
3850 make_single_succ_edge (rec, second_bb, edge_flags);
3852 add_block (rec, EXIT_BLOCK_PTR);
3855 /* Move backward dependences from INSN to CHECK and
3856 move forward dependences from INSN to TWIN. */
3857 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3859 ds_t ds;
3861 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3862 check --TRUE--> producer ??? or ANTI ???
3863 twin --TRUE--> producer
3864 twin --ANTI--> check
3866 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3867 check --ANTI--> producer
3868 twin --ANTI--> producer
3869 twin --ANTI--> check
3871 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3872 check ~~TRUE~~> producer
3873 twin ~~TRUE~~> producer
3874 twin --ANTI--> check */
3876 ds = DEP_STATUS (link);
3878 if (ds & BEGIN_SPEC)
3880 gcc_assert (!mutate_p);
3881 ds &= ~BEGIN_SPEC;
3884 if (rec != EXIT_BLOCK_PTR)
3886 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3887 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3889 else
3890 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3893 for (link = LOG_LINKS (insn); link;)
3894 if ((DEP_STATUS (link) & BEGIN_SPEC)
3895 || mutate_p)
3896 /* We can delete this dep only if we totally overcome it with
3897 BEGIN_SPECULATION. */
3899 delete_back_forw_dep (insn, XEXP (link, 0));
3900 link = LOG_LINKS (insn);
3902 else
3903 link = XEXP (link, 1);
3905 fs = 0;
3907 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3908 here. */
3910 gcc_assert (!DONE_SPEC (insn));
3912 if (!mutate_p)
3914 ds_t ts = TODO_SPEC (insn);
3916 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3917 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3919 if (ts & BEGIN_DATA)
3920 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3921 if (ts & BEGIN_CONTROL)
3922 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3924 else
3925 CHECK_SPEC (check) = CHECK_SPEC (insn);
3927 /* Future speculations: call the helper. */
3928 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3930 if (rec != EXIT_BLOCK_PTR)
3932 /* Which types of dependencies should we use here is,
3933 generally, machine-dependent question... But, for now,
3934 it is not. */
3936 if (!mutate_p)
3938 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3939 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3941 else
3943 if (spec_info->dump)
3944 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3945 (*current_sched_info->print_insn) (insn, 0));
3947 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3948 delete_back_forw_dep (XEXP (link, 0), insn);
3950 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3951 try_ready (check);
3953 sched_remove_insn (insn);
3956 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3958 else
3959 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3961 if (!mutate_p)
3962 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3963 because it'll be done later in add_to_speculative_block. */
3965 clear_priorities (twin);
3966 calc_priorities (twin);
3970 /* Removes dependency between instructions in the recovery block REC
3971 and usual region instructions. It keeps inner dependences so it
3972 won't be necessary to recompute them. */
3973 static void
3974 fix_recovery_deps (basic_block rec)
3976 rtx note, insn, link, jump, ready_list = 0;
3977 bitmap_head in_ready;
3979 bitmap_initialize (&in_ready, 0);
3981 /* NOTE - a basic block note. */
3982 note = NEXT_INSN (BB_HEAD (rec));
3983 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3984 insn = BB_END (rec);
3985 gcc_assert (JUMP_P (insn));
3986 insn = PREV_INSN (insn);
3990 for (link = INSN_DEPEND (insn); link;)
3992 rtx consumer;
3994 consumer = XEXP (link, 0);
3996 if (BLOCK_FOR_INSN (consumer) != rec)
3998 delete_back_forw_dep (consumer, insn);
4000 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
4002 ready_list = alloc_INSN_LIST (consumer, ready_list);
4003 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
4006 link = INSN_DEPEND (insn);
4008 else
4010 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
4012 link = XEXP (link, 1);
4016 insn = PREV_INSN (insn);
4018 while (insn != note);
4020 bitmap_clear (&in_ready);
4022 /* Try to add instructions to the ready or queue list. */
4023 for (link = ready_list; link; link = XEXP (link, 1))
4024 try_ready (XEXP (link, 0));
4025 free_INSN_LIST_list (&ready_list);
4027 /* Fixing jump's dependences. */
4028 insn = BB_HEAD (rec);
4029 jump = BB_END (rec);
4031 gcc_assert (LABEL_P (insn));
4032 insn = NEXT_INSN (insn);
4034 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4035 add_jump_dependencies (insn, jump);
4038 /* The function saves line notes at the beginning of block B. */
4039 static void
4040 associate_line_notes_with_blocks (basic_block b)
4042 rtx line;
4044 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4045 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4047 line_note_head[b->index] = line;
4048 break;
4050 /* Do a forward search as well, since we won't get to see the first
4051 notes in a basic block. */
4052 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4054 if (INSN_P (line))
4055 break;
4056 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4057 line_note_head[b->index] = line;
4061 /* Changes pattern of the INSN to NEW_PAT. */
4062 static void
4063 change_pattern (rtx insn, rtx new_pat)
4065 int t;
4067 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4068 gcc_assert (t);
4069 /* Invalidate INSN_COST, so it'll be recalculated. */
4070 INSN_COST (insn) = -1;
4071 /* Invalidate INSN_TICK, so it'll be recalculated. */
4072 INSN_TICK (insn) = INVALID_TICK;
4073 dfa_clear_single_insn_cache (insn);
4077 /* -1 - can't speculate,
4078 0 - for speculation with REQUEST mode it is OK to use
4079 current instruction pattern,
4080 1 - need to change pattern for *NEW_PAT to be speculative. */
4081 static int
4082 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4084 gcc_assert (current_sched_info->flags & DO_SPECULATION
4085 && (request & SPECULATIVE));
4087 if (!NONJUMP_INSN_P (insn)
4088 || HAS_INTERNAL_DEP (insn)
4089 || SCHED_GROUP_P (insn)
4090 || side_effects_p (PATTERN (insn))
4091 || (request & spec_info->mask) != request)
4092 return -1;
4094 gcc_assert (!RECOVERY_BLOCK (insn));
4096 if (request & BE_IN_SPEC)
4098 if (may_trap_p (PATTERN (insn)))
4099 return -1;
4101 if (!(request & BEGIN_SPEC))
4102 return 0;
4105 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
4108 /* Print some information about block BB, which starts with HEAD and
4109 ends with TAIL, before scheduling it.
4110 I is zero, if scheduler is about to start with the fresh ebb. */
4111 static void
4112 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4114 if (!i)
4115 fprintf (sched_dump,
4116 ";; ======================================================\n");
4117 else
4118 fprintf (sched_dump,
4119 ";; =====================ADVANCING TO=====================\n");
4120 fprintf (sched_dump,
4121 ";; -- basic block %d from %d to %d -- %s reload\n",
4122 bb->index, INSN_UID (head), INSN_UID (tail),
4123 (reload_completed ? "after" : "before"));
4124 fprintf (sched_dump,
4125 ";; ======================================================\n");
4126 fprintf (sched_dump, "\n");
4129 /* Unlink basic block notes and labels and saves them, so they
4130 can be easily restored. We unlink basic block notes in EBB to
4131 provide back-compatibility with the previous code, as target backends
4132 assume, that there'll be only instructions between
4133 current_sched_info->{head and tail}. We restore these notes as soon
4134 as we can.
4135 FIRST (LAST) is the first (last) basic block in the ebb.
4136 NB: In usual case (FIRST == LAST) nothing is really done. */
4137 void
4138 unlink_bb_notes (basic_block first, basic_block last)
4140 /* We DON'T unlink basic block notes of the first block in the ebb. */
4141 if (first == last)
4142 return;
4144 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4146 /* Make a sentinel. */
4147 if (last->next_bb != EXIT_BLOCK_PTR)
4148 bb_header[last->next_bb->index] = 0;
4150 first = first->next_bb;
4153 rtx prev, label, note, next;
4155 label = BB_HEAD (last);
4156 if (LABEL_P (label))
4157 note = NEXT_INSN (label);
4158 else
4159 note = label;
4160 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4162 prev = PREV_INSN (label);
4163 next = NEXT_INSN (note);
4164 gcc_assert (prev && next);
4166 NEXT_INSN (prev) = next;
4167 PREV_INSN (next) = prev;
4169 bb_header[last->index] = label;
4171 if (last == first)
4172 break;
4174 last = last->prev_bb;
4176 while (1);
4179 /* Restore basic block notes.
4180 FIRST is the first basic block in the ebb. */
4181 static void
4182 restore_bb_notes (basic_block first)
4184 if (!bb_header)
4185 return;
4187 /* We DON'T unlink basic block notes of the first block in the ebb. */
4188 first = first->next_bb;
4189 /* Remember: FIRST is actually a second basic block in the ebb. */
4191 while (first != EXIT_BLOCK_PTR
4192 && bb_header[first->index])
4194 rtx prev, label, note, next;
4196 label = bb_header[first->index];
4197 prev = PREV_INSN (label);
4198 next = NEXT_INSN (prev);
4200 if (LABEL_P (label))
4201 note = NEXT_INSN (label);
4202 else
4203 note = label;
4204 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4206 bb_header[first->index] = 0;
4208 NEXT_INSN (prev) = label;
4209 NEXT_INSN (note) = next;
4210 PREV_INSN (next) = note;
4212 first = first->next_bb;
4215 free (bb_header);
4216 bb_header = 0;
4219 /* Extend per basic block data structures of the scheduler.
4220 If BB is NULL, initialize structures for the whole CFG.
4221 Otherwise, initialize them for the just created BB. */
4222 static void
4223 extend_bb (basic_block bb)
4225 rtx insn;
4227 if (write_symbols != NO_DEBUG)
4229 /* Save-line-note-head:
4230 Determine the line-number at the start of each basic block.
4231 This must be computed and saved now, because after a basic block's
4232 predecessor has been scheduled, it is impossible to accurately
4233 determine the correct line number for the first insn of the block. */
4234 line_note_head = xrecalloc (line_note_head, last_basic_block,
4235 old_last_basic_block,
4236 sizeof (*line_note_head));
4238 if (bb)
4239 associate_line_notes_with_blocks (bb);
4240 else
4241 FOR_EACH_BB (bb)
4242 associate_line_notes_with_blocks (bb);
4245 old_last_basic_block = last_basic_block;
4247 if (current_sched_info->flags & USE_GLAT)
4249 glat_start = xrealloc (glat_start,
4250 last_basic_block * sizeof (*glat_start));
4251 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4254 /* The following is done to keep current_sched_info->next_tail non null. */
4256 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4257 if (NEXT_INSN (insn) == 0
4258 || (!NOTE_P (insn)
4259 && !LABEL_P (insn)
4260 /* Don't emit a NOTE if it would end up before a BARRIER. */
4261 && !BARRIER_P (NEXT_INSN (insn))))
4263 emit_note_after (NOTE_INSN_DELETED, insn);
4264 /* Make insn to appear outside BB. */
4265 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
4269 /* Add a basic block BB to extended basic block EBB.
4270 If EBB is EXIT_BLOCK_PTR, then BB is recovery block.
4271 If EBB is NULL, then BB should be a new region. */
4272 void
4273 add_block (basic_block bb, basic_block ebb)
4275 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4276 && bb->il.rtl->global_live_at_start == 0
4277 && bb->il.rtl->global_live_at_end == 0);
4279 extend_bb (bb);
4281 glat_start[bb->index] = 0;
4282 glat_end[bb->index] = 0;
4284 if (current_sched_info->add_block)
4285 /* This changes only data structures of the front-end. */
4286 current_sched_info->add_block (bb, ebb);
4289 /* Helper function.
4290 Fix CFG after both in- and inter-block movement of
4291 control_flow_insn_p JUMP. */
4292 static void
4293 fix_jump_move (rtx jump)
4295 basic_block bb, jump_bb, jump_bb_next;
4297 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4298 jump_bb = BLOCK_FOR_INSN (jump);
4299 jump_bb_next = jump_bb->next_bb;
4301 gcc_assert (current_sched_info->flags & SCHED_EBB
4302 || (RECOVERY_BLOCK (jump)
4303 && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));
4305 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4306 /* if jump_bb_next is not empty. */
4307 BB_END (jump_bb) = BB_END (jump_bb_next);
4309 if (BB_END (bb) != PREV_INSN (jump))
4310 /* Then there are instruction after jump that should be placed
4311 to jump_bb_next. */
4312 BB_END (jump_bb_next) = BB_END (bb);
4313 else
4314 /* Otherwise jump_bb_next is empty. */
4315 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4317 /* To make assertion in move_insn happy. */
4318 BB_END (bb) = PREV_INSN (jump);
4320 update_bb_for_insn (jump_bb_next);
4323 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4324 static void
4325 move_block_after_check (rtx jump)
4327 basic_block bb, jump_bb, jump_bb_next;
4328 VEC(edge,gc) *t;
4330 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4331 jump_bb = BLOCK_FOR_INSN (jump);
4332 jump_bb_next = jump_bb->next_bb;
4334 update_bb_for_insn (jump_bb);
4336 gcc_assert (RECOVERY_BLOCK (jump)
4337 || RECOVERY_BLOCK (BB_END (jump_bb_next)));
4339 unlink_block (jump_bb_next);
4340 link_block (jump_bb_next, bb);
4342 t = bb->succs;
4343 bb->succs = 0;
4344 move_succs (&(jump_bb->succs), bb);
4345 move_succs (&(jump_bb_next->succs), jump_bb);
4346 move_succs (&t, jump_bb_next);
4348 if (current_sched_info->fix_recovery_cfg)
4349 current_sched_info->fix_recovery_cfg
4350 (bb->index, jump_bb->index, jump_bb_next->index);
4353 /* Helper function for move_block_after_check.
4354 This functions attaches edge vector pointed to by SUCCSP to
4355 block TO. */
4356 static void
4357 move_succs (VEC(edge,gc) **succsp, basic_block to)
4359 edge e;
4360 edge_iterator ei;
4362 gcc_assert (to->succs == 0);
4364 to->succs = *succsp;
4366 FOR_EACH_EDGE (e, ei, to->succs)
4367 e->src = to;
4369 *succsp = 0;
4372 /* Initialize GLAT (global_live_at_{start, end}) structures.
4373 GLAT structures are used to substitute global_live_{start, end}
4374 regsets during scheduling. This is necessary to use such functions as
4375 split_block (), as they assume consistency of register live information. */
4376 static void
4377 init_glat (void)
4379 basic_block bb;
4381 FOR_ALL_BB (bb)
4382 init_glat1 (bb);
4385 /* Helper function for init_glat. */
4386 static void
4387 init_glat1 (basic_block bb)
4389 gcc_assert (bb->il.rtl->global_live_at_start != 0
4390 && bb->il.rtl->global_live_at_end != 0);
4392 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4393 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4395 if (current_sched_info->flags & DETACH_LIFE_INFO)
4397 bb->il.rtl->global_live_at_start = 0;
4398 bb->il.rtl->global_live_at_end = 0;
4402 /* Attach reg_live_info back to basic blocks.
4403 Also save regsets, that should not have been changed during scheduling,
4404 for checking purposes (see check_reg_live). */
4405 void
4406 attach_life_info (void)
4408 basic_block bb;
4410 FOR_ALL_BB (bb)
4411 attach_life_info1 (bb);
4414 /* Helper function for attach_life_info. */
4415 static void
4416 attach_life_info1 (basic_block bb)
4418 gcc_assert (bb->il.rtl->global_live_at_start == 0
4419 && bb->il.rtl->global_live_at_end == 0);
4421 if (glat_start[bb->index])
4423 gcc_assert (glat_end[bb->index]);
4425 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4426 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4428 /* Make them NULL, so they won't be freed in free_glat. */
4429 glat_start[bb->index] = 0;
4430 glat_end[bb->index] = 0;
4432 #ifdef ENABLE_CHECKING
4433 if (bb->index < NUM_FIXED_BLOCKS
4434 || current_sched_info->region_head_or_leaf_p (bb, 0))
4436 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4437 COPY_REG_SET (glat_start[bb->index],
4438 bb->il.rtl->global_live_at_start);
4441 if (bb->index < NUM_FIXED_BLOCKS
4442 || current_sched_info->region_head_or_leaf_p (bb, 1))
4444 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4445 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4447 #endif
4449 else
4451 gcc_assert (!glat_end[bb->index]);
4453 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4454 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4458 /* Free GLAT information. */
4459 static void
4460 free_glat (void)
4462 #ifdef ENABLE_CHECKING
4463 if (current_sched_info->flags & DETACH_LIFE_INFO)
4465 basic_block bb;
4467 FOR_ALL_BB (bb)
4469 if (glat_start[bb->index])
4470 FREE_REG_SET (glat_start[bb->index]);
4471 if (glat_end[bb->index])
4472 FREE_REG_SET (glat_end[bb->index]);
4475 #endif
4477 free (glat_start);
4478 free (glat_end);
4481 /* Remove INSN from the instruction stream.
4482 INSN should have any dependencies. */
4483 static void
4484 sched_remove_insn (rtx insn)
4486 change_queue_index (insn, QUEUE_NOWHERE);
4487 current_sched_info->add_remove_insn (insn, 1);
4488 remove_insn (insn);
4491 /* Clear priorities of all instructions, that are
4492 forward dependent on INSN. */
4493 static void
4494 clear_priorities (rtx insn)
4496 rtx link;
4498 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4500 rtx pro;
4502 pro = XEXP (link, 0);
4503 if (INSN_PRIORITY_KNOWN (pro))
4505 INSN_PRIORITY_KNOWN (pro) = 0;
4506 clear_priorities (pro);
4511 /* Recompute priorities of instructions, whose priorities might have been
4512 changed due to changes in INSN. */
4513 static void
4514 calc_priorities (rtx insn)
4516 rtx link;
4518 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4520 rtx pro;
4522 pro = XEXP (link, 0);
4523 if (!INSN_PRIORITY_KNOWN (pro))
4525 priority (pro);
4526 calc_priorities (pro);
4532 /* Add dependences between JUMP and other instructions in the recovery
4533 block. INSN is the first insn the recovery block. */
4534 static void
4535 add_jump_dependencies (rtx insn, rtx jump)
4539 insn = NEXT_INSN (insn);
4540 if (insn == jump)
4541 break;
4543 if (!INSN_DEPEND (insn))
4544 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4546 while (1);
4547 gcc_assert (LOG_LINKS (jump));
4550 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4552 bb_note (basic_block bb)
4554 rtx note;
4556 note = BB_HEAD (bb);
4557 if (LABEL_P (note))
4558 note = NEXT_INSN (note);
4560 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4561 return note;
4564 #ifdef ENABLE_CHECKING
4565 extern void debug_spec_status (ds_t);
4567 /* Dump information about the dependence status S. */
4568 void
4569 debug_spec_status (ds_t s)
4571 FILE *f = stderr;
4573 if (s & BEGIN_DATA)
4574 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4575 if (s & BE_IN_DATA)
4576 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4577 if (s & BEGIN_CONTROL)
4578 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4579 if (s & BE_IN_CONTROL)
4580 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4582 if (s & HARD_DEP)
4583 fprintf (f, "HARD_DEP; ");
4585 if (s & DEP_TRUE)
4586 fprintf (f, "DEP_TRUE; ");
4587 if (s & DEP_ANTI)
4588 fprintf (f, "DEP_ANTI; ");
4589 if (s & DEP_OUTPUT)
4590 fprintf (f, "DEP_OUTPUT; ");
4592 fprintf (f, "\n");
4595 /* Helper function for check_cfg.
4596 Return nonzero, if edge vector pointed to by EL has edge with TYPE in
4597 its flags. */
4598 static int
4599 has_edge_p (VEC(edge,gc) *el, int type)
4601 edge e;
4602 edge_iterator ei;
4604 FOR_EACH_EDGE (e, ei, el)
4605 if (e->flags & type)
4606 return 1;
4607 return 0;
4610 /* Check few properties of CFG between HEAD and TAIL.
4611 If HEAD (TAIL) is NULL check from the beginning (till the end) of the
4612 instruction stream. */
4613 static void
4614 check_cfg (rtx head, rtx tail)
4616 rtx next_tail;
4617 basic_block bb = 0;
4618 int not_first = 0, not_last;
4620 if (head == NULL)
4621 head = get_insns ();
4622 if (tail == NULL)
4623 tail = get_last_insn ();
4624 next_tail = NEXT_INSN (tail);
4628 not_last = head != tail;
4630 if (not_first)
4631 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4632 if (not_last)
4633 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4635 if (LABEL_P (head)
4636 || (NOTE_INSN_BASIC_BLOCK_P (head)
4637 && (!not_first
4638 || (not_first && !LABEL_P (PREV_INSN (head))))))
4640 gcc_assert (bb == 0);
4641 bb = BLOCK_FOR_INSN (head);
4642 if (bb != 0)
4643 gcc_assert (BB_HEAD (bb) == head);
4644 else
4645 /* This is the case of jump table. See inside_basic_block_p (). */
4646 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4649 if (bb == 0)
4651 gcc_assert (!inside_basic_block_p (head));
4652 head = NEXT_INSN (head);
4654 else
4656 gcc_assert (inside_basic_block_p (head)
4657 || NOTE_P (head));
4658 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4660 if (LABEL_P (head))
4662 head = NEXT_INSN (head);
4663 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4665 else
4667 if (control_flow_insn_p (head))
4669 gcc_assert (BB_END (bb) == head);
4671 if (any_uncondjump_p (head))
4672 gcc_assert (EDGE_COUNT (bb->succs) == 1
4673 && BARRIER_P (NEXT_INSN (head)));
4674 else if (any_condjump_p (head))
4675 gcc_assert (/* Usual case. */
4676 (EDGE_COUNT (bb->succs) > 1
4677 && !BARRIER_P (NEXT_INSN (head)))
4678 /* Or jump to the next instruction. */
4679 || (EDGE_COUNT (bb->succs) == 1
4680 && (BB_HEAD (EDGE_I (bb->succs, 0)->dest)
4681 == JUMP_LABEL (head))));
4683 if (BB_END (bb) == head)
4685 if (EDGE_COUNT (bb->succs) > 1)
4686 gcc_assert (control_flow_insn_p (head)
4687 || has_edge_p (bb->succs, EDGE_COMPLEX));
4688 bb = 0;
4691 head = NEXT_INSN (head);
4695 not_first = 1;
4697 while (head != next_tail);
4699 gcc_assert (bb == 0);
4702 /* Perform a few consistency checks of flags in different data structures. */
4703 static void
4704 check_sched_flags (void)
4706 unsigned int f = current_sched_info->flags;
4708 if (flag_sched_stalled_insns)
4709 gcc_assert (!(f & DO_SPECULATION));
4710 if (f & DO_SPECULATION)
4711 gcc_assert (!flag_sched_stalled_insns
4712 && (f & DETACH_LIFE_INFO)
4713 && spec_info
4714 && spec_info->mask);
4715 if (f & DETACH_LIFE_INFO)
4716 gcc_assert (f & USE_GLAT);
4719 /* Check global_live_at_{start, end} regsets.
4720 If FATAL_P is TRUE, then abort execution at the first failure.
4721 Otherwise, print diagnostics to STDERR (this mode is for calling
4722 from debugger). */
4723 void
4724 check_reg_live (bool fatal_p)
4726 basic_block bb;
4728 FOR_ALL_BB (bb)
4730 int i;
4732 i = bb->index;
4734 if (glat_start[i])
4736 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4737 glat_start[i]);
4739 if (!b)
4741 gcc_assert (!fatal_p);
4743 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4747 if (glat_end[i])
4749 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4750 glat_end[i]);
4752 if (!b)
4754 gcc_assert (!fatal_p);
4756 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4761 #endif /* ENABLE_CHECKING */
4763 #endif /* INSN_SCHEDULING */