/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes; these are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block over interblock motion, ties broken by
   4.  prefer useful over speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, ties broken by
   7.  choose the insn which has the most insns dependent on it, or finally
   8.  choose the insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence) can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS backward
   dependences.  LOG_LINKS are translated into INSN_DEPEND forward
   dependences for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live-register constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  */
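/* The ready-list/queue loop described above can be illustrated in
   miniature.  The disabled sketch below is not part of the pass, and
   every name in it (toy_insn, N_INSNS, ...) is hypothetical: an insn
   issues once all its predecessors have issued and their latencies
   have elapsed.  Priorities, the issue rate, and all interblock
   concerns are deliberately omitted.  */
#if 0
#include <stdio.h>

#define N_INSNS 4

struct toy_insn
{
  int latency;			/* cycles from issue to result */
  int n_preds;			/* unscheduled predecessors left */
  int n_succs;			/* number of forward dependences */
  int succs[N_INSNS];		/* forward dependences */
  int ready_tick;		/* earliest cycle this insn may issue */
};

int
main (void)
{
  /* A diamond-shaped DAG: 0 -> {1, 2} -> 3, each insn of latency 2.  */
  struct toy_insn insn[N_INSNS] = {
    {2, 0, 2, {1, 2}, 0},
    {2, 1, 1, {3}, 0},
    {2, 1, 1, {3}, 0},
    {2, 2, 0, {0}, 0}
  };
  int cycle, scheduled = 0, i, j;

  for (cycle = 0; scheduled < N_INSNS; cycle++)
    for (i = 0; i < N_INSNS; i++)
      if (insn[i].n_preds == 0 && insn[i].ready_tick <= cycle)
	{
	  printf ("cycle %d: issue insn %d\n", cycle, i);
	  insn[i].n_preds = -1;	/* mark as scheduled */
	  scheduled++;
	  for (j = 0; j < insn[i].n_succs; j++)
	    {
	      int s = insn[i].succs[j];
	      insn[s].n_preds--;
	      if (cycle + insn[i].latency > insn[s].ready_tick)
		insn[s].ready_tick = cycle + insn[i].latency;
	    }
	}
  return 0;
}
#endif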
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"
#include "params.h"
#ifdef INSN_SCHEDULING

/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;

/* sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -dSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  */

static int sched_verbose_param = 0;
int sched_verbose = 0;

/* Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  */
FILE *sched_dump = 0;

/* Highest uid before scheduling.  */
static int old_max_uid;

/* fix_sched_param() is called from toplev.c upon detection
   of the -fsched-verbose=N option.  */

void
fix_sched_param (const char *param, const char *val)
{
  if (!strcmp (param, "verbose"))
    sched_verbose_param = atoi (val);
  else
    warning (0, "fix_sched_param: unknown param: %s", param);
}
struct haifa_insn_data *h_i_d;

#define LINE_NOTE(INSN)		(h_i_d[INSN_UID (INSN)].line_note)
#define INSN_TICK(INSN)		(h_i_d[INSN_UID (INSN)].tick)
#define INTER_TICK(INSN)	(h_i_d[INSN_UID (INSN)].inter_tick)

/* If INSN_TICK of an instruction is equal to INVALID_TICK,
   then it should be recalculated from scratch.  */
#define INVALID_TICK (-(max_insn_queue_index + 1))
/* The minimal value of the INSN_TICK of an instruction.  */
#define MIN_TICK (-max_insn_queue_index)

/* Issue points are used to distinguish between instructions in max_issue ().
   For now, all instructions are equally good.  */
#define ISSUE_POINTS(INSN) 1

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
   If NULL - no speculation.  */
static spec_info_t spec_info;

/* True if a recovery block was added during scheduling of the current
   block.  Used to determine whether we need to fix INSN_TICKs.  */
static bool added_recovery_block_p;

/* Counters of different types of speculative instructions.  */
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;

/* Pointers to GLAT data.  See init_glat for more information.  */
regset *glat_start, *glat_end;

/* Array used in {unlink, restore}_bb_notes.  */
static rtx *bb_header = 0;

/* Number of basic_blocks.  */
static int old_last_basic_block;

/* Basic block after which recovery blocks will be created.  */
static basic_block before_recovery;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns preceding it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
   their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
   time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.

   The "Pending" list (P) consists of the insns found on the INSN_DEPEND
   lists of the unscheduled insns, i.e. of those that are ready, queued,
   or pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented in `queue_to_ready' as time
   passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  For the new pipeline description interface,
   MAX_INSN_QUEUE_INDEX is a power of two minus one which is not less
   than the maximal time of instruction execution computed by genattr.c
   from the maximal time of functional unit reservations and of
   obtaining a result.  This is the longest time an insn may be
   queued.  */

static rtx *insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)
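/* Illustration (not part of the pass): the masking in NEXT_Q and
   NEXT_Q_AFTER is a cheap modulo, valid only because
   max_insn_queue_index is a power of two minus one.  The disabled,
   self-contained check below (all names hypothetical) verifies the
   equivalence for a small mask.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const int mask = 7;		/* stands in for max_insn_queue_index */
  int q, d;

  for (q = 0; q <= mask; q++)
    for (d = 0; d <= mask; d++)
      if (((q + d) & mask) != (q + d) % (mask + 1))
	printf ("mismatch at q=%d d=%d\n", q, d);	/* never prints */
  return 0;
}
#endif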
#define QUEUE_SCHEDULED (-3)
#define QUEUE_NOWHERE   (-2)
#define QUEUE_READY     (-1)
/* QUEUE_SCHEDULED - INSN is scheduled.
   QUEUE_NOWHERE   - INSN isn't scheduled yet and is neither in
   queue nor ready list.
   QUEUE_READY     - INSN is in ready list.
   N >= 0 - INSN queued for X [where NEXT_Q_AFTER (q_ptr, X) == N] cycles.  */

#define QUEUE_INDEX(INSN) (h_i_d[INSN_UID (INSN)].queue_index)

/* The following variable holds the state reflecting all current and
   future reservations of the processor units.  */
state_t curr_state;

/* The following variable is the size of the memory representing all
   current and future reservations of the processor units.  */
static size_t dfa_state_size;

/* The following array is used to find the best insn from the ready list
   when the automaton pipeline interface is used.  */
static char *ready_try;

/* Describe the ready list of the scheduler.
   VEC holds enough space for all insns in the current region; VECLEN
   says exactly how many.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.  */

struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
};
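/* Illustration (hypothetical code, disabled): with FIRST = 5 and
   N_READY = 3, the insns occupy vec[3..5]; vec[5] is the highest
   priority element (index 0 for ready_element below) and vec[3] the
   lowest (what ready_lastpos returns).  */
#if 0
#include <stdio.h>

int
main (void)
{
  const char *vec[8] = {0};
  int first = 5, n_ready = 3, i;

  vec[5] = "insn-A";		/* highest priority */
  vec[4] = "insn-B";
  vec[3] = "insn-C";		/* lowest priority */

  for (i = 0; i < n_ready; i++)
    printf ("ready_element(%d) -> %s\n", i, vec[first - i]);
  printf ("ready_lastpos -> &vec[%d] (%s)\n",
	  first - n_ready + 1, vec[first - n_ready + 1]);
  return 0;
}
#endif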
/* The pointer to the ready list.  */
static struct ready_list *readyp;

/* Scheduling clock.  */
static int clock_var;

/* Number of instructions in current scheduling region.  */
static int rgn_n_insns;

static int may_trap_exp (rtx, int);

/* Nonzero iff the address is comprised of at most 1 register.  */
#define CONST_BASED_ADDRESS_P(x)			\
  (REG_P (x)						\
   || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS	\
	|| (GET_CODE (x) == LO_SUM))			\
       && (CONSTANT_P (XEXP (x, 0))			\
	   || CONSTANT_P (XEXP (x, 1)))))

/* Returns a class that insn with GET_DEST(insn)=x may belong to,
   as found by analyzing insn's expression.  */

static int
may_trap_exp (rtx x, int is_store)
{
  enum rtx_code code;

  if (x == 0)
    return TRAP_FREE;
  code = GET_CODE (x);
  if (is_store)
    {
      if (code == MEM && may_trap_p (x))
	return TRAP_RISKY;
      else
	return TRAP_FREE;
    }
  if (code == MEM)
    {
      /* The insn uses memory: a volatile load.  */
      if (MEM_VOLATILE_P (x))
	return IRISKY;
      /* An exception-free load.  */
      if (!may_trap_p (x))
	return IFREE;
      /* A load with 1 base register, to be further checked.  */
      if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
	return PFREE_CANDIDATE;
      /* No info on the load, to be further checked.  */
      return PRISKY_CANDIDATE;
    }
  else
    {
      const char *fmt;
      int i, insn_class = TRAP_FREE;

      /* Neither store nor load, check if it may cause a trap.  */
      if (may_trap_p (x))
	return TRAP_RISKY;
      /* Recursive step: walk the insn...  */
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    {
	      int tmp_class = may_trap_exp (XEXP (x, i), is_store);
	      insn_class = WORST_CLASS (insn_class, tmp_class);
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = 0; j < XVECLEN (x, i); j++)
		{
		  int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
		  insn_class = WORST_CLASS (insn_class, tmp_class);
		  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
		    break;
		}
	    }
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
      return insn_class;
    }
}

/* Classifies insn for the purpose of verifying that it can be
   moved speculatively, by examining its patterns, returning:
   TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
   TRAP_FREE: non-load insn.
   IFREE: load from a globally safe location.
   IRISKY: volatile load.
   PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
   being either PFREE or PRISKY.  */
int
haifa_classify_insn (rtx insn)
{
  rtx pat = PATTERN (insn);
  int tmp_class = TRAP_FREE;
  int insn_class = TRAP_FREE;
  enum rtx_code code;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i, len = XVECLEN (pat, 0);

      for (i = len - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (pat, 0, i));
	  switch (code)
	    {
	    case CLOBBER:
	      /* Test if it is a 'store'.  */
	      tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
	      break;
	    case SET:
	      /* Test if it is a store.  */
	      tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
	      if (tmp_class == TRAP_RISKY)
		break;
	      /* Test if it is a load.  */
	      tmp_class
		= WORST_CLASS (tmp_class,
			       may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
					     0));
	      break;
	    case COND_EXEC:
	    case TRAP_IF:
	      tmp_class = TRAP_RISKY;
	      break;
	    default:
	      ;
	    }
	  insn_class = WORST_CLASS (insn_class, tmp_class);
	  if (insn_class == TRAP_RISKY || insn_class == IRISKY)
	    break;
	}
    }
  else
    {
      code = GET_CODE (pat);
      switch (code)
	{
	case CLOBBER:
	  /* Test if it is a 'store'.  */
	  tmp_class = may_trap_exp (XEXP (pat, 0), 1);
	  break;
	case SET:
	  /* Test if it is a store.  */
	  tmp_class = may_trap_exp (SET_DEST (pat), 1);
	  if (tmp_class == TRAP_RISKY)
	    break;
	  /* Test if it is a load.  */
	  tmp_class =
	    WORST_CLASS (tmp_class,
			 may_trap_exp (SET_SRC (pat), 0));
	  break;
	case COND_EXEC:
	case TRAP_IF:
	  tmp_class = TRAP_RISKY;
	  break;
	default:;
	}
      insn_class = tmp_class;
    }

  return insn_class;
}
/* Forward declarations.  */

HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
static void find_insn_reg_weight (basic_block);
static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);

/* Notes handling mechanism:
   =========================
   Generally, NOTES are saved before scheduling and restored after scheduling.
   The scheduler distinguishes between three types of notes:

   (1) LINE_NUMBER notes, generated and used for debugging.  Here,
   before scheduling a region, a pointer to the LINE_NUMBER note is
   added to the insn following it (in save_line_notes()), and the note
   is removed (in rm_line_notes() and unlink_line_notes()).  After
   scheduling the region, this pointer is used for regeneration of
   the LINE_NUMBER note (in restore_line_notes()).

   (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
   Before scheduling a region, a pointer to the note is added to the insn
   that follows or precedes it.  (This happens as part of the data dependence
   computation).  After scheduling an insn, the pointer contained in it is
   used for regenerating the corresponding note (in reemit_notes).

   (3) All other notes (e.g. INSN_DELETED):  Before scheduling a block,
   these notes are put in a list (in rm_other_notes() and
   unlink_other_notes ()).  After scheduling the block, these notes are
   inserted at the beginning of the block (in schedule_block()).  */

static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
static void reemit_notes (rtx);

static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
static void ready_sort (struct ready_list *);
static rtx ready_remove_first (struct ready_list *);

static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);

static void debug_ready_list (struct ready_list *);

static void move_insn (rtx);

/* The following functions are used to implement multi-pass scheduling
   on the first cycle.  */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static int max_issue (struct ready_list *, int *, int);

static rtx choose_ready (struct ready_list *);

static void fix_inter_tick (rtx, rtx);
static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);

/* The following functions are used to implement scheduling of data/control
   speculative instructions.  */

static void extend_h_i_d (void);
static void extend_ready (int);
static void extend_global (rtx);
static void extend_all (rtx);
static void init_h_i_d (rtx);
static void generate_recovery_code (rtx);
static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
static dw_t dep_weak (ds_t);
static edge find_fallthru_edge (basic_block);
static void init_before_recovery (void);
static basic_block create_recovery_block (void);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void associate_line_notes_with_blocks (basic_block);
static void change_pattern (rtx, rtx);
static int speculate_insn (rtx, ds_t, rtx *);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void restore_bb_notes (basic_block);
static void extend_bb (basic_block);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (VEC(edge,gc) **, basic_block);
static void init_glat (void);
static void init_glat1 (basic_block);
static void attach_life_info1 (basic_block);
static void free_glat (void);
static void sched_remove_insn (rtx);
static void clear_priorities (rtx);
static void add_jump_dependencies (rtx, rtx);
static rtx bb_note (basic_block);
static void calc_priorities (rtx);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
static void check_sched_flags (void);
#endif
#endif /* INSN_SCHEDULING */

/* Point to state used for the current scheduling pass.  */
struct sched_info *current_sched_info;

#ifndef INSN_SCHEDULING
void
schedule_insns (void)
{
}
#else

/* Working copy of frontend's sched_info variable.  */
static struct sched_info current_sched_info_var;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */
static rtx last_scheduled_insn;

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
  return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
		     link, used);
}
/* Compute cost of executing INSN given the dependence on the insn USED.
   If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
   Otherwise, dependence between INSN and USED is assumed to be of type
   DEP_TYPE.  This function was introduced as a workaround for
   targetm.adjust_cost hook.
   This is the number of cycles between instruction issue and
   instruction results.  */

HAIFA_INLINE static int
insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
{
  int cost = INSN_COST (insn);

  if (cost < 0)
    {
      /* A USE insn, or something else we don't need to
	 understand.  We can't pass these directly to
	 result_ready_cost or insn_default_latency because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (recog_memoized (insn) < 0)
	{
	  INSN_COST (insn) = 0;
	  return 0;
	}
      else
	{
	  cost = insn_default_latency (insn);
	  if (cost < 0)
	    cost = 0;

	  INSN_COST (insn) = cost;
	}
    }

  /* In this case estimate cost without caring how insn is used.  */
  if (used == 0)
    return cost;

  /* A USE insn should never require the value used to be computed.
     This allows the computation of a function's result and parameter
     values to overlap the return and call.  */
  if (recog_memoized (used) < 0)
    cost = 0;
  else
    {
      gcc_assert (!link || dep_type == REG_NOTE_KIND (link));

      if (INSN_CODE (insn) >= 0)
	{
	  if (dep_type == REG_DEP_ANTI)
	    cost = 0;
	  else if (dep_type == REG_DEP_OUTPUT)
	    {
	      cost = (insn_default_latency (insn)
		      - insn_default_latency (used));
	      if (cost <= 0)
		cost = 1;
	    }
	  else if (bypass_p (insn))
	    cost = insn_latency (insn, used);
	}

      if (targetm.sched.adjust_cost_2)
	cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
      else
	{
	  gcc_assert (link);
	  if (targetm.sched.adjust_cost)
	    cost = targetm.sched.adjust_cost (used, link, insn, cost);
	}

      if (cost < 0)
	cost = 0;
    }

  return cost;
}
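/* A standalone model (hypothetical, disabled) of the dependence-type
   adjustment performed above, absent bypasses and target hooks: a true
   dependence pays the producer's full latency, an anti dependence is
   free, and an output dependence pays the latency difference, but at
   least one cycle.  */
#if 0
enum toy_dep { TOY_DEP_TRUE, TOY_DEP_ANTI, TOY_DEP_OUTPUT };

static int
toy_dep_cost (int producer_latency, int consumer_latency, enum toy_dep d)
{
  switch (d)
    {
    case TOY_DEP_ANTI:
      return 0;
    case TOY_DEP_OUTPUT:
      {
	int cost = producer_latency - consumer_latency;
	return cost <= 0 ? 1 : cost;
      }
    default:
      return producer_latency;
    }
}
#endif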
/* Compute the priority number for INSN.  */

static int
priority (rtx insn)
{
  rtx link;

  if (! INSN_P (insn))
    return 0;

  if (! INSN_PRIORITY_KNOWN (insn))
    {
      int this_priority = 0;

      if (INSN_DEPEND (insn) == 0)
	this_priority = insn_cost (insn, 0, 0);
      else
	{
	  rtx prev_first, twin;
	  basic_block rec;

	  /* For recovery check instructions we calculate priority slightly
	     differently from that of normal instructions.  Instead of walking
	     through the INSN_DEPEND (check) list, we walk through the
	     INSN_DEPEND list of each instruction in the corresponding
	     recovery block.  */

	  rec = RECOVERY_BLOCK (insn);
	  if (!rec || rec == EXIT_BLOCK_PTR)
	    {
	      prev_first = PREV_INSN (insn);
	      twin = insn;
	    }
	  else
	    {
	      prev_first = NEXT_INSN (BB_HEAD (rec));
	      twin = PREV_INSN (BB_END (rec));
	    }

	  do
	    {
	      for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
		{
		  rtx next;
		  int next_priority;

		  next = XEXP (link, 0);

		  if (BLOCK_FOR_INSN (next) != rec)
		    {
		      /* Critical path is meaningful in block boundaries
			 only.  */
		      if (! (*current_sched_info->contributes_to_priority)
			  (next, insn)
			  /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
			     then speculative instructions will less likely be
			     scheduled.  That is because the priority of
			     their producers will increase, and, thus, the
			     producers will more likely be scheduled, thus,
			     resolving the dependence.  */
			  || ((current_sched_info->flags & DO_SPECULATION)
			      && (DEP_STATUS (link) & SPECULATIVE)
			      && !(spec_info->flags
				   & COUNT_SPEC_IN_CRITICAL_PATH)))
			continue;

		      next_priority = insn_cost1 (insn,
						  twin == insn ?
						  REG_NOTE_KIND (link) :
						  REG_DEP_ANTI,
						  twin == insn ? link : 0,
						  next) + priority (next);

		      if (next_priority > this_priority)
			this_priority = next_priority;
		    }
		}

	      twin = PREV_INSN (twin);
	    }
	  while (twin != prev_first);
	}
      INSN_PRIORITY (insn) = this_priority;
      INSN_PRIORITY_KNOWN (insn) = 1;
    }

  return INSN_PRIORITY (insn);
}
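/* A hypothetical, disabled restatement of the recursion above: an
   insn's priority is its own cost if nothing depends on it, otherwise
   the maximum of (its cost + successor priority), i.e. the longest
   path to the end of the region.  The memoization done through
   INSN_PRIORITY_KNOWN is omitted here for brevity; -1 terminates each
   successor list.  */
#if 0
#define TOY_N 4

static int toy_cost[TOY_N] = {1, 2, 1, 3};
static int toy_succ[TOY_N][TOY_N] = {{1, 2, -1}, {3, -1}, {3, -1}, {-1}};

static int
toy_priority (int i)
{
  int best = toy_cost[i], j;

  for (j = 0; toy_succ[i][j] >= 0; j++)
    {
      int p = toy_cost[i] + toy_priority (toy_succ[i][j]);
      if (p > best)
	best = p;
    }
  return best;			/* toy_priority (0) == 6 here */
}
#endif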
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queuing and dequeuing of instructions.  */

#define SCHED_SORT(READY, N_READY)                                   \
do { if ((N_READY) == 2)                                             \
       swap_sort (READY, N_READY);                                   \
     else if ((N_READY) > 2)                                         \
       qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); }    \
while (0)

/* Returns a positive value if x is preferred; returns a negative value if
   y is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (const void *x, const void *y)
{
  rtx tmp = *(const rtx *) y;
  rtx tmp2 = *(const rtx *) x;
  rtx link;
  int tmp_class, tmp2_class, depend_count1, depend_count2;
  int val, priority_val, weight_val, info_val;

  /* An insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* Prefer insn with higher priority.  */
  priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);

  if (priority_val)
    return priority_val;

  /* Prefer the speculative insn with the greater dependency weakness.  */
  if (spec_info)
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = TODO_SPEC (tmp) & SPECULATIVE;
      if (ds1)
	dw1 = dep_weak (ds1);
      else
	dw1 = NO_DEP_WEAK;

      ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
      if (ds2)
	dw2 = dep_weak (ds2);
      else
	dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
	return dw;
    }

  /* Prefer an insn with a smaller contribution to register pressure.  */
  if (!reload_completed &&
      (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
    return weight_val;

  info_val = (*current_sched_info->rank) (tmp, tmp2);
  if (info_val)
    return info_val;

  /* Compare insns based on their relation to the last-scheduled-insn.  */
  if (INSN_P (last_scheduled_insn))
    {
      /* Classify the instructions into three classes:
         1) Data dependent on last scheduled insn.
         2) Anti/Output dependent on last scheduled insn.
         3) Independent of last scheduled insn, or has latency of one.
         Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;
      else
	tmp_class = 2;

      link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
      if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
	tmp2_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp2_class = 1;
      else
	tmp2_class = 2;

      if ((val = tmp2_class - tmp_class))
	return val;
    }

  /* Prefer the insn which has more later insns that depend on it.
     This gives the scheduler more freedom when scheduling later
     instructions at the expense of added register pressure.  */
  depend_count1 = 0;
  for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
    depend_count1++;

  depend_count2 = 0;
  for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
    depend_count2++;

  val = depend_count2 - depend_count1;
  if (val)
    return val;

  /* If insns are equally good, sort by INSN_LUID (original insn order),
     so that we make the sort stable.  This minimizes instruction movement,
     thus minimizing sched's effect on debugging and cross-jumping.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
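/* Hypothetical, disabled demonstration of the final tie-break above:
   qsort is not a stable sort, so the comparator falls back on a unique
   key (here `uid', standing in for INSN_LUID) and therefore never
   returns 0.  The ordering direction is simplified relative to the
   real comparator.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy { int priority; int uid; };

static int
toy_rank (const void *x, const void *y)
{
  const struct toy *a = (const struct toy *) x;
  const struct toy *b = (const struct toy *) y;

  if (b->priority != a->priority)
    return b->priority - a->priority;	/* higher priority first */
  return a->uid - b->uid;		/* unique => never returns 0 */
}

int
main (void)
{
  struct toy v[4] = {{2, 3}, {5, 1}, {2, 0}, {5, 2}};
  int i;

  qsort (v, 4, sizeof v[0], toy_rank);
  for (i = 0; i < 4; i++)
    printf ("priority %d, uid %d\n", v[i].priority, v[i].uid);
  return 0;
}
#endif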
/* Resort the array A in which only element at index N may be out of order.  */

HAIFA_INLINE static void
swap_sort (rtx *a, int n)
{
  rtx insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Add INSN to the insn queue so that it can be executed at least
   N_CYCLES after the currently executing insn.  Preserve insns
   chain for debugging purposes.  */

HAIFA_INLINE static void
queue_insn (rtx insn, int n_cycles)
{
  int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
  rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);

  gcc_assert (n_cycles <= max_insn_queue_index);

  insn_queue[next_q] = link;
  q_size += 1;

  if (sched_verbose >= 2)
    {
      fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
	       (*current_sched_info->print_insn) (insn, 0));

      fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
    }

  QUEUE_INDEX (insn) = next_q;
}

/* Remove INSN from queue.  */
static void
queue_remove (rtx insn)
{
  gcc_assert (QUEUE_INDEX (insn) >= 0);
  remove_free_INSN_LIST_elem (insn, &insn_queue[QUEUE_INDEX (insn)]);
  q_size--;
  QUEUE_INDEX (insn) = QUEUE_NOWHERE;
}
/* Return a pointer to the bottom of the ready list, i.e. the insn
   with the lowest priority.  */

HAIFA_INLINE static rtx *
ready_lastpos (struct ready_list *ready)
{
  gcc_assert (ready->n_ready >= 1);
  return ready->vec + ready->first - ready->n_ready + 1;
}

/* Add an element INSN to the ready list so that it ends up with the
   lowest/highest priority depending on FIRST_P.  */

HAIFA_INLINE static void
ready_add (struct ready_list *ready, rtx insn, bool first_p)
{
  if (!first_p)
    {
      if (ready->first == ready->n_ready)
	{
	  memmove (ready->vec + ready->veclen - ready->n_ready,
		   ready_lastpos (ready),
		   ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 1;
	}
      ready->vec[ready->first - ready->n_ready] = insn;
    }
  else
    {
      if (ready->first == ready->veclen - 1)
	{
	  if (ready->n_ready)
	    /* ready_lastpos() fails when called with (ready->n_ready == 0).  */
	    memmove (ready->vec + ready->veclen - ready->n_ready - 1,
		     ready_lastpos (ready),
		     ready->n_ready * sizeof (rtx));
	  ready->first = ready->veclen - 2;
	}
      ready->vec[++(ready->first)] = insn;
    }

  ready->n_ready++;

  gcc_assert (QUEUE_INDEX (insn) != QUEUE_READY);
  QUEUE_INDEX (insn) = QUEUE_READY;
}

/* Remove the element with the highest priority from the ready list and
   return it.  */

HAIFA_INLINE static rtx
ready_remove_first (struct ready_list *ready)
{
  rtx t;

  gcc_assert (ready->n_ready);
  t = ready->vec[ready->first--];
  ready->n_ready--;
  /* If the queue becomes empty, reset it.  */
  if (ready->n_ready == 0)
    ready->first = ready->veclen - 1;

  gcc_assert (QUEUE_INDEX (t) == QUEUE_READY);
  QUEUE_INDEX (t) = QUEUE_NOWHERE;

  return t;
}

/* The following code implements multi-pass scheduling for the first
   cycle.  In other words, we try to choose the ready insn that
   permits starting the maximum number of insns on the same cycle.  */
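/* Hypothetical, disabled sketch of the first-cycle multipass idea:
   try the ready insns in every order against a resource mask and keep
   the choice that lets the most insns start on this cycle.  The real
   max_issue uses the DFA state and a stack of choice entries instead
   of this naive recursion; all names below are illustrative only.  */
#if 0
#define TOY_READY 3

/* Functional units consumed by each ready insn, as bit masks.  */
static const unsigned toy_units[TOY_READY] = {0x1, 0x3, 0x4};

static int
toy_max_issue (unsigned busy, unsigned tried)
{
  int best = 0, i;

  for (i = 0; i < TOY_READY; i++)
    if (!(tried & (1u << i)) && !(busy & toy_units[i]))
      {
	int n = 1 + toy_max_issue (busy | toy_units[i],
				   tried | (1u << i));
	if (n > best)
	  best = n;
      }
  return best;			/* toy_max_issue (0, 0) == 2 here */
}
#endif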
/* Return a pointer to the element INDEX from the ready list.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_element (struct ready_list *ready, int index)
{
  gcc_assert (ready->n_ready && index < ready->n_ready);

  return ready->vec[ready->first - index];
}

/* Remove the element INDEX from the ready list and return it.  INDEX
   for the insn with the highest priority is 0, and the lowest priority
   has N_READY - 1.  */

HAIFA_INLINE static rtx
ready_remove (struct ready_list *ready, int index)
{
  rtx t;
  int i;

  if (index == 0)
    return ready_remove_first (ready);
  gcc_assert (ready->n_ready && index < ready->n_ready);
  t = ready->vec[ready->first - index];
  ready->n_ready--;
  for (i = index; i < ready->n_ready; i++)
    ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
  QUEUE_INDEX (t) = QUEUE_NOWHERE;
  return t;
}

/* Remove INSN from the ready list.  */
static void
ready_remove_insn (rtx insn)
{
  int i;

  for (i = 0; i < readyp->n_ready; i++)
    if (ready_element (readyp, i) == insn)
      {
	ready_remove (readyp, i);
	return;
      }
  gcc_unreachable ();
}

/* Sort the ready list READY by ascending priority, using the SCHED_SORT
   macro.  */

HAIFA_INLINE static void
ready_sort (struct ready_list *ready)
{
  rtx *first = ready_lastpos (ready);
  SCHED_SORT (first, ready->n_ready);
}
/* PREV is an insn that is ready to execute.  Adjust its priority if that
   will help shorten or lengthen register lifetimes as appropriate.  Also
   provide a hook for the target to tweak itself.  */

HAIFA_INLINE static void
adjust_priority (rtx prev)
{
  /* ??? There used to be code here to try and estimate how an insn
     affected register lifetimes, but it did it by looking at REG_DEAD
     notes, which we removed in schedule_region.  Nor did it try to
     take into account register pressure or anything useful like that.

     Revisit when we have a machine model to work with and not before.  */

  if (targetm.sched.adjust_priority)
    INSN_PRIORITY (prev) =
      targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
}

/* Advance time by one cycle.  */
HAIFA_INLINE static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
/* Clock at which the previous instruction was issued.  */
static int last_clock_var;

/* INSN is the "currently executing insn".  Launch each insn which was
   waiting on INSN.  The function returns the necessary cycle advance
   after issuing the insn (it is not zero for insns in a schedule
   group).  */

static int
schedule_insn (rtx insn)
{
  rtx link;
  int advance = 0;

  if (sched_verbose >= 1)
    {
      char buf[2048];

      print_insn (buf, insn, 0);
      buf[40] = 0;
      fprintf (sched_dump, ";;\t%3i--> %-40s:", clock_var, buf);

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);
      fputc ('\n', sched_dump);
    }

  /* The scheduled instruction should have all its dependencies resolved
     and should have been removed from the ready list.  */
  gcc_assert (INSN_DEP_COUNT (insn) == 0);
  gcc_assert (!LOG_LINKS (insn));

  gcc_assert (QUEUE_INDEX (insn) == QUEUE_NOWHERE);
  QUEUE_INDEX (insn) = QUEUE_SCHEDULED;

  /* Now we can free the RESOLVED_DEPS list.  */
  if (current_sched_info->flags & USE_DEPS_LIST)
    free_DEPS_LIST_list (&RESOLVED_DEPS (insn));
  else
    free_INSN_LIST_list (&RESOLVED_DEPS (insn));

  gcc_assert (INSN_TICK (insn) >= MIN_TICK);
  if (INSN_TICK (insn) > clock_var)
    /* INSN has been prematurely moved from the queue to the ready list.
       This is possible only if the following flag is set.  */
    gcc_assert (flag_sched_stalled_insns);

  /* ??? Probably, if INSN is scheduled prematurely, we should leave
     INSN_TICK untouched.  This is a machine-dependent issue, actually.  */
  INSN_TICK (insn) = clock_var;

  /* Update dependent instructions.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);

      resolve_dep (next, insn);

      if (!RECOVERY_BLOCK (insn)
	  || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
	{
	  int effective_cost;

	  effective_cost = try_ready (next);

	  if (effective_cost >= 0
	      && SCHED_GROUP_P (next)
	      && advance < effective_cost)
	    advance = effective_cost;
	}
      else
	/* A check always has only one forward dependence (to the first insn
	   in the recovery block), therefore, this will be executed only
	   once.  */
	{
	  gcc_assert (XEXP (link, 1) == 0);
	  fix_recovery_deps (RECOVERY_BLOCK (insn));
	}
    }

  /* Annotate the instruction with issue information -- TImode
     indicates that the instruction is expected not to be able
     to issue on the same cycle as the previous insn.  A machine
     may use this information to decide how the instruction should
     be aligned.  */
  if (issue_rate > 1
      && GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    {
      if (reload_completed)
	PUT_MODE (insn, clock_var > last_clock_var ? TImode : VOIDmode);
      last_clock_var = clock_var;
    }

  return advance;
}
/* Functions for handling of notes.  */

/* Delete notes beginning with INSN and put them in the chain
   of notes ended by NOTE_LIST.
   Returns the insn following the notes.  */

static rtx
unlink_other_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_NOT_BB_P (insn))
    {
      rtx next = NEXT_INSN (insn);
      /* Delete the note from its current position.  */
      if (prev)
	NEXT_INSN (prev) = next;
      if (next)
	PREV_INSN (next) = prev;

      /* See sched_analyze to see how these are handled.  */
      if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
	  && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
	{
	  /* Insert the note at the end of the notes list.  */
	  PREV_INSN (insn) = note_list;
	  if (note_list)
	    NEXT_INSN (note_list) = insn;
	  note_list = insn;
	}

      insn = next;
    }
  return insn;
}

/* Delete line notes beginning with INSN.  Record line-number notes so
   they can be reused.  Returns the insn following the notes.  */

static rtx
unlink_line_notes (rtx insn, rtx tail)
{
  rtx prev = PREV_INSN (insn);

  while (insn != tail && NOTE_P (insn))
    {
      rtx next = NEXT_INSN (insn);

      if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
	{
	  /* Delete the note from its current position.  */
	  if (prev)
	    NEXT_INSN (prev) = next;
	  if (next)
	    PREV_INSN (next) = prev;

	  /* Record line-number notes so they can be reused.  */
	  LINE_NOTE (insn) = insn;
	}
      else
	prev = insn;

      insn = next;
    }
  return insn;
}
/* Return the head and tail pointers of ebb starting at BEG and ending
   at END.  */

void
get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
{
  rtx beg_head = BB_HEAD (beg);
  rtx beg_tail = BB_END (beg);
  rtx end_head = BB_HEAD (end);
  rtx end_tail = BB_END (end);

  /* Don't include any notes or labels at the beginning of the BEG
     basic block, or notes at the end of the END basic blocks.  */

  if (LABEL_P (beg_head))
    beg_head = NEXT_INSN (beg_head);

  while (beg_head != beg_tail)
    if (NOTE_P (beg_head))
      beg_head = NEXT_INSN (beg_head);
    else
      break;

  *headp = beg_head;

  if (beg == end)
    end_head = beg_head;
  else if (LABEL_P (end_head))
    end_head = NEXT_INSN (end_head);

  while (end_head != end_tail)
    if (NOTE_P (end_tail))
      end_tail = PREV_INSN (end_tail);
    else
      break;

  *tailp = end_tail;
}

/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ].  */

int
no_real_insns_p (rtx head, rtx tail)
{
  while (head != NEXT_INSN (tail))
    {
      if (!NOTE_P (head) && !LABEL_P (head))
	return 0;
      head = NEXT_INSN (head);
    }
  return 1;
}
/* Delete line notes from one block.  Save them so they can be later restored
   (in restore_line_notes).  HEAD and TAIL are the boundaries of the
   block in which notes should be processed.  */

void
rm_line_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;
	  insn = unlink_line_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}

/* Save line number notes for each insn in block B.  HEAD and TAIL are
   the boundaries of the block in which notes should be processed.  */

void
save_line_notes (int b, rtx head, rtx tail)
{
  rtx next_tail;

  /* We must use the true line number for the first insn in the block
     that was computed and saved at the start of this pass.  We can't
     use the current line number, because scheduling of the previous
     block may have changed the current line number.  */

  rtx line = line_note_head[b];
  rtx insn;

  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    else
      LINE_NOTE (insn) = line;
}
/* After a block was scheduled, insert line notes into the insns list.
   HEAD and TAIL are the boundaries of the block in which notes should
   be processed.  */

void
restore_line_notes (rtx head, rtx tail)
{
  rtx line, note, prev, new;
  int added_notes = 0;
  rtx next_tail, insn;

  head = head;
  next_tail = NEXT_INSN (tail);

  /* Determine the current line-number.  We want to know the current
     line number of the first insn of the block here, in case it is
     different from the true line number that was saved earlier.  If
     different, then we need a line number note before the first insn
     of this block.  If it happens to be the same, then we don't want to
     emit another line number note here.  */
  for (line = head; line; line = PREV_INSN (line))
    if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
      break;

  /* Walk the insns keeping track of the current line-number and inserting
     the line-number notes as needed.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      line = insn;
    /* This used to emit line number notes before every non-deleted note.
       However, this confuses a debugger, because line notes not separated
       by real instructions all end up at the same address.  I can find no
       use for line number notes before other notes, so none are emitted.  */
    else if (!NOTE_P (insn)
	     && INSN_UID (insn) < old_max_uid
	     && (note = LINE_NOTE (insn)) != 0
	     && note != line
	     && (line == 0
#ifdef USE_MAPPED_LOCATION
		 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
#else
		 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
		 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
#endif
		 ))
      {
	line = note;
	prev = PREV_INSN (insn);
	if (LINE_NOTE (note))
	  {
	    /* Re-use the original line-number note.  */
	    LINE_NOTE (note) = 0;
	    PREV_INSN (note) = prev;
	    NEXT_INSN (prev) = note;
	    PREV_INSN (insn) = note;
	    NEXT_INSN (note) = insn;
	    set_block_for_insn (note, BLOCK_FOR_INSN (insn));
	  }
	else
	  {
	    added_notes++;
	    new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
#ifndef USE_MAPPED_LOCATION
	    NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
#endif
	  }
      }
  if (sched_verbose && added_notes)
    fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
}
/* After scheduling the function, delete redundant line notes from the
   insns list.  */

void
rm_redundant_line_notes (void)
{
  rtx line = 0;
  rtx insn = get_insns ();
  int active_insn = 0;
  int notes = 0;

  /* Walk the insns deleting redundant line-number notes.  Many of these
     are already present.  The remainder tend to occur at basic
     block boundaries.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
      {
	/* If there are no active insns following, INSN is redundant.  */
	if (active_insn == 0)
	  {
	    notes++;
	    SET_INSN_DELETED (insn);
	  }
	/* If the line number is unchanged, LINE is redundant.  */
	else if (line
#ifdef USE_MAPPED_LOCATION
		 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
#else
		 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
		 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
#endif
		 )
	  {
	    notes++;
	    SET_INSN_DELETED (line);
	    line = insn;
	  }
	else
	  line = insn;
	active_insn = 0;
      }
    else if (!((NOTE_P (insn)
		&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
	       || (NONJUMP_INSN_P (insn)
		   && (GET_CODE (PATTERN (insn)) == USE
		       || GET_CODE (PATTERN (insn)) == CLOBBER))))
      active_insn++;

  if (sched_verbose && notes)
    fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
}
/* Delete notes between HEAD and TAIL and put them in the chain
   of notes ended by NOTE_LIST.  */

void
rm_other_notes (rtx head, rtx tail)
{
  rtx next_tail;
  rtx insn;

  note_list = 0;
  if (head == tail && (! INSN_P (head)))
    return;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx prev;

      /* Farm out notes, and maybe save them in NOTE_LIST.
         This is needed to keep the debugger from
         getting completely deranged.  */
      if (NOTE_NOT_BB_P (insn))
	{
	  prev = insn;

	  insn = unlink_other_notes (insn, next_tail);

	  gcc_assert (prev != tail && prev != head && insn != next_tail);
	}
    }
}
/* Functions for computation of registers live/usage info.  */

/* This function looks for a new register being defined.
   If the destination register is already used by the source,
   a new register is not needed.  */

static int
find_set_reg_weight (rtx x)
{
  if (GET_CODE (x) == CLOBBER
      && register_operand (SET_DEST (x), VOIDmode))
    return 1;
  if (GET_CODE (x) == SET
      && register_operand (SET_DEST (x), VOIDmode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}

/* Calculate INSN_REG_WEIGHT for all insns of a block.  */

static void
find_insn_reg_weight (basic_block bb)
{
  rtx insn, next_tail, head, tail;

  get_ebb_head_tail (bb, bb, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    find_insn_reg_weight1 (insn);
}

/* Calculate INSN_REG_WEIGHT for a single instruction.
   Separated from find_insn_reg_weight because of the need
   to initialize new instructions in generate_recovery_code.  */
static void
find_insn_reg_weight1 (rtx insn)
{
  int reg_weight = 0;
  rtx x;

  /* Handle register life information.  */
  if (! INSN_P (insn))
    return;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_reg_weight (x);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_reg_weight (x);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD
	  || REG_NOTE_KIND (x) == REG_UNUSED)
	reg_weight--;
    }

  INSN_REG_WEIGHT (insn) = reg_weight;
}
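/* A hypothetical, disabled restatement of the rule implemented above:
   the weight of an insn is (registers born) - (registers dying), so
   "r3 = r1 + r2" where both r1 and r2 die contributes 1 - 2 = -1,
   i.e. it relieves register pressure.  */
#if 0
static int
toy_insn_reg_weight (int n_regs_born, int n_regs_dead_or_unused)
{
  return n_regs_born - n_regs_dead_or_unused;
}
#endif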
/* Move insns that became ready to fire from queue to ready list.  */

static void
queue_to_ready (struct ready_list *ready)
{
  rtx insn;
  rtx link;

  q_ptr = NEXT_Q (q_ptr);

  /* Add all pending insns that can be scheduled without stalls to the
     ready list.  */
  for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
    {
      insn = XEXP (link, 0);
      q_size -= 1;

      if (sched_verbose >= 2)
	fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
		 (*current_sched_info->print_insn) (insn, 0));

      /* If the ready list is full, delay the insn for 1 cycle.
	 See the comment in schedule_block for the rationale.  */
      if (!reload_completed
	  && ready->n_ready > MAX_SCHED_READY_INSNS
	  && !SCHED_GROUP_P (insn))
	{
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "requeued because ready full\n");
	  queue_insn (insn, 1);
	}
      else
	{
	  ready_add (ready, insn, false);
	  if (sched_verbose >= 2)
	    fprintf (sched_dump, "moving to ready without stalls\n");
	}
    }
  free_INSN_LIST_list (&insn_queue[q_ptr]);

  /* If there are no ready insns, stall until one is ready and add all
     of the pending insns at that point to the ready list.  */
  if (ready->n_ready == 0)
    {
      int stalls;

      for (stalls = 1; stalls <= max_insn_queue_index; stalls++)
	{
	  if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	    {
	      for (; link; link = XEXP (link, 1))
		{
		  insn = XEXP (link, 0);
		  q_size -= 1;

		  if (sched_verbose >= 2)
		    fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
			     (*current_sched_info->print_insn) (insn, 0));

		  ready_add (ready, insn, false);
		  if (sched_verbose >= 2)
		    fprintf (sched_dump, "moving to ready with %d stalls\n",
			     stalls);
		}
	      free_INSN_LIST_list (&insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]);

	      advance_one_cycle ();

	      break;
	    }

	  advance_one_cycle ();
	}

      q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
      clock_var += stalls;
    }
}
/* Used by early_queue_to_ready.  Determines whether it is "ok" to
   prematurely move INSN from the queue to the ready list.  Currently,
   if a target defines the hook 'is_costly_dependence', this function
   uses the hook to check whether there exist any dependences which are
   considered costly by the target, between INSN and other insns that
   have already been scheduled.  Dependences are checked up to Y cycles
   back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
   controlling this value.
   (Other considerations could be taken into account instead (or in
   addition) depending on user flags and target hooks.)  */

static bool
ok_for_early_queue_removal (rtx insn)
{
  int n_cycles;
  rtx prev_insn = last_scheduled_insn;

  if (targetm.sched.is_costly_dependence)
    {
      for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
	{
	  for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
	    {
	      rtx dep_link = 0;
	      int dep_cost;

	      if (!NOTE_P (prev_insn))
		{
		  dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
		  if (dep_link)
		    {
		      dep_cost = insn_cost (prev_insn, dep_link, insn) ;
		      if (targetm.sched.is_costly_dependence (prev_insn, insn,
				dep_link, dep_cost,
				flag_sched_stalled_insns_dep - n_cycles))
			return false;
		    }
		}

	      if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
		break;
	    }

	  if (!prev_insn)
	    break;
	  prev_insn = PREV_INSN (prev_insn);
	}
    }

  return true;
}
/* Remove insns from the queue, before they become "ready" with respect
   to FU latency considerations.  */

static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
  rtx insn;
  rtx link;
  rtx next_link;
  rtx prev_link;
  bool move_to_ready;
  int cost;
  state_t temp_state = alloca (dfa_state_size);
  int stalls;
  int insns_removed = 0;

  /*
     Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
     function:

     X == 0: There is no limit on how many queued insns can be removed
             prematurely.  (flag_sched_stalled_insns = -1).

     X >= 1: Only X queued insns can be removed prematurely in each
	     invocation.  (flag_sched_stalled_insns = X).

     Otherwise: Early queue removal is disabled.
         (flag_sched_stalled_insns = 0)
  */

  if (! flag_sched_stalled_insns)
    return 0;

  for (stalls = 0; stalls <= max_insn_queue_index; stalls++)
    {
      if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
	{
	  if (sched_verbose > 6)
	    fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);

	  prev_link = 0;
	  while (link)
	    {
	      next_link = XEXP (link, 1);
	      insn = XEXP (link, 0);
	      if (insn && sched_verbose > 6)
		print_rtl_single (sched_dump, insn);

	      memcpy (temp_state, state, dfa_state_size);
	      if (recog_memoized (insn) < 0)
		/* non-negative to indicate that it's not ready
		   to avoid infinite Q->R->Q->R...  */
		cost = 0;
	      else
		cost = state_transition (temp_state, insn);

	      if (sched_verbose >= 6)
		fprintf (sched_dump, "transition cost = %d\n", cost);

	      move_to_ready = false;
	      if (cost < 0)
		{
		  move_to_ready = ok_for_early_queue_removal (insn);
		  if (move_to_ready == true)
		    {
		      /* move from Q to R */
		      q_size -= 1;
		      ready_add (ready, insn, false);

		      if (prev_link)
			XEXP (prev_link, 1) = next_link;
		      else
			insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;

		      free_INSN_LIST_node (link);

		      if (sched_verbose >= 2)
			fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
				 (*current_sched_info->print_insn) (insn, 0));

		      insns_removed++;
		      if (insns_removed == flag_sched_stalled_insns)
			/* Remove no more than flag_sched_stalled_insns insns
			   from Q at a time.  */
			return insns_removed;
		    }
		}

	      if (move_to_ready == false)
		prev_link = link;

	      link = next_link;
	    } /* while link */
	} /* if link */

    } /* for stalls.. */

  return insns_removed;
}
/* Print the ready list for debugging purposes.  Callable from debugger.  */

static void
debug_ready_list (struct ready_list *ready)
{
  rtx *p;
  int i;

  if (ready->n_ready == 0)
    {
      fprintf (sched_dump, "\n");
      return;
    }

  p = ready_lastpos (ready);
  for (i = 0; i < ready->n_ready; i++)
    fprintf (sched_dump, "  %s", (*current_sched_info->print_insn) (p[i], 0));
  fprintf (sched_dump, "\n");
}

/* Search INSN for REG_SAVE_NOTE note pairs for
   NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
   NOTEs.  The REG_SAVE_NOTE note following the first one contains the
   saved value for NOTE_BLOCK_NUMBER, which is useful for
   NOTE_INSN_EH_REGION_{BEG,END} NOTEs.  */

static void
reemit_notes (rtx insn)
{
  rtx note, last = insn;

  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
    {
      if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	{
	  enum insn_note note_type = INTVAL (XEXP (note, 0));

	  last = emit_note_before (note_type, last);
	  remove_note (insn, note);
	}
    }
}
1912 /* Move INSN. Reemit notes if needed. Update CFG, if needed. */
1913 static void
1914 move_insn (rtx insn)
1916 rtx last = last_scheduled_insn;
1918 if (PREV_INSN (insn) != last)
1920 basic_block bb;
1921 rtx note;
1922 int jump_p = 0;
1924 bb = BLOCK_FOR_INSN (insn);
1926 /* BB_HEAD is either LABEL or NOTE. */
1927 gcc_assert (BB_HEAD (bb) != insn);
1929 if (BB_END (bb) == insn)
1930 /* If this is the last instruction in the BB, move the end marker
1931 one instruction up. */
1933 /* Jumps are always placed at the end of a basic block. */
1934 jump_p = control_flow_insn_p (insn);
1936 gcc_assert (!jump_p
1937 || ((current_sched_info->flags & SCHED_RGN)
1938 && RECOVERY_BLOCK (insn)
1939 && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
1940 || (current_sched_info->flags & SCHED_EBB));
1942 gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
1944 BB_END (bb) = PREV_INSN (insn);
1947 gcc_assert (BB_END (bb) != last);
1949 if (jump_p)
1950 /* We move the block note along with jump. */
1952 /* NT is needed for assertion below. */
1953 rtx nt = current_sched_info->next_tail;
1955 note = NEXT_INSN (insn);
1956 while (NOTE_NOT_BB_P (note) && note != nt)
1957 note = NEXT_INSN (note);
1959 if (note != nt
1960 && (LABEL_P (note)
1961 || BARRIER_P (note)))
1962 note = NEXT_INSN (note);
1964 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
1966 else
1967 note = insn;
1969 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
1970 PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
1972 NEXT_INSN (note) = NEXT_INSN (last);
1973 PREV_INSN (NEXT_INSN (last)) = note;
1975 NEXT_INSN (last) = insn;
1976 PREV_INSN (insn) = last;
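/* The pointer updates above are a standard doubly-linked list splice.
   Schematically (letters hypothetical): starting from
     A <-> insn ... note <-> B     and     last <-> C
   the assignments leave
     A <-> B     and     last <-> insn ... note <-> C.  */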
1978 bb = BLOCK_FOR_INSN (last);
1980 if (jump_p)
1982 fix_jump_move (insn);
1984 if (BLOCK_FOR_INSN (insn) != bb)
1985 move_block_after_check (insn);
1987 gcc_assert (BB_END (bb) == last);
1990 set_block_for_insn (insn, bb);
1992 /* Update BB_END, if needed. */
1993 if (BB_END (bb) == last)
1994 BB_END (bb) = insn;
1997 reemit_notes (insn);
1999 SCHED_GROUP_P (insn) = 0;
2002 /* The following structure describes an entry of the stack of choices. */
2003 struct choice_entry
2005 /* Ordinal number of the issued insn in the ready queue. */
2006 int index;
2007 /* The number of remaining insns whose issue we should still try. */
2008 int rest;
2009 /* The number of issued essential insns. */
2010 int n;
2011 /* State after issuing the insn. */
2012 state_t state;
2015 /* The following array is used to implement a stack of choices used in
2016 function max_issue. */
2017 static struct choice_entry *choice_stack;
2019 /* The following variable holds the number of essential insns issued on
2020 the current cycle. An insn is essential if it changes the
2021 processor's state. */
2022 static int cycle_issued_insns;
2024 /* The following variable holds the maximal number of tries of issuing
2025 insns for the first cycle multipass insn scheduling. We define
2026 this value as constant*(DFA_LOOKAHEAD**ISSUE_RATE). We would not
2027 need this constraint if all real insns (with non-negative codes)
2028 had reservations, because in that case the algorithm complexity is
2029 O(DFA_LOOKAHEAD**ISSUE_RATE). Unfortunately, the dfa descriptions
2030 might be incomplete and such insns might occur. For such
2031 descriptions, the complexity of the algorithm (without the constraint)
2032 could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2033 static int max_lookahead_tries;
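/* A worked instance of the bound above (numbers made up): with
   DFA_LOOKAHEAD == 4, ISSUE_RATE == 2 and the constant 100 used in
   choose_ready below, max_lookahead_tries == 100 * 4 * 4 == 1600.  */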
2035 /* The following variable holds the value of the hook
2036 `first_cycle_multipass_dfa_lookahead' at the last call of
2037 `max_issue'. */
2038 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2040 /* The following variable holds the value of `issue_rate' at the last
2041 call of `sched_init'. */
2042 static int cached_issue_rate = 0;
2044 /* The following function returns the maximal (or close to maximal) number
2045 of insns which can be issued on the same cycle, one of which is the
2046 insn with the best rank (the first insn in READY). To do this, the
2047 function tries different samples of ready insns. READY is the
2048 current queue `ready'. The global array READY_TRY reflects which
2049 insns are already issued in this try. MAX_POINTS is the sum of points
2050 of all instructions in READY. The function stops immediately if it
2051 reaches a solution in which all instructions can be issued.
2052 INDEX will contain the index of the best insn in READY. This
2053 function is used only for first cycle multipass scheduling. */
2054 static int
2055 max_issue (struct ready_list *ready, int *index, int max_points)
2057 int n, i, all, n_ready, best, delay, tries_num, points = -1;
2058 struct choice_entry *top;
2059 rtx insn;
2061 best = 0;
2062 memcpy (choice_stack->state, curr_state, dfa_state_size);
2063 top = choice_stack;
2064 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2065 top->n = 0;
2066 n_ready = ready->n_ready;
2067 for (all = i = 0; i < n_ready; i++)
2068 if (!ready_try [i])
2069 all++;
2070 i = 0;
2071 tries_num = 0;
2072 for (;;)
2074 if (top->rest == 0 || i >= n_ready)
2076 if (top == choice_stack)
2077 break;
2078 if (best < top - choice_stack && ready_try [0])
2080 best = top - choice_stack;
2081 *index = choice_stack [1].index;
2082 points = top->n;
2083 if (top->n == max_points || best == all)
2084 break;
2086 i = top->index;
2087 ready_try [i] = 0;
2088 top--;
2089 memcpy (curr_state, top->state, dfa_state_size);
2091 else if (!ready_try [i])
2093 tries_num++;
2094 if (tries_num > max_lookahead_tries)
2095 break;
2096 insn = ready_element (ready, i);
2097 delay = state_transition (curr_state, insn);
2098 if (delay < 0)
2100 if (state_dead_lock_p (curr_state))
2101 top->rest = 0;
2102 else
2103 top->rest--;
2104 n = top->n;
2105 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2106 n += ISSUE_POINTS (insn);
2107 top++;
2108 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2109 top->index = i;
2110 top->n = n;
2111 memcpy (top->state, curr_state, dfa_state_size);
2112 ready_try [i] = 1;
2113 i = -1;
2116 i++;
2118 while (top != choice_stack)
2120 ready_try [top->index] = 0;
2121 top--;
2123 memcpy (curr_state, choice_stack->state, dfa_state_size);
2125 if (sched_verbose >= 4)
2126 fprintf (sched_dump, ";;\t\tChosen insn : %s; points: %d/%d\n",
2127 (*current_sched_info->print_insn) (ready_element (ready, *index),
2128 0),
2129 points, max_points);
2131 return best;
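/* A self-contained toy sketch of the backtracking search above.  It
   assumes every insn can always be issued, and ignores DFA states,
   issue points and the tries limit; toy_max_issue and its locals are
   hypothetical and only illustrate how the stack of choices is
   walked (assumes n_ready <= 16).  */
#if 0
static int
toy_max_issue (int n_ready, int lookahead)
{
  bool tried[16] = { false };
  int index_at[17], rest[17];
  int depth = 0, best = 0, i = 0;

  rest[0] = lookahead;
  for (;;)
    {
      if (rest[depth] == 0 || i >= n_ready)
        {
          /* Frame exhausted: record the deepest chain that includes
             the first (best ranked) ready insn, then backtrack.  */
          if (depth == 0)
            break;
          if (best < depth && tried[0])
            best = depth;
          i = index_at[depth];
          tried[i] = false;
          depth--;
        }
      else if (!tried[i])
        {
          /* "Issue" insn I: push a new frame and rescan from 0.  */
          rest[depth]--;
          depth++;
          rest[depth] = lookahead;
          index_at[depth] = i;
          tried[i] = true;
          i = -1;
        }
      i++;
    }
  return best;
}
#endif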
2134 /* The following function chooses an insn from READY, removing it
2135 and possibly reordering READY. It is used only for first cycle
2136 multipass scheduling. */
2138 static rtx
2139 choose_ready (struct ready_list *ready)
2141 int lookahead = 0;
2143 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2144 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2145 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2146 return ready_remove_first (ready);
2147 else
2149 /* Try to choose the best insn. */
2150 int index = 0, i, n;
2151 rtx insn;
2152 int more_issue, max_points, try_data = 1, try_control = 1;
2154 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2156 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2157 max_lookahead_tries = 100;
2158 for (i = 0; i < issue_rate; i++)
2159 max_lookahead_tries *= lookahead;
2161 insn = ready_element (ready, 0);
2162 if (INSN_CODE (insn) < 0)
2163 return ready_remove_first (ready);
2165 if (spec_info
2166 && spec_info->flags & (PREFER_NON_DATA_SPEC
2167 | PREFER_NON_CONTROL_SPEC))
2169 for (i = 0, n = ready->n_ready; i < n; i++)
2171 rtx x;
2172 ds_t s;
2174 x = ready_element (ready, i);
2175 s = TODO_SPEC (x);
2177 if (spec_info->flags & PREFER_NON_DATA_SPEC
2178 && !(s & DATA_SPEC))
2180 try_data = 0;
2181 if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
2182 || !try_control)
2183 break;
2186 if (spec_info->flags & PREFER_NON_CONTROL_SPEC
2187 && !(s & CONTROL_SPEC))
2189 try_control = 0;
2190 if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
2191 break;
2196 if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2197 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2198 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2199 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
2200 (insn)))
2201 /* Discard the speculative instruction that stands first in the ready
2202 list. */
2204 change_queue_index (insn, 1);
2205 return 0;
2208 max_points = ISSUE_POINTS (insn);
2209 more_issue = issue_rate - cycle_issued_insns - 1;
2211 for (i = 1; i < ready->n_ready; i++)
2213 insn = ready_element (ready, i);
2214 ready_try [i]
2215 = (INSN_CODE (insn) < 0
2216 || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
2217 || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
2218 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2219 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2220 (insn)));
2222 if (!ready_try [i] && more_issue-- > 0)
2223 max_points += ISSUE_POINTS (insn);
2226 if (max_issue (ready, &index, max_points) == 0)
2227 return ready_remove_first (ready);
2228 else
2229 return ready_remove (ready, index);
2233 /* Use forward list scheduling to rearrange insns of block pointed to by
2234 TARGET_BB, possibly bringing insns from subsequent blocks in the same
2235 region. */
2237 void
2238 schedule_block (basic_block *target_bb, int rgn_n_insns1)
2240 struct ready_list ready;
2241 int i, first_cycle_insn_p;
2242 int can_issue_more;
2243 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2244 int sort_p, advance, start_clock_var;
2246 /* Head/tail info for this block. */
2247 rtx prev_head = current_sched_info->prev_head;
2248 rtx next_tail = current_sched_info->next_tail;
2249 rtx head = NEXT_INSN (prev_head);
2250 rtx tail = PREV_INSN (next_tail);
2252 /* We used to have code to avoid getting parameters moved from hard
2253 argument registers into pseudos.
2255 However, it was removed when it proved to be of marginal benefit
2256 and caused problems because schedule_block and compute_forward_dependences
2257 had different notions of what the "head" insn was. */
2259 gcc_assert (head != tail || INSN_P (head));
2261 added_recovery_block_p = false;
2263 /* Debug info. */
2264 if (sched_verbose)
2265 dump_new_block_header (0, *target_bb, head, tail);
2267 state_reset (curr_state);
2269 /* Allocate the ready list. */
2270 readyp = &ready;
2271 ready.vec = NULL;
2272 ready_try = NULL;
2273 choice_stack = NULL;
2275 rgn_n_insns = -1;
2276 extend_ready (rgn_n_insns1 + 1);
2278 ready.first = ready.veclen - 1;
2279 ready.n_ready = 0;
2281 /* It is used for first cycle multipass scheduling. */
2282 temp_state = alloca (dfa_state_size);
2284 if (targetm.sched.md_init)
2285 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2287 /* We start inserting insns after PREV_HEAD. */
2288 last_scheduled_insn = prev_head;
2290 gcc_assert (NOTE_P (last_scheduled_insn)
2291 && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
2293 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2294 queue. */
2295 q_ptr = 0;
2296 q_size = 0;
2298 insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
2299 memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
2301 /* Start just before the beginning of time. */
2302 clock_var = -1;
2304 /* The queue and ready lists and clock_var must be initialized before
2305 try_ready () is called (through init_ready_list ()). */
2306 (*current_sched_info->init_ready_list) ();
2308 /* The algorithm is O(n^2) in the number of ready insns at any given
2309 time in the worst case. Before reload we are more likely to have
2310 big lists so truncate them to a reasonable size. */
2311 if (!reload_completed && ready.n_ready > MAX_SCHED_READY_INSNS)
2313 ready_sort (&ready);
2315 /* Find first free-standing insn past MAX_SCHED_READY_INSNS. */
2316 for (i = MAX_SCHED_READY_INSNS; i < ready.n_ready; i++)
2317 if (!SCHED_GROUP_P (ready_element (&ready, i)))
2318 break;
2320 if (sched_verbose >= 2)
2322 fprintf (sched_dump,
2323 ";;\t\tReady list on entry: %d insns\n", ready.n_ready);
2324 fprintf (sched_dump,
2325 ";;\t\t before reload => truncated to %d insns\n", i);
2328 /* Delay all insns past it for 1 cycle. */
2329 while (i < ready.n_ready)
2330 queue_insn (ready_remove (&ready, i), 1);
2333 /* Now we can restore basic block notes and maintain precise cfg. */
2334 restore_bb_notes (*target_bb);
2336 last_clock_var = -1;
2338 advance = 0;
2340 sort_p = TRUE;
2341 /* Loop until all the insns in BB are scheduled. */
2342 while ((*current_sched_info->schedule_more_p) ())
2346 start_clock_var = clock_var;
2348 clock_var++;
2350 advance_one_cycle ();
2352 /* Add to the ready list all pending insns that can be issued now.
2353 If there are no ready insns, increment clock until one
2354 is ready and add all pending insns at that point to the ready
2355 list. */
2356 queue_to_ready (&ready);
2358 gcc_assert (ready.n_ready);
2360 if (sched_verbose >= 2)
2362 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2363 debug_ready_list (&ready);
2365 advance -= clock_var - start_clock_var;
2367 while (advance > 0);
2369 if (sort_p)
2371 /* Sort the ready list based on priority. */
2372 ready_sort (&ready);
2374 if (sched_verbose >= 2)
2376 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2377 debug_ready_list (&ready);
2381 /* Allow the target to reorder the list, typically for
2382 better instruction bundling. */
2383 if (sort_p && targetm.sched.reorder
2384 && (ready.n_ready == 0
2385 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2386 can_issue_more =
2387 targetm.sched.reorder (sched_dump, sched_verbose,
2388 ready_lastpos (&ready),
2389 &ready.n_ready, clock_var);
2390 else
2391 can_issue_more = issue_rate;
2393 first_cycle_insn_p = 1;
2394 cycle_issued_insns = 0;
2395 for (;;)
2397 rtx insn;
2398 int cost;
2399 bool asm_p = false;
2401 if (sched_verbose >= 2)
2403 fprintf (sched_dump, ";;\tReady list (t = %3d): ",
2404 clock_var);
2405 debug_ready_list (&ready);
2408 if (ready.n_ready == 0
2409 && can_issue_more
2410 && reload_completed)
2412 /* Allow scheduling insns directly from the queue in case
2413 there's nothing better to do (ready list is empty) but
2414 there are still vacant dispatch slots in the current cycle. */
2415 if (sched_verbose >= 6)
2416 fprintf (sched_dump, ";;\t\tSecond chance\n");
2417 memcpy (temp_state, curr_state, dfa_state_size);
2418 if (early_queue_to_ready (temp_state, &ready))
2419 ready_sort (&ready);
2422 if (ready.n_ready == 0 || !can_issue_more
2423 || state_dead_lock_p (curr_state)
2424 || !(*current_sched_info->schedule_more_p) ())
2425 break;
2427 /* Select and remove the insn from the ready list. */
2428 if (sort_p)
2430 insn = choose_ready (&ready);
2431 if (!insn)
2432 continue;
2434 else
2435 insn = ready_remove_first (&ready);
2437 if (targetm.sched.dfa_new_cycle
2438 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2439 insn, last_clock_var,
2440 clock_var, &sort_p))
2441 /* SORT_P is used by the target to override sorting
2442 of the ready list. This is needed when the target
2443 has modified its internal structures expecting that
2444 the insn will be issued next. As we need the insn
2445 to have the highest priority (so it will be returned by
2446 the ready_remove_first call above), we invoke
2447 ready_add (&ready, insn, true).
2448 But, still, there is one issue: INSN can later be
2449 discarded by the scheduler's front end through
2450 current_sched_info->can_schedule_ready_p, and hence won't
2451 be issued next. */
2453 ready_add (&ready, insn, true);
2454 break;
2457 sort_p = TRUE;
2458 memcpy (temp_state, curr_state, dfa_state_size);
2459 if (recog_memoized (insn) < 0)
2461 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2462 || asm_noperands (PATTERN (insn)) >= 0);
2463 if (!first_cycle_insn_p && asm_p)
2464 /* This is an asm insn that we tried to issue on a cycle other
2465 than the first. Issue it on the next cycle. */
2466 cost = 1;
2467 else
2468 /* A USE insn, or something else we don't need to
2469 understand. We can't pass these directly to
2470 state_transition because it will trigger a
2471 fatal error for unrecognizable insns. */
2472 cost = 0;
2474 else
2476 cost = state_transition (temp_state, insn);
2477 if (cost < 0)
2478 cost = 0;
2479 else if (cost == 0)
2480 cost = 1;
2483 if (cost >= 1)
2485 queue_insn (insn, cost);
2486 if (SCHED_GROUP_P (insn))
2488 advance = cost;
2489 break;
2492 continue;
2495 if (current_sched_info->can_schedule_ready_p
2496 && ! (*current_sched_info->can_schedule_ready_p) (insn))
2497 /* We normally get here only if we don't want to move
2498 insn from the split block. */
2500 TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
2501 continue;
2504 /* DECISION is made. */
2506 if (TODO_SPEC (insn) & SPECULATIVE)
2507 generate_recovery_code (insn);
2509 if (control_flow_insn_p (last_scheduled_insn)
2510 /* This is used to switch basic blocks by request
2511 from the scheduler front-end (actually, sched-ebb.c only).
2512 This is used to process blocks with a single fallthru
2513 edge. If the succeeding block has a jump, that jump would try to
2514 move to the end of the current bb, thus corrupting the CFG. */
2515 || current_sched_info->advance_target_bb (*target_bb, insn))
2517 *target_bb = current_sched_info->advance_target_bb
2518 (*target_bb, 0);
2520 if (sched_verbose)
2522 rtx x;
2524 x = next_real_insn (last_scheduled_insn);
2525 gcc_assert (x);
2526 dump_new_block_header (1, *target_bb, x, tail);
2529 last_scheduled_insn = bb_note (*target_bb);
2532 /* Update counters, etc in the scheduler's front end. */
2533 (*current_sched_info->begin_schedule_ready) (insn,
2534 last_scheduled_insn);
2536 move_insn (insn);
2537 last_scheduled_insn = insn;
2539 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2541 cycle_issued_insns++;
2542 memcpy (curr_state, temp_state, dfa_state_size);
2545 if (targetm.sched.variable_issue)
2546 can_issue_more =
2547 targetm.sched.variable_issue (sched_dump, sched_verbose,
2548 insn, can_issue_more);
2549 /* A naked CLOBBER or USE generates no instruction, so do
2550 not count them against the issue rate. */
2551 else if (GET_CODE (PATTERN (insn)) != USE
2552 && GET_CODE (PATTERN (insn)) != CLOBBER)
2553 can_issue_more--;
2555 advance = schedule_insn (insn);
2557 /* After issuing an asm insn we should start a new cycle. */
2558 if (advance == 0 && asm_p)
2559 advance = 1;
2560 if (advance != 0)
2561 break;
2563 first_cycle_insn_p = 0;
2565 /* Sort the ready list based on priority. This must be
2566 redone here, as schedule_insn may have readied additional
2567 insns that will not be sorted correctly. */
2568 if (ready.n_ready > 0)
2569 ready_sort (&ready);
2571 if (targetm.sched.reorder2
2572 && (ready.n_ready == 0
2573 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2575 can_issue_more =
2576 targetm.sched.reorder2 (sched_dump, sched_verbose,
2577 ready.n_ready
2578 ? ready_lastpos (&ready) : NULL,
2579 &ready.n_ready, clock_var);
2584 /* Debug info. */
2585 if (sched_verbose)
2587 fprintf (sched_dump, ";;\tReady list (final): ");
2588 debug_ready_list (&ready);
2591 if (current_sched_info->queue_must_finish_empty)
2592 /* Sanity check -- queue must be empty now. Meaningless if region has
2593 multiple bbs. */
2594 gcc_assert (!q_size && !ready.n_ready);
2595 else
2597 /* We must maintain QUEUE_INDEX between blocks in region. */
2598 for (i = ready.n_ready - 1; i >= 0; i--)
2600 rtx x;
2602 x = ready_element (&ready, i);
2603 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2604 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2607 if (q_size)
2608 for (i = 0; i <= max_insn_queue_index; i++)
2610 rtx link;
2611 for (link = insn_queue[i]; link; link = XEXP (link, 1))
2613 rtx x;
2615 x = XEXP (link, 0);
2616 QUEUE_INDEX (x) = QUEUE_NOWHERE;
2617 TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
2619 free_INSN_LIST_list (&insn_queue[i]);
2623 if (!current_sched_info->queue_must_finish_empty
2624 || added_recovery_block_p)
2626 /* INSN_TICK (minimum clock tick at which the insn becomes
2627 ready) may not be correct for insns in the subsequent
2628 blocks of the region. We should use a correct value of
2629 `clock_var' or modify INSN_TICK. It is better to keep
2630 clock_var value equal to 0 at the start of a basic block.
2631 Therefore we modify INSN_TICK here. */
2632 fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
2635 #ifdef ENABLE_CHECKING
2636 /* After reload the ia64 backend doesn't maintain BB_END, so
2637 if we want to check anything, better do it now.
2638 It has already clobbered previously scheduled code. */
2639 if (reload_completed)
2640 check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
2641 #endif
2643 if (targetm.sched.md_finish)
2644 targetm.sched.md_finish (sched_dump, sched_verbose);
2646 /* Update head/tail boundaries. */
2647 head = NEXT_INSN (prev_head);
2648 tail = last_scheduled_insn;
2650 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2651 previously found among the insns. Insert them at the beginning
2652 of the insns. */
2653 if (note_list != 0)
2655 basic_block head_bb = BLOCK_FOR_INSN (head);
2656 rtx note_head = note_list;
2658 while (PREV_INSN (note_head))
2660 set_block_for_insn (note_head, head_bb);
2661 note_head = PREV_INSN (note_head);
2663 /* In the above loop we've missed this note: */
2664 set_block_for_insn (note_head, head_bb);
2666 PREV_INSN (note_head) = PREV_INSN (head);
2667 NEXT_INSN (PREV_INSN (head)) = note_head;
2668 PREV_INSN (head) = note_list;
2669 NEXT_INSN (note_list) = head;
2670 head = note_head;
2673 /* Debugging. */
2674 if (sched_verbose)
2676 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2677 clock_var, INSN_UID (head));
2678 fprintf (sched_dump, ";; new tail = %d\n\n",
2679 INSN_UID (tail));
2682 current_sched_info->head = head;
2683 current_sched_info->tail = tail;
2685 free (ready.vec);
2687 free (ready_try);
2688 for (i = 0; i <= rgn_n_insns; i++)
2689 free (choice_stack [i].state);
2690 free (choice_stack);
2693 /* set_priorities: compute the priority of each insn in the block. */
2695 int
2696 set_priorities (rtx head, rtx tail)
2698 rtx insn;
2699 int n_insn;
2700 int sched_max_insns_priority =
2701 current_sched_info->sched_max_insns_priority;
2702 rtx prev_head;
2704 if (head == tail && (! INSN_P (head)))
2705 return 0;
2707 n_insn = 0;
2709 prev_head = PREV_INSN (head);
2710 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2712 if (!INSN_P (insn))
2713 continue;
2715 n_insn++;
2716 (void) priority (insn);
2718 if (INSN_PRIORITY_KNOWN (insn))
2719 sched_max_insns_priority =
2720 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2723 current_sched_info->sched_max_insns_priority = sched_max_insns_priority;
2725 return n_insn;
2728 /* Next LUID to assign to an instruction. */
2729 static int luid;
2731 /* Initialize some global state for the scheduler. */
2733 void
2734 sched_init (void)
2736 basic_block b;
2737 rtx insn;
2738 int i;
2740 /* Switch to working copy of sched_info. */
2741 memcpy (&current_sched_info_var, current_sched_info,
2742 sizeof (current_sched_info_var));
2743 current_sched_info = &current_sched_info_var;
2745 /* Disable speculative loads in the presence of cc0. */
2746 #ifdef HAVE_cc0
2747 flag_schedule_speculative_load = 0;
2748 #endif
2750 /* Set dump and sched_verbose for the desired debugging output. If no
2751 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2752 For -fsched-verbose=N, N>=10, print everything to stderr. */
2753 sched_verbose = sched_verbose_param;
2754 if (sched_verbose_param == 0 && dump_file)
2755 sched_verbose = 1;
2756 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2757 ? stderr : dump_file);
2759 /* Initialize SPEC_INFO. */
2760 if (targetm.sched.set_sched_flags)
2762 spec_info = &spec_info_var;
2763 targetm.sched.set_sched_flags (spec_info);
2764 if (current_sched_info->flags & DO_SPECULATION)
2765 spec_info->weakness_cutoff =
2766 (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
2767 else
2768 /* So we won't read anything accidentally. */
2769 spec_info = 0;
2770 #ifdef ENABLE_CHECKING
2771 check_sched_flags ();
2772 #endif
2774 else
2775 /* So we won't read anything accidentally. */
2776 spec_info = 0;
2778 /* Initialize issue_rate. */
2779 if (targetm.sched.issue_rate)
2780 issue_rate = targetm.sched.issue_rate ();
2781 else
2782 issue_rate = 1;
2784 if (cached_issue_rate != issue_rate)
2786 cached_issue_rate = issue_rate;
2787 /* To invalidate max_lookahead_tries: */
2788 cached_first_cycle_multipass_dfa_lookahead = 0;
2791 old_max_uid = 0;
2792 h_i_d = 0;
2793 extend_h_i_d ();
2795 for (i = 0; i < old_max_uid; i++)
2797 h_i_d[i].cost = -1;
2798 h_i_d[i].todo_spec = HARD_DEP;
2799 h_i_d[i].queue_index = QUEUE_NOWHERE;
2800 h_i_d[i].tick = INVALID_TICK;
2801 h_i_d[i].inter_tick = INVALID_TICK;
2804 if (targetm.sched.init_dfa_pre_cycle_insn)
2805 targetm.sched.init_dfa_pre_cycle_insn ();
2807 if (targetm.sched.init_dfa_post_cycle_insn)
2808 targetm.sched.init_dfa_post_cycle_insn ();
2810 dfa_start ();
2811 dfa_state_size = state_size ();
2812 curr_state = xmalloc (dfa_state_size);
2814 h_i_d[0].luid = 0;
2815 luid = 1;
2816 FOR_EACH_BB (b)
2817 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2819 INSN_LUID (insn) = luid;
2821 /* Increment the next luid, unless this is a note. We don't
2822 really need separate IDs for notes and we don't want to
2823 schedule differently depending on whether or not there are
2824 line-number notes, i.e., depending on whether or not we're
2825 generating debugging information. */
2826 if (!NOTE_P (insn))
2827 ++luid;
2829 if (insn == BB_END (b))
2830 break;
2833 init_dependency_caches (luid);
2835 init_alias_analysis ();
2837 line_note_head = 0;
2838 old_last_basic_block = 0;
2839 glat_start = 0;
2840 glat_end = 0;
2841 extend_bb (0);
2843 if (current_sched_info->flags & USE_GLAT)
2844 init_glat ();
2846 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2847 removing death notes. */
2848 FOR_EACH_BB_REVERSE (b)
2849 find_insn_reg_weight (b);
2851 if (targetm.sched.md_init_global)
2852 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2854 nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
2855 before_recovery = 0;
2857 #ifdef ENABLE_CHECKING
2858 /* This is used mainly for finding bugs in check_cfg () itself. */
2859 check_cfg (0, 0);
2860 #endif
2863 /* Free global data used during insn scheduling. */
2865 void
2866 sched_finish (void)
2868 free (h_i_d);
2869 free (curr_state);
2870 dfa_finish ();
2871 free_dependency_caches ();
2872 end_alias_analysis ();
2873 free (line_note_head);
2874 free_glat ();
2876 if (targetm.sched.md_finish_global)
2877 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2879 if (spec_info && spec_info->dump)
2881 char c = reload_completed ? 'a' : 'b';
2883 fprintf (spec_info->dump,
2884 ";; %s:\n", current_function_name ());
2886 fprintf (spec_info->dump,
2887 ";; Procedure %cr-begin-data-spec motions == %d\n",
2888 c, nr_begin_data);
2889 fprintf (spec_info->dump,
2890 ";; Procedure %cr-be-in-data-spec motions == %d\n",
2891 c, nr_be_in_data);
2892 fprintf (spec_info->dump,
2893 ";; Procedure %cr-begin-control-spec motions == %d\n",
2894 c, nr_begin_control);
2895 fprintf (spec_info->dump,
2896 ";; Procedure %cr-be-in-control-spec motions == %d\n",
2897 c, nr_be_in_control);
2900 #ifdef ENABLE_CHECKING
2901 /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
2902 if (!reload_completed)
2903 check_cfg (0, 0);
2904 #endif
2906 current_sched_info = NULL;
2909 /* Fix INSN_TICKs of the instructions in the current block as well as
2910 INSN_TICKs of their dependents.
2911 HEAD and TAIL are the begin and the end of the current scheduled block. */
2912 static void
2913 fix_inter_tick (rtx head, rtx tail)
2915 /* Set of instructions with corrected INSN_TICK. */
2916 bitmap_head processed;
2917 int next_clock = clock_var + 1;
2919 bitmap_initialize (&processed, 0);
2921 /* Iterate over the scheduled instructions and fix their INSN_TICKs and
2922 the INSN_TICKs of their dependent instructions, so that INSN_TICKs are
2923 consistent across different blocks. */
2924 for (tail = NEXT_INSN (tail); head != tail; head = NEXT_INSN (head))
2926 if (INSN_P (head))
2928 int tick;
2929 rtx link;
2931 tick = INSN_TICK (head);
2932 gcc_assert (tick >= MIN_TICK);
2934 /* Fix the INSN_TICK of an instruction from the just-scheduled block. */
2935 if (!bitmap_bit_p (&processed, INSN_LUID (head)))
2937 bitmap_set_bit (&processed, INSN_LUID (head));
2938 tick -= next_clock;
2940 if (tick < MIN_TICK)
2941 tick = MIN_TICK;
2943 INSN_TICK (head) = tick;
2946 for (link = INSN_DEPEND (head); link; link = XEXP (link, 1))
2948 rtx next;
2950 next = XEXP (link, 0);
2951 tick = INSN_TICK (next);
2953 if (tick != INVALID_TICK
2954 /* If NEXT has its INSN_TICK calculated, fix it.
2955 If not - it will be properly calculated from
2956 scratch later in fix_tick_ready. */
2957 && !bitmap_bit_p (&processed, INSN_LUID (next)))
2959 bitmap_set_bit (&processed, INSN_LUID (next));
2960 tick -= next_clock;
2962 if (tick < MIN_TICK)
2963 tick = MIN_TICK;
2965 if (tick > INTER_TICK (next))
2966 INTER_TICK (next) = tick;
2967 else
2968 tick = INTER_TICK (next);
2970 INSN_TICK (next) = tick;
2975 bitmap_clear (&processed);
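/* Worked example (numbers made up): if the block just scheduled
   finished with clock_var == 4, then next_clock == 5 and a scheduled
   insn with INSN_TICK == 7 is rebased to 7 - 5 == 2, so the next
   block can start counting from clock_var == 0 again.  */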
2978 /* Check if NEXT is ready to be added to the ready or queue list.
2979 If "yes", add it to the proper list.
2980 Returns:
2981 -1 - is not ready yet,
2982 0 - added to the ready list,
2983 0 < N - queued for N cycles. */
2984 int
2985 try_ready (rtx next)
2987 ds_t old_ts, *ts;
2988 rtx link;
2990 ts = &TODO_SPEC (next);
2991 old_ts = *ts;
2993 gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
2994 && ((old_ts & HARD_DEP)
2995 || (old_ts & SPECULATIVE)));
2997 if (!(current_sched_info->flags & DO_SPECULATION))
2999 if (!LOG_LINKS (next))
3000 *ts &= ~HARD_DEP;
3002 else
3004 *ts &= ~SPECULATIVE & ~HARD_DEP;
3006 link = LOG_LINKS (next);
3007 if (link)
3009 /* LOG_LINKS are maintained sorted.
3010 So if DEP_STATUS of the first dep is SPECULATIVE,
3011 then all other deps are speculative too. */
3012 if (DEP_STATUS (link) & SPECULATIVE)
3014 /* Now we've got NEXT with speculative deps only.
3015 1. Look at the deps to see what we have to do.
3016 2. Check if we can do 'todo'. */
3017 *ts = DEP_STATUS (link) & SPECULATIVE;
3018 while ((link = XEXP (link, 1)))
3019 *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
3021 if (dep_weak (*ts) < spec_info->weakness_cutoff)
3022 /* Too few points. */
3023 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3025 else
3026 *ts |= HARD_DEP;
3030 if (*ts & HARD_DEP)
3031 gcc_assert (*ts == old_ts
3032 && QUEUE_INDEX (next) == QUEUE_NOWHERE);
3033 else if (current_sched_info->new_ready)
3034 *ts = current_sched_info->new_ready (next, *ts);
3036 /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then the insn might
3037 have either its original pattern or a changed (speculative) one. This is
3038 due to changing ebbs in region scheduling.
3039 * But if (old_ts & SPECULATIVE), then we are pretty sure that the insn
3040 has a speculative pattern.
3042 We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
3043 control-speculative NEXT could have been discarded by sched-rgn.c
3044 (the same case as when discarded by can_schedule_ready_p ()). */
3046 if ((*ts & SPECULATIVE)
3047 /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
3048 need to change anything. */
3049 && *ts != old_ts)
3051 int res;
3052 rtx new_pat;
3054 gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
3056 res = speculate_insn (next, *ts, &new_pat);
3058 switch (res)
3060 case -1:
3061 /* It would be nice to change DEP_STATUS of all dependences,
3062 which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
3063 so we won't reanalyze anything. */
3064 *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
3065 break;
3067 case 0:
3068 /* We follow the rule that every speculative insn
3069 has a non-null ORIG_PAT. */
3070 if (!ORIG_PAT (next))
3071 ORIG_PAT (next) = PATTERN (next);
3072 break;
3074 case 1:
3075 if (!ORIG_PAT (next))
3076 /* If we are going to overwrite the original pattern of the insn,
3077 save it. */
3078 ORIG_PAT (next) = PATTERN (next);
3080 change_pattern (next, new_pat);
3081 break;
3083 default:
3084 gcc_unreachable ();
3088 /* We need to restore pattern only if (*ts == 0), because otherwise it is
3089 either correct (*ts & SPECULATIVE),
3090 or we simply don't care (*ts & HARD_DEP). */
3092 gcc_assert (!ORIG_PAT (next)
3093 || !RECOVERY_BLOCK (next)
3094 || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);
3096 if (*ts & HARD_DEP)
3098 /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
3099 control-speculative NEXT could have been discarded by sched-rgn.c
3100 (the same case as when discarded by can_schedule_ready_p ()). */
3101 /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
3103 change_queue_index (next, QUEUE_NOWHERE);
3104 return -1;
3106 else if (!(*ts & BEGIN_SPEC) && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
3107 /* We should change the pattern of every previously speculative
3108 instruction - and we determine whether NEXT was speculative using
3109 the ORIG_PAT field. Except in one case - simple checks have ORIG_PAT
3110 set too, hence we also check for the RECOVERY_BLOCK. */
3112 change_pattern (next, ORIG_PAT (next));
3113 ORIG_PAT (next) = 0;
3116 if (sched_verbose >= 2)
3118 int s = TODO_SPEC (next);
3120 fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
3121 (*current_sched_info->print_insn) (next, 0));
3123 if (spec_info && spec_info->dump)
3125 if (s & BEGIN_DATA)
3126 fprintf (spec_info->dump, "; data-spec;");
3127 if (s & BEGIN_CONTROL)
3128 fprintf (spec_info->dump, "; control-spec;");
3129 if (s & BE_IN_CONTROL)
3130 fprintf (spec_info->dump, "; in-control-spec;");
3133 fprintf (sched_dump, "\n");
3136 adjust_priority (next);
3138 return fix_tick_ready (next);
3141 /* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
3142 static int
3143 fix_tick_ready (rtx next)
3145 rtx link;
3146 int tick, delay;
3148 link = RESOLVED_DEPS (next);
3150 if (link)
3152 int full_p;
3154 tick = INSN_TICK (next);
3155 /* If tick is not equal to INVALID_TICK, then update
3156 INSN_TICK of NEXT with the most recent resolved dependence
3157 cost. Otherwise, recalculate it from scratch. */
3158 full_p = tick == INVALID_TICK;
3161 rtx pro;
3162 int tick1;
3164 pro = XEXP (link, 0);
3165 gcc_assert (INSN_TICK (pro) >= MIN_TICK);
3167 tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
3168 if (tick1 > tick)
3169 tick = tick1;
3171 while ((link = XEXP (link, 1)) && full_p);
3173 else
3174 tick = -1;
3176 INSN_TICK (next) = tick;
3178 delay = tick - clock_var;
3179 if (delay <= 0)
3180 delay = QUEUE_READY;
3182 change_queue_index (next, delay);
3184 return delay;
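/* Worked example (numbers made up): with two resolved producers at
   INSN_TICK 3 and 5 and dependence costs 2 and 1, NEXT's tick becomes
   max (3 + 2, 5 + 1) == 6; with clock_var == 4 the resulting delay is
   2, so NEXT is queued for two cycles.  */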
3187 /* Move NEXT to the proper queue list with (DELAY >= 1),
3188 or add it to the ready list (DELAY == QUEUE_READY),
3189 or remove it from the ready and queue lists entirely (DELAY == QUEUE_NOWHERE). */
3190 static void
3191 change_queue_index (rtx next, int delay)
3193 int i = QUEUE_INDEX (next);
3195 gcc_assert (QUEUE_NOWHERE <= delay && delay <= max_insn_queue_index
3196 && delay != 0);
3197 gcc_assert (i != QUEUE_SCHEDULED);
3199 if ((delay > 0 && NEXT_Q_AFTER (q_ptr, delay) == i)
3200 || (delay < 0 && delay == i))
3201 /* We have nothing to do. */
3202 return;
3204 /* Remove NEXT from wherever it is now. */
3205 if (i == QUEUE_READY)
3206 ready_remove_insn (next);
3207 else if (i >= 0)
3208 queue_remove (next);
3210 /* Add it to the proper place. */
3211 if (delay == QUEUE_READY)
3212 ready_add (readyp, next, false);
3213 else if (delay >= 1)
3214 queue_insn (next, delay);
3216 if (sched_verbose >= 2)
3218 fprintf (sched_dump, ";;\t\ttick updated: insn %s",
3219 (*current_sched_info->print_insn) (next, 0));
3221 if (delay == QUEUE_READY)
3222 fprintf (sched_dump, " into ready\n");
3223 else if (delay >= 1)
3224 fprintf (sched_dump, " into queue with cost=%d\n", delay);
3225 else
3226 fprintf (sched_dump, " removed from ready or queue lists\n");
3230 /* INSN is being scheduled. Resolve the dependence between INSN and NEXT. */
3231 static void
3232 resolve_dep (rtx next, rtx insn)
3234 rtx dep;
3236 INSN_DEP_COUNT (next)--;
3238 dep = remove_list_elem (insn, &LOG_LINKS (next));
3239 XEXP (dep, 1) = RESOLVED_DEPS (next);
3240 RESOLVED_DEPS (next) = dep;
3242 gcc_assert ((INSN_DEP_COUNT (next) != 0 || !LOG_LINKS (next))
3243 && (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
3246 /* Extend H_I_D data. */
3247 static void
3248 extend_h_i_d (void)
3250 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
3251 pseudos which do not cross calls. */
3252 int new_max_uid = get_max_uid() + 1;
3254 h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
3255 old_max_uid = new_max_uid;
3257 if (targetm.sched.h_i_d_extended)
3258 targetm.sched.h_i_d_extended ();
3261 /* Extend READY, READY_TRY and CHOICE_STACK arrays.
3262 N_NEW_INSNS is the number of additional elements to allocate. */
3263 static void
3264 extend_ready (int n_new_insns)
3266 int i;
3268 readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
3269 readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
3271 ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
3272 rgn_n_insns + 1, sizeof (char));
3274 rgn_n_insns += n_new_insns;
3276 choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
3277 rgn_n_insns + 1);
3279 for (i = rgn_n_insns; n_new_insns--; i--)
3280 choice_stack[i].state = xmalloc (dfa_state_size);
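/* Sizing example (numbers made up): if rgn_n_insns was 10 on entry,
   n_new_insns == 1 and issue_rate == 2, then veclen becomes
   10 + 1 + 1 + 2 == 14 and rgn_n_insns becomes 11.  */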
3283 /* Extend global scheduler structures (those that live across calls to
3284 schedule_block) to include information about the just emitted INSN. */
3285 static void
3286 extend_global (rtx insn)
3288 gcc_assert (INSN_P (insn));
3289 /* These structures have scheduler scope. */
3290 extend_h_i_d ();
3291 init_h_i_d (insn);
3293 extend_dependency_caches (1, 0);
3296 /* Extend global and local scheduler structures to include information
3297 about the just emitted INSN. */
3298 static void
3299 extend_all (rtx insn)
3301 extend_global (insn);
3303 /* These structures have block scope. */
3304 extend_ready (1);
3306 (*current_sched_info->add_remove_insn) (insn, 0);
3309 /* Initialize the h_i_d entry of the new INSN with default values.
3310 Values that are not explicitly initialized here hold zero. */
3311 static void
3312 init_h_i_d (rtx insn)
3314 INSN_LUID (insn) = luid++;
3315 INSN_COST (insn) = -1;
3316 TODO_SPEC (insn) = HARD_DEP;
3317 QUEUE_INDEX (insn) = QUEUE_NOWHERE;
3318 INSN_TICK (insn) = INVALID_TICK;
3319 INTER_TICK (insn) = INVALID_TICK;
3320 find_insn_reg_weight1 (insn);
3323 /* Generates recovery code for INSN. */
3324 static void
3325 generate_recovery_code (rtx insn)
3327 if (TODO_SPEC (insn) & BEGIN_SPEC)
3328 begin_speculative_block (insn);
3330 /* Here we have an insn with no dependencies on
3331 instructions other than CHECK_SPEC ones. */
3333 if (TODO_SPEC (insn) & BE_IN_SPEC)
3334 add_to_speculative_block (insn);
3337 /* Helper function.
3338 Tries to add speculative dependencies of type FS between instructions
3339 in LINK list and TWIN. */
3340 static void
3341 process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
3343 for (; link; link = XEXP (link, 1))
3345 ds_t ds;
3346 rtx consumer;
3348 consumer = XEXP (link, 0);
3350 ds = DEP_STATUS (link);
3352 if (/* If we want to create a speculative dep. */
3353 fs
3354 /* And we can do that because this is a true dep. */
3355 && (ds & DEP_TYPES) == DEP_TRUE)
3357 gcc_assert (!(ds & BE_IN_SPEC));
3359 if (/* If this dep can be overcome with 'begin speculation'. */
3360 ds & BEGIN_SPEC)
3361 /* Then we have a choice: keep the dep 'begin speculative'
3362 or transform it into 'be in speculative'. */
3364 if (/* In try_ready we assert that if an insn once became ready
3365 it can be removed from the ready (or queue) list only
3366 due to a backend decision. Hence we can't let the
3367 probability of the speculative dep decrease. */
3368 dep_weak (ds) <= dep_weak (fs))
3369 /* Transform it to be in speculative. */
3370 ds = (ds & ~BEGIN_SPEC) | fs;
3372 else
3373 /* Mark the dep as 'be in speculative'. */
3374 ds |= fs;
3377 add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
3381 /* Generates recovery code for BEGIN speculative INSN. */
3382 static void
3383 begin_speculative_block (rtx insn)
3385 if (TODO_SPEC (insn) & BEGIN_DATA)
3386 nr_begin_data++;
3387 if (TODO_SPEC (insn) & BEGIN_CONTROL)
3388 nr_begin_control++;
3390 create_check_block_twin (insn, false);
3392 TODO_SPEC (insn) &= ~BEGIN_SPEC;
3395 /* Generates recovery code for BE_IN speculative INSN. */
3396 static void
3397 add_to_speculative_block (rtx insn)
3399 ds_t ts;
3400 rtx link, twins = NULL;
3402 ts = TODO_SPEC (insn);
3403 gcc_assert (!(ts & ~BE_IN_SPEC));
3405 if (ts & BE_IN_DATA)
3406 nr_be_in_data++;
3407 if (ts & BE_IN_CONTROL)
3408 nr_be_in_control++;
3410 TODO_SPEC (insn) &= ~BE_IN_SPEC;
3411 gcc_assert (!TODO_SPEC (insn));
3413 DONE_SPEC (insn) |= ts;
3415 /* First we convert all simple checks to branchy ones. */
3416 for (link = LOG_LINKS (insn); link;)
3418 rtx check;
3420 check = XEXP (link, 0);
3422 if (RECOVERY_BLOCK (check))
3424 create_check_block_twin (check, true);
3425 link = LOG_LINKS (insn);
3427 else
3428 link = XEXP (link, 1);
3431 clear_priorities (insn);
3435 rtx link, check, twin;
3436 basic_block rec;
3438 link = LOG_LINKS (insn);
3439 gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
3440 && (DEP_STATUS (link) & BE_IN_SPEC)
3441 && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3443 check = XEXP (link, 0);
3444 gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
3445 && QUEUE_INDEX (check) == QUEUE_NOWHERE);
3447 rec = BLOCK_FOR_INSN (check);
3449 twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
3450 extend_global (twin);
3452 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3454 if (sched_verbose && spec_info->dump)
3455 /* INSN_BB (insn) isn't determined for twin insns yet.
3456 So we can't use current_sched_info->print_insn. */
3457 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3458 INSN_UID (twin), rec->index);
3460 twins = alloc_INSN_LIST (twin, twins);
3462 /* Add dependences between TWIN and all appropriate
3463 instructions from REC. */
3466 add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
3470 link = XEXP (link, 1);
3471 if (link)
3473 check = XEXP (link, 0);
3474 if (BLOCK_FOR_INSN (check) == rec)
3475 break;
3477 else
3478 break;
3480 while (1);
3482 while (link);
3484 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
3486 for (link = LOG_LINKS (insn); link;)
3488 check = XEXP (link, 0);
3490 if (BLOCK_FOR_INSN (check) == rec)
3492 delete_back_forw_dep (insn, check);
3493 link = LOG_LINKS (insn);
3495 else
3496 link = XEXP (link, 1);
3499 while (LOG_LINKS (insn));
3501 /* We can't add the dependence between insn and twin earlier because
3502 that would make twin appear in the INSN_DEPEND (insn). */
3503 while (twins)
3505 rtx twin;
3507 twin = XEXP (twins, 0);
3508 calc_priorities (twin);
3509 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3511 twin = XEXP (twins, 1);
3512 free_INSN_LIST_node (twins);
3513 twins = twin;
3517 /* Extend the array pointed to by P, zeroing only the new part. */
3518 void *
3519 xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
3521 gcc_assert (new_nmemb >= old_nmemb);
3522 p = XRESIZEVAR (void, p, new_nmemb * size);
3523 memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
3524 return p;
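/* Usage sketch (sizes made up): grow a zero-initialized table from
   four to eight elements; the four new entries come back zeroed.  */
#if 0
  int *tbl = xrecalloc (NULL, 4, 0, sizeof (int));  /* Four zeroed ints.  */
  tbl = xrecalloc (tbl, 8, 4, sizeof (int));        /* Four more, zeroed.  */
#endif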
3527 /* Return the probability of speculation success for the speculation
3528 status DS. */
3529 static dw_t
3530 dep_weak (ds_t ds)
3532 ds_t res = 1, dt;
3533 int n = 0;
3535 dt = FIRST_SPEC_TYPE;
3538 if (ds & dt)
3540 res *= (ds_t) get_dep_weak (ds, dt);
3541 n++;
3544 if (dt == LAST_SPEC_TYPE)
3545 break;
3546 dt <<= SPEC_TYPE_SHIFT;
3548 while (1);
3550 gcc_assert (n);
3551 while (--n)
3552 res /= MAX_DEP_WEAK;
3554 if (res < MIN_DEP_WEAK)
3555 res = MIN_DEP_WEAK;
3557 gcc_assert (res <= MAX_DEP_WEAK);
3559 return (dw_t) res;
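/* Worked example (weights made up): assuming MAX_DEP_WEAK == 1024,
   two speculation types of weakness 512 each -- i.e. success
   probability 1/2 -- combine to 512 * 512 / 1024 == 256, the
   fixed-point encoding of the joint probability 1/4.  */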
3562 /* Helper function.
3563 Find fallthru edge from PRED. */
3564 static edge
3565 find_fallthru_edge (basic_block pred)
3567 edge e;
3568 edge_iterator ei;
3569 basic_block succ;
3571 succ = pred->next_bb;
3572 gcc_assert (succ->prev_bb == pred);
3574 if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
3576 FOR_EACH_EDGE (e, ei, pred->succs)
3577 if (e->flags & EDGE_FALLTHRU)
3579 gcc_assert (e->dest == succ);
3580 return e;
3583 else
3585 FOR_EACH_EDGE (e, ei, succ->preds)
3586 if (e->flags & EDGE_FALLTHRU)
3588 gcc_assert (e->src == pred);
3589 return e;
3593 return NULL;
3596 /* Initialize BEFORE_RECOVERY variable. */
3597 static void
3598 init_before_recovery (void)
3600 basic_block last;
3601 edge e;
3603 last = EXIT_BLOCK_PTR->prev_bb;
3604 e = find_fallthru_edge (last);
3606 if (e)
3608 /* We create two basic blocks:
3609 1. A single-instruction block, inserted right after E->SRC,
3610 which jumps to
3611 2. An empty block placed right before EXIT_BLOCK.
3612 Recovery blocks will be emitted between these two blocks. */
3614 basic_block single, empty;
3615 rtx x, label;
3617 single = create_empty_bb (last);
3618 empty = create_empty_bb (single);
3620 single->count = last->count;
3621 empty->count = last->count;
3622 single->frequency = last->frequency;
3623 empty->frequency = last->frequency;
3624 BB_COPY_PARTITION (single, last);
3625 BB_COPY_PARTITION (empty, last);
3627 redirect_edge_succ (e, single);
3628 make_single_succ_edge (single, empty, 0);
3629 make_single_succ_edge (empty, EXIT_BLOCK_PTR,
3630 EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
3632 label = block_label (empty);
3633 x = emit_jump_insn_after (gen_jump (label), BB_END (single));
3634 JUMP_LABEL (x) = label;
3635 LABEL_NUSES (label)++;
3636 extend_global (x);
3638 emit_barrier_after (x);
3640 add_block (empty, 0);
3641 add_block (single, 0);
3643 before_recovery = single;
3645 if (sched_verbose >= 2 && spec_info->dump)
3646 fprintf (spec_info->dump,
3647 ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
3648 last->index, single->index, empty->index);
3650 else
3651 before_recovery = last;
3654 /* Return a new recovery block. */
3655 static basic_block
3656 create_recovery_block (void)
3658 rtx label;
3659 basic_block rec;
3661 added_recovery_block_p = true;
3663 if (!before_recovery)
3664 init_before_recovery ();
3666 label = gen_label_rtx ();
3667 gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
3668 label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
3670 rec = create_basic_block (label, label, before_recovery);
3671 emit_barrier_after (BB_END (rec));
3673 if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
3674 BB_SET_PARTITION (rec, BB_COLD_PARTITION);
3676 if (sched_verbose && spec_info->dump)
3677 fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
3678 rec->index);
3680 before_recovery = rec;
3682 return rec;
3685 /* This function creates recovery code for INSN. If MUTATE_P is nonzero,
3686 INSN is a simple check that should be converted to a branchy one. */
3687 static void
3688 create_check_block_twin (rtx insn, bool mutate_p)
3690 basic_block rec;
3691 rtx label, check, twin, link;
3692 ds_t fs;
3694 gcc_assert (ORIG_PAT (insn)
3695 && (!mutate_p
3696 || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
3697 && !(TODO_SPEC (insn) & SPECULATIVE))));
3699 /* Create recovery block. */
3700 if (mutate_p || targetm.sched.needs_block_p (insn))
3702 rec = create_recovery_block ();
3703 label = BB_HEAD (rec);
3705 else
3707 rec = EXIT_BLOCK_PTR;
3708 label = 0;
3711 /* Emit CHECK. */
3712 check = targetm.sched.gen_check (insn, label, mutate_p);
3714 if (rec != EXIT_BLOCK_PTR)
3716 /* To have mem_reg alive at the beginning of second_bb,
3717 we emit the check BEFORE insn, so that after splitting,
3718 insn will be at the beginning of second_bb, which will
3719 provide us with the correct life information. */
3720 check = emit_jump_insn_before (check, insn);
3721 JUMP_LABEL (check) = label;
3722 LABEL_NUSES (label)++;
3724 else
3725 check = emit_insn_before (check, insn);
3727 /* Extend data structures. */
3728 extend_all (check);
3729 RECOVERY_BLOCK (check) = rec;
3731 if (sched_verbose && spec_info->dump)
3732 fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
3733 (*current_sched_info->print_insn) (check, 0));
3735 gcc_assert (ORIG_PAT (insn));
3737 /* Initialize TWIN (twin is a duplicate of original instruction
3738 in the recovery block). */
3739 if (rec != EXIT_BLOCK_PTR)
3741 rtx link;
3743 for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
3744 if (DEP_STATUS (link) & DEP_OUTPUT)
3746 RESOLVED_DEPS (check) =
3747 alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
3748 PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
3751 twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
3752 extend_global (twin);
3754 if (sched_verbose && spec_info->dump)
3755 /* INSN_BB (insn) isn't determined for twin insns yet.
3756 So we can't use current_sched_info->print_insn. */
3757 fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
3758 INSN_UID (twin), rec->index);
3760 else
3762 ORIG_PAT (check) = ORIG_PAT (insn);
3763 HAS_INTERNAL_DEP (check) = 1;
3764 twin = check;
3765 /* ??? We probably should change all OUTPUT dependencies to
3766 (TRUE | OUTPUT). */
3769 RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
3771 if (rec != EXIT_BLOCK_PTR)
3772 /* In case of branchy check, fix CFG. */
3774 basic_block first_bb, second_bb;
3775 rtx jump;
3776 edge e;
3777 int edge_flags;
3779 first_bb = BLOCK_FOR_INSN (check);
3780 e = split_block (first_bb, check);
3781 /* split_block emits a note if *check == BB_END. Probably it
3782 is better to rip that note off. */
3783 gcc_assert (e->src == first_bb);
3784 second_bb = e->dest;
3786 /* This fixes the incoming edge. */
3787 /* ??? Which other flags should be specified? */
3788 if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
3789 /* Partition type is the same, if it is "unpartitioned". */
3790 edge_flags = EDGE_CROSSING;
3791 else
3792 edge_flags = 0;
3794 e = make_edge (first_bb, rec, edge_flags);
3796 add_block (second_bb, first_bb);
3798 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
3799 label = block_label (second_bb);
3800 jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
3801 JUMP_LABEL (jump) = label;
3802 LABEL_NUSES (label)++;
3803 extend_global (jump);
3805 if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
3806 /* Partition type is the same, if it is "unpartitioned". */
3808 /* Rewritten from cfgrtl.c. */
3809 if (flag_reorder_blocks_and_partition
3810 && targetm.have_named_sections
3811 /*&& !any_condjump_p (jump)*/)
3812 /* any_condjump_p (jump) == false.
3813 We don't need the same note for the check because
3814 any_condjump_p (check) == true. */
3816 REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
3817 NULL_RTX,
3818 REG_NOTES (jump));
3820 edge_flags = EDGE_CROSSING;
3822 else
3823 edge_flags = 0;
3825 make_single_succ_edge (rec, second_bb, edge_flags);
3827 add_block (rec, EXIT_BLOCK_PTR);
3830 /* Move backward dependences from INSN to CHECK and
3831 move forward dependences from INSN to TWIN. */
3832 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3834 ds_t ds;
3836 /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
3837 check --TRUE--> producer ??? or ANTI ???
3838 twin --TRUE--> producer
3839 twin --ANTI--> check
3841 If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
3842 check --ANTI--> producer
3843 twin --ANTI--> producer
3844 twin --ANTI--> check
3846 If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
3847 check ~~TRUE~~> producer
3848 twin ~~TRUE~~> producer
3849 twin --ANTI--> check */
3851 ds = DEP_STATUS (link);
3853 if (ds & BEGIN_SPEC)
3855 gcc_assert (!mutate_p);
3856 ds &= ~BEGIN_SPEC;
3859 if (rec != EXIT_BLOCK_PTR)
3861 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3862 add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3864 else
3865 add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
3868 for (link = LOG_LINKS (insn); link;)
3869 if ((DEP_STATUS (link) & BEGIN_SPEC)
3870 || mutate_p)
3871 /* We can delete this dep only if we totally overcome it with
3872 BEGIN_SPECULATION. */
3874 delete_back_forw_dep (insn, XEXP (link, 0));
3875 link = LOG_LINKS (insn);
3877 else
3878 link = XEXP (link, 1);
3880 fs = 0;
3882 /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
3883 here. */
3885 gcc_assert (!DONE_SPEC (insn));
3887 if (!mutate_p)
3889 ds_t ts = TODO_SPEC (insn);
3891 DONE_SPEC (insn) = ts & BEGIN_SPEC;
3892 CHECK_SPEC (check) = ts & BEGIN_SPEC;
3894 if (ts & BEGIN_DATA)
3895 fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
3896 if (ts & BEGIN_CONTROL)
3897 fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
3899 else
3900 CHECK_SPEC (check) = CHECK_SPEC (insn);
3902 /* Future speculations: call the helper. */
3903 process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
3905 if (rec != EXIT_BLOCK_PTR)
3907 /* Which types of dependencies to use here is, generally, a
3908 machine-dependent question... But, for now,
3909 it is not. */
3911 if (!mutate_p)
3913 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
3914 add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
3916 else
3918 if (spec_info->dump)
3919 fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
3920 (*current_sched_info->print_insn) (insn, 0));
3922 for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
3923 delete_back_forw_dep (XEXP (link, 0), insn);
3925 if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
3926 try_ready (check);
3928 sched_remove_insn (insn);
3931 add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
3933 else
3934 add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
3936 if (!mutate_p)
3937 /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
3938 because it'll be done later in add_to_speculative_block. */
3940 clear_priorities (twin);
3941 calc_priorities (twin);
3945 /* Remove dependencies between instructions in the recovery block REC
3946 and ordinary region instructions. Inner dependences are kept so
3947 they won't need to be recomputed. */
3948 static void
3949 fix_recovery_deps (basic_block rec)
3951 rtx note, insn, link, jump, ready_list = 0;
3952 bitmap_head in_ready;
3954 bitmap_initialize (&in_ready, 0);
3956 /* NOTE - a basic block note. */
3957 note = NEXT_INSN (BB_HEAD (rec));
3958 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
3959 insn = BB_END (rec);
3960 gcc_assert (JUMP_P (insn));
3961 insn = PREV_INSN (insn);
3965 for (link = INSN_DEPEND (insn); link;)
3967 rtx consumer;
3969 consumer = XEXP (link, 0);
3971 if (BLOCK_FOR_INSN (consumer) != rec)
3973 delete_back_forw_dep (consumer, insn);
3975 if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
3977 ready_list = alloc_INSN_LIST (consumer, ready_list);
3978 bitmap_set_bit (&in_ready, INSN_LUID (consumer));
3981 link = INSN_DEPEND (insn);
3983 else
3985 gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
3987 link = XEXP (link, 1);
3991 insn = PREV_INSN (insn);
3993 while (insn != note);
3995 bitmap_clear (&in_ready);
3997 /* Try to add instructions to the ready or queue list. */
3998 for (link = ready_list; link; link = XEXP (link, 1))
3999 try_ready (XEXP (link, 0));
4000 free_INSN_LIST_list (&ready_list);
4002 /* Fix the jump's dependences. */
4003 insn = BB_HEAD (rec);
4004 jump = BB_END (rec);
4006 gcc_assert (LABEL_P (insn));
4007 insn = NEXT_INSN (insn);
4009 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4010 add_jump_dependencies (insn, jump);
4013 /* The function saves line notes at the beginning of block B. */
4014 static void
4015 associate_line_notes_with_blocks (basic_block b)
4017 rtx line;
4019 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
4020 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4022 line_note_head[b->index] = line;
4023 break;
4025 /* Do a forward search as well, since we won't get to see the first
4026 notes in a basic block. */
4027 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
4029 if (INSN_P (line))
4030 break;
4031 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
4032 line_note_head[b->index] = line;
4036 /* Change the pattern of INSN to NEW_PAT. */
4037 static void
4038 change_pattern (rtx insn, rtx new_pat)
4040 int t;
4042 t = validate_change (insn, &PATTERN (insn), new_pat, 0);
4043 gcc_assert (t);
4044 /* Invalidate INSN_COST, so it'll be recalculated. */
4045 INSN_COST (insn) = -1;
4046 /* Invalidate INSN_TICK, so it'll be recalculated. */
4047 INSN_TICK (insn) = INVALID_TICK;
4048 dfa_clear_single_insn_cache (insn);
4052 /* -1 - can't speculate,
4053 0 - for speculation with REQUEST mode it is OK to use the
4054 current instruction pattern,
4055 1 - need to change the pattern to *NEW_PAT for INSN to be speculative. */
4056 static int
4057 speculate_insn (rtx insn, ds_t request, rtx *new_pat)
4059 gcc_assert (current_sched_info->flags & DO_SPECULATION
4060 && (request & SPECULATIVE));
4062 if (!NONJUMP_INSN_P (insn)
4063 || HAS_INTERNAL_DEP (insn)
4064 || SCHED_GROUP_P (insn)
4065 || side_effects_p (PATTERN (insn))
4066 || (request & spec_info->mask) != request)
4067 return -1;
4069 gcc_assert (!RECOVERY_BLOCK (insn));
4071 if (request & BE_IN_SPEC)
4073 if (may_trap_p (PATTERN (insn)))
4074 return -1;
4076 if (!(request & BEGIN_SPEC))
4077 return 0;
4080 return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
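/* A sketch of the expected use (the real callers live elsewhere in
   this file):

     rtx new_pat;
     int res = speculate_insn (insn, request, &new_pat);

     if (res < 0)
       don't speculate INSN;
     else if (res > 0)
       change_pattern (insn, new_pat);
     with res == 0, INSN's current pattern is already usable.  */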
4083 /* Print some information about block BB, which starts with HEAD and
4084 ends with TAIL, before scheduling it.
4085 I is zero if the scheduler is about to start with a fresh ebb. */
4086 static void
4087 dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
4089 if (!i)
4090 fprintf (sched_dump,
4091 ";; ======================================================\n");
4092 else
4093 fprintf (sched_dump,
4094 ";; =====================ADVANCING TO=====================\n");
4095 fprintf (sched_dump,
4096 ";; -- basic block %d from %d to %d -- %s reload\n",
4097 bb->index, INSN_UID (head), INSN_UID (tail),
4098 (reload_completed ? "after" : "before"));
4099 fprintf (sched_dump,
4100 ";; ======================================================\n");
4101 fprintf (sched_dump, "\n");
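/* With the formats above, the header looks roughly like this (block
   and insn UID numbers are illustrative):

   ;; ======================================================
   ;; -- basic block 3 from 40 to 57 -- before reload
   ;; ======================================================  */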
4104 /* Unlink basic block notes and labels and save them, so they
4105 can easily be restored. We unlink basic block notes in the EBB to
4106 provide backward compatibility with the previous code, as target backends
4107 assume that there will be only instructions between
4108 current_sched_info->{head and tail}. We restore these notes as soon
4109 as we can.
4110 FIRST (LAST) is the first (last) basic block in the ebb.
4111 NB: In the usual case (FIRST == LAST) nothing is actually done. */
4112 void
4113 unlink_bb_notes (basic_block first, basic_block last)
4115 /* We DON'T unlink basic block notes of the first block in the ebb. */
4116 if (first == last)
4117 return;
4119 bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
4121 /* Make a sentinel. */
4122 if (last->next_bb != EXIT_BLOCK_PTR)
4123 bb_header[last->next_bb->index] = 0;
4125 first = first->next_bb;
4128 rtx prev, label, note, next;
4130 label = BB_HEAD (last);
4131 if (LABEL_P (label))
4132 note = NEXT_INSN (label);
4133 else
4134 note = label;
4135 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4137 prev = PREV_INSN (label);
4138 next = NEXT_INSN (note);
4139 gcc_assert (prev && next);
4141 NEXT_INSN (prev) = next;
4142 PREV_INSN (next) = prev;
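/* The label/note pair is now spliced out of the insn stream, but its
   own links are left intact, so restore_bb_notes can reinsert the pair
   from the pointer saved below.  */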
4144 bb_header[last->index] = label;
4146 if (last == first)
4147 break;
4149 last = last->prev_bb;
4151 while (1);
4154 /* Restore basic block notes.
4155 FIRST is the first basic block in the ebb. */
4156 static void
4157 restore_bb_notes (basic_block first)
4159 if (!bb_header)
4160 return;
4162 /* We DON'T unlink basic block notes of the first block in the ebb. */
4163 first = first->next_bb;
4164 /* Remember: FIRST is actually the second basic block in the ebb. */
4166 while (first != EXIT_BLOCK_PTR
4167 && bb_header[first->index])
4169 rtx prev, label, note, next;
4171 label = bb_header[first->index];
4172 prev = PREV_INSN (label);
4173 next = NEXT_INSN (prev);
4175 if (LABEL_P (label))
4176 note = NEXT_INSN (label);
4177 else
4178 note = label;
4179 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4181 bb_header[first->index] = 0;
4183 NEXT_INSN (prev) = label;
4184 NEXT_INSN (note) = next;
4185 PREV_INSN (next) = note;
4187 first = first->next_bb;
4190 free (bb_header);
4191 bb_header = 0;
4194 /* Extend per basic block data structures of the scheduler.
4195 If BB is NULL, initialize structures for the whole CFG.
4196 Otherwise, initialize them for the just created BB. */
4197 static void
4198 extend_bb (basic_block bb)
4200 rtx insn;
4202 if (write_symbols != NO_DEBUG)
4204 /* Save-line-note-head:
4205 Determine the line-number at the start of each basic block.
4206 This must be computed and saved now, because after a basic block's
4207 predecessor has been scheduled, it is impossible to accurately
4208 determine the correct line number for the first insn of the block. */
4209 line_note_head = xrecalloc (line_note_head, last_basic_block,
4210 old_last_basic_block,
4211 sizeof (*line_note_head));
4213 if (bb)
4214 associate_line_notes_with_blocks (bb);
4215 else
4216 FOR_EACH_BB (bb)
4217 associate_line_notes_with_blocks (bb);
4220 old_last_basic_block = last_basic_block;
4222 if (current_sched_info->flags & USE_GLAT)
4224 glat_start = xrealloc (glat_start,
4225 last_basic_block * sizeof (*glat_start));
4226 glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
4229 /* The following is done to keep current_sched_info->next_tail non-null. */
4231 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
4232 if (NEXT_INSN (insn) == 0
4233 || (!NOTE_P (insn)
4234 && !LABEL_P (insn)
4235 /* Don't emit a NOTE if it would end up before a BARRIER. */
4236 && !BARRIER_P (NEXT_INSN (insn))))
4238 emit_note_after (NOTE_INSN_DELETED, insn);
4239 /* Make insn appear outside BB. */
4240 BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
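/* The note emitted above is deliberately left outside the block
   (BB_END is reset to INSN), so NEXT_INSN (BB_END (...)), and thus
   next_tail, is guaranteed to be non-null.  */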
4244 /* Add a basic block BB to extended basic block EBB.
4245 If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
4246 If EBB is NULL, then BB should be a new region. */
4247 void
4248 add_block (basic_block bb, basic_block ebb)
4250 gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
4251 && bb->il.rtl->global_live_at_start == 0
4252 && bb->il.rtl->global_live_at_end == 0);
4254 extend_bb (bb);
4256 glat_start[bb->index] = 0;
4257 glat_end[bb->index] = 0;
4259 if (current_sched_info->add_block)
4260 /* This changes only data structures of the front-end. */
4261 current_sched_info->add_block (bb, ebb);
4264 /* Helper function.
4265 Fix CFG after both in- and inter-block movement of
4266 control_flow_insn_p JUMP. */
4267 static void
4268 fix_jump_move (rtx jump)
4270 basic_block bb, jump_bb, jump_bb_next;
4272 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4273 jump_bb = BLOCK_FOR_INSN (jump);
4274 jump_bb_next = jump_bb->next_bb;
4276 gcc_assert (current_sched_info->flags & SCHED_EBB
4277 || (RECOVERY_BLOCK (jump)
4278 && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));
4280 if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
4281 /* if jump_bb_next is not empty. */
4282 BB_END (jump_bb) = BB_END (jump_bb_next);
4284 if (BB_END (bb) != PREV_INSN (jump))
4285 /* Then there are instructions after jump that should be placed
4286 in jump_bb_next. */
4287 BB_END (jump_bb_next) = BB_END (bb);
4288 else
4289 /* Otherwise jump_bb_next is empty. */
4290 BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
4292 /* To make the assertion in move_insn happy. */
4293 BB_END (bb) = PREV_INSN (jump);
4295 update_bb_for_insn (jump_bb_next);
4298 /* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
4299 static void
4300 move_block_after_check (rtx jump)
4302 basic_block bb, jump_bb, jump_bb_next;
4303 VEC(edge,gc) *t;
4305 bb = BLOCK_FOR_INSN (PREV_INSN (jump));
4306 jump_bb = BLOCK_FOR_INSN (jump);
4307 jump_bb_next = jump_bb->next_bb;
4309 update_bb_for_insn (jump_bb);
4311 gcc_assert (RECOVERY_BLOCK (jump)
4312 || RECOVERY_BLOCK (BB_END (jump_bb_next)));
4314 unlink_block (jump_bb_next);
4315 link_block (jump_bb_next, bb);
4317 t = bb->succs;
4318 bb->succs = 0;
4319 move_succs (&(jump_bb->succs), bb);
4320 move_succs (&(jump_bb_next->succs), jump_bb);
4321 move_succs (&t, jump_bb_next);
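/* The three moves above rotate the successor vectors into the new
   block order: BB takes JUMP_BB's successors, JUMP_BB takes
   JUMP_BB_NEXT's, and JUMP_BB_NEXT receives BB's original ones,
   saved in T.  */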
4323 if (current_sched_info->fix_recovery_cfg)
4324 current_sched_info->fix_recovery_cfg
4325 (bb->index, jump_bb->index, jump_bb_next->index);
4328 /* Helper function for move_block_after_check.
4329 This function attaches the edge vector pointed to by SUCCSP to
4330 block TO. */
4331 static void
4332 move_succs (VEC(edge,gc) **succsp, basic_block to)
4334 edge e;
4335 edge_iterator ei;
4337 gcc_assert (to->succs == 0);
4339 to->succs = *succsp;
4341 FOR_EACH_EDGE (e, ei, to->succs)
4342 e->src = to;
4344 *succsp = 0;
4347 /* Initialize GLAT (global_live_at_{start, end}) structures.
4348 GLAT structures are used as substitutes for the global_live_at_{start, end}
4349 regsets during scheduling. This is necessary for using such functions as
4350 split_block (), as they assume consistent register live information. */
4351 static void
4352 init_glat (void)
4354 basic_block bb;
4356 FOR_ALL_BB (bb)
4357 init_glat1 (bb);
4360 /* Helper function for init_glat. */
4361 static void
4362 init_glat1 (basic_block bb)
4364 gcc_assert (bb->il.rtl->global_live_at_start != 0
4365 && bb->il.rtl->global_live_at_end != 0);
4367 glat_start[bb->index] = bb->il.rtl->global_live_at_start;
4368 glat_end[bb->index] = bb->il.rtl->global_live_at_end;
4370 if (current_sched_info->flags & DETACH_LIFE_INFO)
4372 bb->il.rtl->global_live_at_start = 0;
4373 bb->il.rtl->global_live_at_end = 0;
4377 /* Attach reg_live_info back to basic blocks.
4378 Also save the regsets that should not have been changed during scheduling,
4379 for checking purposes (see check_reg_live). */
4380 void
4381 attach_life_info (void)
4383 basic_block bb;
4385 FOR_ALL_BB (bb)
4386 attach_life_info1 (bb);
4389 /* Helper function for attach_life_info. */
4390 static void
4391 attach_life_info1 (basic_block bb)
4393 gcc_assert (bb->il.rtl->global_live_at_start == 0
4394 && bb->il.rtl->global_live_at_end == 0);
4396 if (glat_start[bb->index])
4398 gcc_assert (glat_end[bb->index]);
4400 bb->il.rtl->global_live_at_start = glat_start[bb->index];
4401 bb->il.rtl->global_live_at_end = glat_end[bb->index];
4403 /* Make them NULL, so they won't be freed in free_glat. */
4404 glat_start[bb->index] = 0;
4405 glat_end[bb->index] = 0;
4407 #ifdef ENABLE_CHECKING
4408 if (bb->index < NUM_FIXED_BLOCKS
4409 || current_sched_info->region_head_or_leaf_p (bb, 0))
4411 glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
4412 COPY_REG_SET (glat_start[bb->index],
4413 bb->il.rtl->global_live_at_start);
4416 if (bb->index < NUM_FIXED_BLOCKS
4417 || current_sched_info->region_head_or_leaf_p (bb, 1))
4419 glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
4420 COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
4422 #endif
4424 else
4426 gcc_assert (!glat_end[bb->index]);
4428 bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
4429 bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
4433 /* Free GLAT information. */
4434 static void
4435 free_glat (void)
4437 #ifdef ENABLE_CHECKING
4438 if (current_sched_info->flags & DETACH_LIFE_INFO)
4440 basic_block bb;
4442 FOR_ALL_BB (bb)
4444 if (glat_start[bb->index])
4445 FREE_REG_SET (glat_start[bb->index]);
4446 if (glat_end[bb->index])
4447 FREE_REG_SET (glat_end[bb->index]);
4450 #endif
4452 free (glat_start);
4453 free (glat_end);
4456 /* Remove INSN from the instruction stream.
4457 INSN should not have any dependencies. */
4458 static void
4459 sched_remove_insn (rtx insn)
4461 change_queue_index (insn, QUEUE_NOWHERE);
4462 current_sched_info->add_remove_insn (insn, 1);
4463 remove_insn (insn);
4466 /* Clear the priorities of all instructions that are
4467 forward dependent on INSN. */
4468 static void
4469 clear_priorities (rtx insn)
4471 rtx link;
4473 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4475 rtx pro;
4477 pro = XEXP (link, 0);
4478 if (INSN_PRIORITY_KNOWN (pro))
4480 INSN_PRIORITY_KNOWN (pro) = 0;
4481 clear_priorities (pro);
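/* An insn's priority is computed from the priorities of the insns that
   depend on it (see priority ()), so once INSN changes, the cached
   priorities of all of its producers become stale as well and must be
   cleared transitively.  */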
4486 /* Recompute the priorities of instructions whose priorities might have
4487 changed due to changes in INSN. */
4488 static void
4489 calc_priorities (rtx insn)
4491 rtx link;
4493 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4495 rtx pro;
4497 pro = XEXP (link, 0);
4498 if (!INSN_PRIORITY_KNOWN (pro))
4500 priority (pro);
4501 calc_priorities (pro);
4507 /* Add dependences between JUMP and other instructions in the recovery
4508 block. INSN is the first insn in the recovery block. */
4509 static void
4510 add_jump_dependencies (rtx insn, rtx jump)
4514 insn = NEXT_INSN (insn);
4515 if (insn == jump)
4516 break;
4518 if (!INSN_DEPEND (insn))
4519 add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
4521 while (1);
4522 gcc_assert (LOG_LINKS (jump));
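/* Note that only insns with an empty INSN_DEPEND list get an explicit
   anti dependence on the jump; everything else reaches the jump
   through them, which is enough to keep the jump scheduled last.  */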
4525 /* Return the NOTE_INSN_BASIC_BLOCK of BB. */
4526 static rtx
4527 bb_note (basic_block bb)
4529 rtx note;
4531 note = BB_HEAD (bb);
4532 if (LABEL_P (note))
4533 note = NEXT_INSN (note);
4535 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
4536 return note;
4539 #ifdef ENABLE_CHECKING
4540 extern void debug_spec_status (ds_t);
4542 /* Dump information about the dependence status S. */
4543 void
4544 debug_spec_status (ds_t s)
4546 FILE *f = stderr;
4548 if (s & BEGIN_DATA)
4549 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
4550 if (s & BE_IN_DATA)
4551 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
4552 if (s & BEGIN_CONTROL)
4553 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
4554 if (s & BE_IN_CONTROL)
4555 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
4557 if (s & HARD_DEP)
4558 fprintf (f, "HARD_DEP; ");
4560 if (s & DEP_TRUE)
4561 fprintf (f, "DEP_TRUE; ");
4562 if (s & DEP_ANTI)
4563 fprintf (f, "DEP_ANTI; ");
4564 if (s & DEP_OUTPUT)
4565 fprintf (f, "DEP_OUTPUT; ");
4567 fprintf (f, "\n");
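/* E.g. a status combining weak data speculation with a true dependence
   would print (the weakness value is illustrative):
   BEGIN_DATA: 3; DEP_TRUE;  */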
4570 /* Helper function for check_cfg.
4571 Return nonzero if the edge vector pointed to by EL has an edge with
4572 TYPE in its flags. */
4573 static int
4574 has_edge_p (VEC(edge,gc) *el, int type)
4576 edge e;
4577 edge_iterator ei;
4579 FOR_EACH_EDGE (e, ei, el)
4580 if (e->flags & type)
4581 return 1;
4582 return 0;
4585 /* Check a few properties of the CFG between HEAD and TAIL.
4586 If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
4587 instruction stream. */
4588 static void
4589 check_cfg (rtx head, rtx tail)
4591 rtx next_tail;
4592 basic_block bb = 0;
4593 int not_first = 0, not_last;
4595 if (head == NULL)
4596 head = get_insns ();
4597 if (tail == NULL)
4598 tail = get_last_insn ();
4599 next_tail = NEXT_INSN (tail);
4603 not_last = head != tail;
4605 if (not_first)
4606 gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
4607 if (not_last)
4608 gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
4610 if (LABEL_P (head)
4611 || (NOTE_INSN_BASIC_BLOCK_P (head)
4612 && (!not_first
4613 || (not_first && !LABEL_P (PREV_INSN (head))))))
4615 gcc_assert (bb == 0);
4616 bb = BLOCK_FOR_INSN (head);
4617 if (bb != 0)
4618 gcc_assert (BB_HEAD (bb) == head);
4619 else
4620 /* This is the case of a jump table. See inside_basic_block_p (). */
4621 gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
4624 if (bb == 0)
4626 gcc_assert (!inside_basic_block_p (head));
4627 head = NEXT_INSN (head);
4629 else
4631 gcc_assert (inside_basic_block_p (head)
4632 || NOTE_P (head));
4633 gcc_assert (BLOCK_FOR_INSN (head) == bb);
4635 if (LABEL_P (head))
4637 head = NEXT_INSN (head);
4638 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
4640 else
4642 if (control_flow_insn_p (head))
4644 gcc_assert (BB_END (bb) == head);
4646 if (any_uncondjump_p (head))
4647 gcc_assert (EDGE_COUNT (bb->succs) == 1
4648 && BARRIER_P (NEXT_INSN (head)));
4649 else if (any_condjump_p (head))
4650 gcc_assert (EDGE_COUNT (bb->succs) > 1
4651 && !BARRIER_P (NEXT_INSN (head)));
4653 if (BB_END (bb) == head)
4655 if (EDGE_COUNT (bb->succs) > 1)
4656 gcc_assert (control_flow_insn_p (head)
4657 || has_edge_p (bb->succs, EDGE_COMPLEX));
4658 bb = 0;
4661 head = NEXT_INSN (head);
4665 not_first = 1;
4667 while (head != next_tail);
4669 gcc_assert (bb == 0);
4672 /* Perform a few consistency checks of flags in different data structures. */
4673 static void
4674 check_sched_flags (void)
4676 unsigned int f = current_sched_info->flags;
4678 if (flag_sched_stalled_insns)
4679 gcc_assert (!(f & DO_SPECULATION));
4680 if (f & DO_SPECULATION)
4681 gcc_assert (!flag_sched_stalled_insns
4682 && (f & DETACH_LIFE_INFO)
4683 && spec_info
4684 && spec_info->mask);
4685 if (f & DETACH_LIFE_INFO)
4686 gcc_assert (f & USE_GLAT);
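/* In other words, the expected implication chain is
   DO_SPECULATION => DETACH_LIFE_INFO => USE_GLAT, and speculative
   scheduling is incompatible with -fsched-stalled-insns.  */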
4689 /* Check the global_live_at_{start, end} regsets.
4690 If FATAL_P is TRUE, abort execution at the first failure.
4691 Otherwise, print diagnostics to stderr (this mode is for calling
4692 from the debugger). */
4693 void
4694 check_reg_live (bool fatal_p)
4696 basic_block bb;
4698 FOR_ALL_BB (bb)
4700 int i;
4702 i = bb->index;
4704 if (glat_start[i])
4706 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_start,
4707 glat_start[i]);
4709 if (!b)
4711 gcc_assert (!fatal_p);
4713 fprintf (stderr, ";; check_reg_live_at_start (%d) failed.\n", i);
4717 if (glat_end[i])
4719 bool b = bitmap_equal_p (bb->il.rtl->global_live_at_end,
4720 glat_end[i]);
4722 if (!b)
4724 gcc_assert (!fatal_p);
4726 fprintf (stderr, ";; check_reg_live_at_end (%d) failed.\n", i);
4731 #endif /* ENABLE_CHECKING */
4733 #endif /* INSN_SCHEDULING */