1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 02111-1307, USA. */
24 /* Instruction scheduling pass. This file, along with sched-deps.c,
25 contains the generic parts. The actual entry point for the normal
26 instruction scheduling pass is found in sched-rgn.c.
28 We compute insn priorities based on data dependencies. Flow
29 analysis only creates a fraction of the data-dependencies we must
30 observe: namely, only those dependencies which the combiner can be
31 expected to use. For this pass, we must therefore create the
32 remaining dependencies we need to observe: register dependencies,
33 memory dependencies, dependencies to keep function calls in order,
34 and the dependence between a conditional branch and the setting of
35 condition codes are all dealt with here.
37 The scheduler first traverses the data flow graph, starting with
38 the last instruction, and proceeding to the first, assigning values
39 to insn_priority as it goes. This sorts the instructions
40 topologically by data dependence.
42 Once priorities have been established, we order the insns using
43 list scheduling. This works as follows: starting with a list of
44 all the ready insns, and sorted according to priority number, we
45 schedule the insn from the end of the list by placing its
46 predecessors in the list according to their priority order. We
47 consider this insn scheduled by setting the pointer to the "end" of
48 the list to point to the previous insn. When an insn has no
49 predecessors, we either queue it until sufficient time has elapsed
50 or add it to the ready list. As the instructions are scheduled or
51 when stalls are introduced, the queue advances and dumps insns into
52 the ready list. When all insns down to the lowest priority have
53 been scheduled, the critical path of the basic block has been made
54 as short as possible. The remaining insns are then scheduled in
55 remaining slots.
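As an illustrative sketch of that loop (the real control flow lives
in schedule_block () in this file; names abbreviated):

while (unscheduled insns remain)
{
queue_to_ready ();             -- advance the clock, move Q -> R
sort R by priority (rank_for_schedule order);
insn = best insn in R;         -- commit it: R -> S
resolve dependences of insn;   -- successors move P -> R or P -> Q
}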
57 Function unit conflicts are resolved during forward list scheduling
58 by tracking the time when each insn is committed to the schedule
59 and from that, the time the function units it uses must be free.
60 As insns on the ready list are considered for scheduling, those
61 that would result in a blockage of the already committed insns are
62 queued until no blockage will result.
64 The following list shows the order in which we want to break ties
65 among insns in the ready list:
67 1. choose insn with the longest path to end of bb, ties
68 broken by
69 2. choose insn with least contribution to register pressure,
70 ties broken by
71 3. prefer in-block over interblock motion, ties broken by
72 4. prefer useful over speculative motion, ties broken by
73 5. choose insn with largest control flow probability, ties
74 broken by
75 6. choose insn with the least dependences upon the previously
76 scheduled insn, ties broken by
77 7. choose the insn which has the most insns dependent on it, or finally
78 8. choose insn with lowest UID.
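Schematically, these tie-breakers chain into one comparison of the
shape implemented by rank_for_schedule () below (a sketch, not the
exact code):

if (priorities differ)        return the difference;
if (register weights differ)  return the difference;
...
return LUID difference;       -- last resort, keeps the sort stable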
80 Memory references complicate matters. Only if we can be certain
81 that memory references are not part of the data dependency graph
82 (via true, anti, or output dependence), can we move operations past
83 memory references. To first approximation, reads can be done
84 independently, while writes introduce dependencies. Better
85 approximations will yield fewer dependencies.
87 Before reload, an extended analysis of interblock data dependences
88 is required for interblock scheduling. This is performed in
89 compute_block_backward_dependences ().
91 Dependencies set up by memory references are treated in exactly the
92 same way as other dependencies, by using LOG_LINKS backward
93 dependences. LOG_LINKS are translated into INSN_DEPEND forward
94 dependences for the purpose of forward list scheduling.
96 Having optimized the critical path, we may have also unduly
97 extended the lifetimes of some registers. If an operation requires
98 that constants be loaded into registers, it is certainly desirable
99 to load those constants as early as necessary, but no earlier.
100 I.e., it will not do to load up a bunch of registers at the
101 beginning of a basic block only to use them at the end, if they
102 could be loaded later, since this may result in excessive register
103 utilization.
105 Note that since branches are never in basic blocks, but only end
106 basic blocks, this pass will not move branches. But that is ok,
107 since we can use GNU's delayed branch scheduling pass to take care
108 of this case.
110 Also note that no further optimizations based on algebraic
111 identities are performed, so this pass would be a good one to
112 perform instruction splitting, such as breaking up a multiply
113 instruction into shifts and adds where that is profitable.
115 Given the memory aliasing analysis that this pass should perform,
116 it should be possible to remove redundant stores to memory, and to
117 load values from registers instead of hitting memory.
119 Before reload, speculative insns are moved only if a 'proof' exists
120 that no exception will be caused by this, and if no live registers
121 exist that inhibit the motion (live register constraints are not
122 represented by data dependence edges).
124 This pass must update information that subsequent passes expect to
125 be correct. Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
126 reg_n_calls_crossed, and reg_live_length. Also, BB_HEAD, BB_END.
128 The information in the line number notes is carefully retained by
129 this pass. Notes that refer to the starting and ending of
130 exception regions are also carefully retained by this pass. All
131 other NOTE insns are grouped in their same relative order at the
132 beginning of basic blocks and regions that have been scheduled. */
134 #include "config.h"
135 #include "system.h"
136 #include "coretypes.h"
137 #include "tm.h"
138 #include "toplev.h"
139 #include "rtl.h"
140 #include "tm_p.h"
141 #include "hard-reg-set.h"
142 #include "basic-block.h"
143 #include "regs.h"
144 #include "function.h"
145 #include "flags.h"
146 #include "insn-config.h"
147 #include "insn-attr.h"
148 #include "except.h"
149 #include "toplev.h"
150 #include "recog.h"
151 #include "sched-int.h"
152 #include "target.h"
154 #ifdef INSN_SCHEDULING
156 /* issue_rate is the number of insns that can be scheduled in the same
157 machine cycle. It can be defined in the config/mach/mach.h file,
158 otherwise we set it to 1. */
160 static int issue_rate;
162 /* sched-verbose controls the amount of debugging output the
163 scheduler prints. It is controlled by -fsched-verbose=N:
164 N>0 and no -dSR : the output is directed to stderr.
165 N>=10 will direct the printouts to stderr (regardless of -dSR).
166 N=1: same as -dSR.
167 N=2: bb's probabilities, detailed ready list info, unit/insn info.
168 N=3: rtl at abort point, control-flow, regions info.
169 N=5: dependences info. */
171 static int sched_verbose_param = 0;
172 int sched_verbose = 0;
174 /* Debugging file. All printouts are sent to sched_dump, which is always
175 set, either to stderr, or to the dump listing file (-dSR). */
176 FILE *sched_dump = 0;
178 /* Highest uid before scheduling. */
179 static int old_max_uid;
181 /* fix_sched_param() is called from toplev.c upon detection
182 of the -fsched-verbose=N option. */
184 void
185 fix_sched_param (const char *param, const char *val)
186 {
187 if (!strcmp (param, "verbose"))
188 sched_verbose_param = atoi (val);
189 else
190 warning ("fix_sched_param: unknown param: %s", param);
191 }
193 struct haifa_insn_data *h_i_d;
195 #define LINE_NOTE(INSN) (h_i_d[INSN_UID (INSN)].line_note)
196 #define INSN_TICK(INSN) (h_i_d[INSN_UID (INSN)].tick)
198 /* Vector indexed by basic block number giving the starting line-number
199 for each basic block. */
200 static rtx *line_note_head;
202 /* List of important notes we must keep around. This is a pointer to the
203 last element in the list. */
204 static rtx note_list;
206 /* Queues, etc. */
208 /* An instruction is ready to be scheduled when all insns preceding it
209 have already been scheduled. It is important to ensure that all
210 insns which use its result will not be executed until its result
211 has been computed. An insn is maintained in one of four structures:
213 (P) the "Pending" set of insns which cannot be scheduled until
214 their dependencies have been satisfied.
215 (Q) the "Queued" set of insns that can be scheduled when sufficient
216 time has passed.
217 (R) the "Ready" list of unscheduled, uncommitted insns.
218 (S) the "Scheduled" list of insns.
220 Initially, all insns are either "Pending" or "Ready" depending on
221 whether their dependencies are satisfied.
223 Insns move from the "Ready" list to the "Scheduled" list as they
224 are committed to the schedule. As this occurs, the insns in the
225 "Pending" list have their dependencies satisfied and move to either
226 the "Ready" list or the "Queued" set depending on whether
227 sufficient time has passed to make them ready. As time passes,
228 insns move from the "Queued" set to the "Ready" list. Insns may
229 move from the "Ready" list to the "Queued" set if they are blocked
230 due to a function unit conflict.
232 The "Pending" list (P) are the insns in the INSN_DEPEND of the unscheduled
233 insns, i.e., those that are ready, queued, and pending.
234 The "Queued" set (Q) is implemented by the variable `insn_queue'.
235 The "Ready" list (R) is implemented by the variables `ready' and
236 `n_ready'.
237 The "Scheduled" list (S) is the new insn chain built by this pass.
239 The transition (R->S) is implemented in the scheduling loop in
240 `schedule_block' when the best insn to schedule is chosen.
241 The transition (R->Q) is implemented in `queue_insn' when an
242 insn is found to have a function unit conflict with the already
243 committed insns.
244 The transitions (P->R and P->Q) are implemented in `schedule_insn' as
245 insns move from the ready list to the scheduled list.
246 The transition (Q->R) is implemented in `queue_to_ready' as time
247 passes or stalls are introduced. */
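/* Pictorially, the transitions described above (a summary only, no
   additional machinery):

     P --dependencies satisfied--> R or Q
     Q --enough time passed------> R
     R --function unit conflict--> Q
     R --best insn chosen--------> S  */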
249 /* Implement a circular buffer to delay instructions until sufficient
250 time has passed. For the old pipeline description interface,
251 INSN_QUEUE_SIZE is a power of two larger than MAX_BLOCKAGE and
252 MAX_READY_COST computed by genattr.c. For the new pipeline
253 description interface, MAX_INSN_QUEUE_INDEX is a power of two minus
254 one that is larger than the maximal instruction execution time, as
255 computed by genattr.c from the maximal functional unit reservation
256 time plus the time to obtain a result. This is the longest time an
257 insn may be queued. */
259 #define MAX_INSN_QUEUE_INDEX max_insn_queue_index_macro_value
261 static rtx *insn_queue;
262 static int q_ptr = 0;
263 static int q_size = 0;
264 #define NEXT_Q(X) (((X)+1) & MAX_INSN_QUEUE_INDEX)
265 #define NEXT_Q_AFTER(X, C) (((X)+(C)) & MAX_INSN_QUEUE_INDEX)
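/* For example, if MAX_INSN_QUEUE_INDEX is 7 (an 8-slot queue), then
   NEXT_Q (6) == 7 and NEXT_Q_AFTER (6, 3) == (6 + 3) & 7 == 1: the
   index wraps around the circular buffer with a cheap mask instead
   of a modulo, which is why the queue size must be a power of two.  */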
267 /* The following variable defines the value for the macro
268 MAX_INSN_QUEUE_INDEX. */
269 static int max_insn_queue_index_macro_value;
271 /* The following variable describes all current and future
272 reservations of the processor units. */
273 state_t curr_state;
275 /* The following variable gives the size of the memory representing
276 all current and future reservations of the processor units. It is
277 used only by the DFA-based scheduler. */
278 static size_t dfa_state_size;
280 /* The following array is used to find the best insn from the ready
281 list when the automaton pipeline interface is used. */
282 static char *ready_try;
284 /* Describe the ready list of the scheduler.
285 VEC holds enough space for all insns in the current region; VECLEN
286 says exactly how many.
287 FIRST is the index of the element with the highest priority; i.e. the
288 last one in the ready list, since elements are ordered by ascending
289 priority.
290 N_READY determines how many insns are on the ready list. */
292 struct ready_list
293 {
294 rtx *vec;
295 int veclen;
296 int first;
297 int n_ready;
298 };
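/* An illustrative layout: with VECLEN == 8, N_READY == 3 and
   FIRST == 7, the ready insns occupy vec[5..7], vec[7] holding the
   highest-priority insn, and ready_lastpos () below would return
   &vec[5].  */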
300 static int may_trap_exp (rtx, int);
302 /* Nonzero iff the address uses at most one register. */
303 #define CONST_BASED_ADDRESS_P(x) \
304 (REG_P (x) \
305 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
306 || (GET_CODE (x) == LO_SUM)) \
307 && (CONSTANT_P (XEXP (x, 0)) \
308 || CONSTANT_P (XEXP (x, 1)))))
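/* E.g. (reg R), (plus (reg R) (const_int 4)) and
   (lo_sum (reg R) (symbol_ref S)) all satisfy CONST_BASED_ADDRESS_P,
   while (plus (reg R1) (reg R2)) does not, since neither operand is
   constant.  */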
310 /* Returns a class that an insn containing the expression X may belong
311 to, as found by analyzing the expression; IS_STORE is nonzero when X
312 is a store destination. */
313 static int
314 may_trap_exp (rtx x, int is_store)
316 enum rtx_code code;
318 if (x == 0)
319 return TRAP_FREE;
320 code = GET_CODE (x);
321 if (is_store)
323 if (code == MEM && may_trap_p (x))
324 return TRAP_RISKY;
325 else
326 return TRAP_FREE;
328 if (code == MEM)
330 /* The insn uses memory: a volatile load. */
331 if (MEM_VOLATILE_P (x))
332 return IRISKY;
333 /* An exception-free load. */
334 if (!may_trap_p (x))
335 return IFREE;
336 /* A load with 1 base register, to be further checked. */
337 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
338 return PFREE_CANDIDATE;
339 /* No info on the load, to be further checked. */
340 return PRISKY_CANDIDATE;
342 else
344 const char *fmt;
345 int i, insn_class = TRAP_FREE;
347 /* Neither store nor load, check if it may cause a trap. */
348 if (may_trap_p (x))
349 return TRAP_RISKY;
350 /* Recursive step: walk the insn... */
351 fmt = GET_RTX_FORMAT (code);
352 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
354 if (fmt[i] == 'e')
356 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
357 insn_class = WORST_CLASS (insn_class, tmp_class);
359 else if (fmt[i] == 'E')
361 int j;
362 for (j = 0; j < XVECLEN (x, i); j++)
364 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
365 insn_class = WORST_CLASS (insn_class, tmp_class);
366 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
367 break;
370 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
371 break;
373 return insn_class;
377 /* Classifies an insn for the purpose of verifying that it can be
378 moved speculatively, by examining its pattern, returning:
379 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
380 TRAP_FREE: non-load insn.
381 IFREE: load from a globally safe location.
382 IRISKY: volatile load.
383 PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
384 being either PFREE or PRISKY. */
386 int
387 haifa_classify_insn (rtx insn)
389 rtx pat = PATTERN (insn);
390 int tmp_class = TRAP_FREE;
391 int insn_class = TRAP_FREE;
392 enum rtx_code code;
394 if (GET_CODE (pat) == PARALLEL)
396 int i, len = XVECLEN (pat, 0);
398 for (i = len - 1; i >= 0; i--)
400 code = GET_CODE (XVECEXP (pat, 0, i));
401 switch (code)
403 case CLOBBER:
404 /* Test if it is a 'store'. */
405 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
406 break;
407 case SET:
408 /* Test if it is a store. */
409 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
410 if (tmp_class == TRAP_RISKY)
411 break;
412 /* Test if it is a load. */
413 tmp_class
414 = WORST_CLASS (tmp_class,
415 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
416 0));
417 break;
418 case COND_EXEC:
419 case TRAP_IF:
420 tmp_class = TRAP_RISKY;
421 break;
422 default:
425 insn_class = WORST_CLASS (insn_class, tmp_class);
426 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
427 break;
430 else
432 code = GET_CODE (pat);
433 switch (code)
435 case CLOBBER:
436 /* Test if it is a 'store'. */
437 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
438 break;
439 case SET:
440 /* Test if it is a store. */
441 tmp_class = may_trap_exp (SET_DEST (pat), 1);
442 if (tmp_class == TRAP_RISKY)
443 break;
444 /* Test if it is a load. */
445 tmp_class =
446 WORST_CLASS (tmp_class,
447 may_trap_exp (SET_SRC (pat), 0));
448 break;
449 case COND_EXEC:
450 case TRAP_IF:
451 tmp_class = TRAP_RISKY;
452 break;
453 default:;
455 insn_class = tmp_class;
458 return insn_class;
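/* For instance (illustrative patterns): (set (reg A) (reg B)) is
   TRAP_FREE; (set (reg A) (mem (reg P))) is PFREE_CANDIDATE, a load
   off a single base register still to be checked; and a division
   whose divisor may be zero is TRAP_RISKY via may_trap_p.  */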
461 /* Forward declarations. */
463 /* The scheduler using only DFA description should never use the
464 following five functions: */
465 static unsigned int blockage_range (int, rtx);
466 static void clear_units (void);
467 static void schedule_unit (int, rtx, int);
468 static int actual_hazard (int, rtx, int, int);
469 static int potential_hazard (int, rtx, int);
471 static int priority (rtx);
472 static int rank_for_schedule (const void *, const void *);
473 static void swap_sort (rtx *, int);
474 static void queue_insn (rtx, int);
475 static int schedule_insn (rtx, struct ready_list *, int);
476 static int find_set_reg_weight (rtx);
477 static void find_insn_reg_weight (int);
478 static void adjust_priority (rtx);
479 static void advance_one_cycle (void);
481 /* Notes handling mechanism:
482 =========================
483 Generally, NOTES are saved before scheduling and restored after scheduling.
484 The scheduler distinguishes between three types of notes:
486 (1) LINE_NUMBER notes, generated and used for debugging. Here,
487 before scheduling a region, a pointer to the LINE_NUMBER note is
488 added to the insn following it (in save_line_notes()), and the note
489 is removed (in rm_line_notes() and unlink_line_notes()). After
490 scheduling the region, this pointer is used for regeneration of
491 the LINE_NUMBER note (in restore_line_notes()).
493 (2) LOOP_BEGIN, LOOP_END, SETJMP, EHREGION_BEG, EHREGION_END notes:
494 Before scheduling a region, a pointer to the note is added to the insn
495 that follows or precedes it. (This happens as part of the data dependence
496 computation). After scheduling an insn, the pointer contained in it is
497 used for regenerating the corresponding note (in reemit_notes).
499 (3) All other notes (e.g. INSN_DELETED): Before scheduling a block,
500 these notes are put in a list (in rm_other_notes() and
501 unlink_other_notes ()). After scheduling the block, these notes are
502 inserted at the beginning of the block (in schedule_block()). */
504 static rtx unlink_other_notes (rtx, rtx);
505 static rtx unlink_line_notes (rtx, rtx);
506 static rtx reemit_notes (rtx, rtx);
508 static rtx *ready_lastpos (struct ready_list *);
509 static void ready_sort (struct ready_list *);
510 static rtx ready_remove_first (struct ready_list *);
512 static void queue_to_ready (struct ready_list *);
513 static int early_queue_to_ready (state_t, struct ready_list *);
515 static void debug_ready_list (struct ready_list *);
517 static rtx move_insn1 (rtx, rtx);
518 static rtx move_insn (rtx, rtx);
520 /* The following functions are used to implement multi-pass scheduling
521 on the first cycle. They are used only by the DFA-based scheduler. */
522 static rtx ready_element (struct ready_list *, int);
523 static rtx ready_remove (struct ready_list *, int);
524 static int max_issue (struct ready_list *, int *);
526 static rtx choose_ready (struct ready_list *);
528 #endif /* INSN_SCHEDULING */
530 /* Point to state used for the current scheduling pass. */
531 struct sched_info *current_sched_info;
533 #ifndef INSN_SCHEDULING
534 void
535 schedule_insns (FILE *dump_file ATTRIBUTE_UNUSED)
536 {
537 }
538 #else
540 /* Pointer to the last instruction scheduled. Used by rank_for_schedule,
541 so that insns independent of the last scheduled insn will be preferred
542 over dependent instructions. */
544 static rtx last_scheduled_insn;
546 /* Compute the function units used by INSN. This caches the value
547 returned by function_units_used. A function unit is encoded as the
548 unit number if the value is non-negative and the complement of a
549 mask if the value is negative. A function unit index is the
550 non-negative encoding. The scheduler using only DFA description
551 should never use the following function. */
553 HAIFA_INLINE int
554 insn_unit (rtx insn)
556 int unit = INSN_UNIT (insn);
558 if (unit == 0)
560 recog_memoized (insn);
562 /* A USE insn, or something else we don't need to understand.
563 We can't pass these directly to function_units_used because it will
564 trigger a fatal error for unrecognizable insns. */
565 if (INSN_CODE (insn) < 0)
566 unit = -1;
567 else
569 unit = function_units_used (insn);
570 /* Increment non-negative values so we can cache zero. */
571 if (unit >= 0)
572 unit++;
574 /* We only cache 16 bits of the result, so if the value is out of
575 range, don't cache it. */
576 if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
577 || unit >= 0
578 || (unit & ~((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
579 INSN_UNIT (insn) = unit;
581 return (unit > 0 ? unit - 1 : unit);
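/* An example of the encoding: a return value of 2 means the insn uses
   function unit 2 alone, while a return value of ~5 (the complement
   of the mask 101 in binary) means it uses units 0 and 2; callers
   such as schedule_unit () walk the complemented mask bit by bit.  */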
584 /* Compute the blockage range for executing INSN on UNIT. This caches
585 the value returned by the blockage_range_function for the unit.
586 These values are encoded in an int where the upper half gives the
587 minimum value and the lower half gives the maximum value. The
588 scheduler using only DFA description should never use the following
589 function. */
591 HAIFA_INLINE static unsigned int
592 blockage_range (int unit, rtx insn)
594 unsigned int blockage = INSN_BLOCKAGE (insn);
595 unsigned int range;
597 if ((int) UNIT_BLOCKED (blockage) != unit + 1)
599 range = function_units[unit].blockage_range_function (insn);
600 /* We only cache the blockage range for one unit and then only if
601 the values fit. */
602 if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
603 INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
605 else
606 range = BLOCKAGE_RANGE (blockage);
608 return range;
611 /* A vector indexed by function unit instance giving the last insn to
612 use the unit. The value of the function unit instance index for
613 unit U instance I is (U + I * FUNCTION_UNITS_SIZE). The scheduler
614 using only DFA description should never use the following variable. */
615 #if FUNCTION_UNITS_SIZE
616 static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
617 #else
618 static rtx unit_last_insn[1];
619 #endif
621 /* A vector indexed by function unit instance giving the minimum time
622 when the unit will unblock based on the maximum blockage cost. The
623 scheduler using only DFA description should never use the following
624 variable. */
625 #if FUNCTION_UNITS_SIZE
626 static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];
627 #else
628 static int unit_tick[1];
629 #endif
631 /* A vector indexed by function unit number giving the number of insns
632 that remain to use the unit. The scheduler using only DFA
633 description should never use the following variable. */
634 #if FUNCTION_UNITS_SIZE
635 static int unit_n_insns[FUNCTION_UNITS_SIZE];
636 #else
637 static int unit_n_insns[1];
638 #endif
640 /* Access the unit_last_insn array. Used by the visualization code.
641 The scheduler using only DFA description should never use the
642 following function. */
644 rtx
645 get_unit_last_insn (int instance)
647 return unit_last_insn[instance];
650 /* Reset the function unit state to the null state. */
652 static void
653 clear_units (void)
655 memset (unit_last_insn, 0, sizeof (unit_last_insn));
656 memset (unit_tick, 0, sizeof (unit_tick));
657 memset (unit_n_insns, 0, sizeof (unit_n_insns));
660 /* Return the issue-delay of an insn. The scheduler using only DFA
661 description should never use the following function. */
663 HAIFA_INLINE int
664 insn_issue_delay (rtx insn)
666 int i, delay = 0;
667 int unit = insn_unit (insn);
669 /* Efficiency note: in fact, we are working 'hard' to compute a
670 value that was available in the md file, and is not available in the
671 function_units[] structure. It would be nice to have this
672 value there, too. */
673 if (unit >= 0)
675 if (function_units[unit].blockage_range_function &&
676 function_units[unit].blockage_function)
677 delay = function_units[unit].blockage_function (insn, insn);
679 else
680 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
681 if ((unit & 1) != 0 && function_units[i].blockage_range_function
682 && function_units[i].blockage_function)
683 delay = MAX (delay, function_units[i].blockage_function (insn, insn));
685 return delay;
688 /* Return the actual hazard cost of executing INSN on the unit UNIT,
689 instance INSTANCE at time CLOCK if the previous actual hazard cost
690 was COST. The scheduler using only DFA description should never
691 use the following function. */
693 HAIFA_INLINE int
694 actual_hazard_this_instance (int unit, int instance, rtx insn, int clock, int cost)
696 int tick = unit_tick[instance]; /* Issue time of the last issued insn. */
698 if (tick - clock > cost)
700 /* The scheduler is operating forward, so unit's last insn is the
701 executing insn and INSN is the candidate insn. We want a
702 more exact measure of the blockage if we execute INSN at CLOCK
703 given when we committed the execution of the unit's last insn.
705 The blockage value is given by either the unit's max blockage
706 constant, blockage range function, or blockage function. Use
707 the most exact form for the given unit. */
709 if (function_units[unit].blockage_range_function)
711 if (function_units[unit].blockage_function)
712 tick += (function_units[unit].blockage_function
713 (unit_last_insn[instance], insn)
714 - function_units[unit].max_blockage);
715 else
716 tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
717 - function_units[unit].max_blockage);
719 if (tick - clock > cost)
720 cost = tick - clock;
722 return cost;
725 /* Record INSN as having begun execution on the units encoded by UNIT
726 at time CLOCK. The scheduler using only DFA description should
727 never use the following function. */
729 static void
730 schedule_unit (int unit, rtx insn, int clock)
732 int i;
734 if (unit >= 0)
736 int instance = unit;
737 #if MAX_MULTIPLICITY > 1
738 /* Find the first free instance of the function unit and use that
739 one. We assume that one is free. */
740 for (i = function_units[unit].multiplicity - 1; i > 0; i--)
742 if (!actual_hazard_this_instance (unit, instance, insn, clock, 0))
743 break;
744 instance += FUNCTION_UNITS_SIZE;
746 #endif
747 unit_last_insn[instance] = insn;
748 unit_tick[instance] = (clock + function_units[unit].max_blockage);
750 else
751 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
752 if ((unit & 1) != 0)
753 schedule_unit (i, insn, clock);
756 /* Return the actual hazard cost of executing INSN on the units
757 encoded by UNIT at time CLOCK if the previous actual hazard cost
758 was COST. The scheduler using only DFA description should never
759 use the following function. */
761 static int
762 actual_hazard (int unit, rtx insn, int clock, int cost)
764 int i;
766 if (unit >= 0)
768 /* Find the instance of the function unit with the minimum hazard. */
769 int instance = unit;
770 int best_cost = actual_hazard_this_instance (unit, instance, insn,
771 clock, cost);
772 #if MAX_MULTIPLICITY > 1
773 int this_cost;
775 if (best_cost > cost)
777 for (i = function_units[unit].multiplicity - 1; i > 0; i--)
779 instance += FUNCTION_UNITS_SIZE;
780 this_cost = actual_hazard_this_instance (unit, instance, insn,
781 clock, cost);
782 if (this_cost < best_cost)
784 best_cost = this_cost;
785 if (this_cost <= cost)
786 break;
790 #endif
791 cost = MAX (cost, best_cost);
793 else
794 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
795 if ((unit & 1) != 0)
796 cost = actual_hazard (i, insn, clock, cost);
798 return cost;
801 /* Return the potential hazard cost of executing an instruction on the
802 units encoded by UNIT if the previous potential hazard cost was
803 COST. An insn with a large blockage time is chosen in preference
804 to one with a smaller time; an insn that uses a unit that is more
805 likely to be used is chosen in preference to one with a unit that
806 is less used. We are trying to minimize a subsequent actual
807 hazard. The scheduler using only DFA description should never use
808 the following function. */
810 HAIFA_INLINE static int
811 potential_hazard (int unit, rtx insn, int cost)
813 int i, ncost;
814 unsigned int minb, maxb;
816 if (unit >= 0)
818 minb = maxb = function_units[unit].max_blockage;
819 if (maxb > 1)
821 if (function_units[unit].blockage_range_function)
823 maxb = minb = blockage_range (unit, insn);
824 maxb = MAX_BLOCKAGE_COST (maxb);
825 minb = MIN_BLOCKAGE_COST (minb);
828 if (maxb > 1)
830 /* Make the number of instructions left dominate. Make the
831 minimum delay dominate the maximum delay. If all these
832 are the same, use the unit number to add an arbitrary
833 ordering. Other terms can be added. */
834 ncost = minb * 0x40 + maxb;
835 ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
836 if (ncost > cost)
837 cost = ncost;
841 else
842 for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
843 if ((unit & 1) != 0)
844 cost = potential_hazard (i, insn, cost);
846 return cost;
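/* A worked instance of the heuristic above, with illustrative
   numbers: for minb == 2, maxb == 5 and 3 insns left on unit 1,
   ncost is (2 * 0x40 + 5) * ((3 - 1) * 0x1000 + 1), i.e. 133 * 8193;
   the count of remaining insns dominates, then the minimum delay,
   then the maximum delay.  */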
849 /* Compute cost of executing INSN given the dependence LINK on the insn USED.
850 This is the number of cycles between instruction issue and
851 instruction results. */
853 HAIFA_INLINE int
854 insn_cost (rtx insn, rtx link, rtx used)
856 int cost = INSN_COST (insn);
858 if (cost < 0)
860 /* A USE insn, or something else we don't need to
861 understand. We can't pass these directly to
862 result_ready_cost or insn_default_latency because it will
863 trigger a fatal error for unrecognizable insns. */
864 if (recog_memoized (insn) < 0)
866 INSN_COST (insn) = 0;
867 return 0;
869 else
871 if (targetm.sched.use_dfa_pipeline_interface
872 && targetm.sched.use_dfa_pipeline_interface ())
873 cost = insn_default_latency (insn);
874 else
875 cost = result_ready_cost (insn);
877 if (cost < 0)
878 cost = 0;
880 INSN_COST (insn) = cost;
884 /* In this case estimate cost without caring how insn is used. */
885 if (link == 0 || used == 0)
886 return cost;
888 /* A USE insn should never require the value used to be computed.
889 This allows the computation of a function's result and parameter
890 values to overlap the return and call. */
891 if (recog_memoized (used) < 0)
892 cost = 0;
893 else
895 if (targetm.sched.use_dfa_pipeline_interface
896 && targetm.sched.use_dfa_pipeline_interface ())
898 if (INSN_CODE (insn) >= 0)
900 if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
901 cost = 0;
902 else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
904 cost = (insn_default_latency (insn)
905 - insn_default_latency (used));
906 if (cost <= 0)
907 cost = 1;
909 else if (bypass_p (insn))
910 cost = insn_latency (insn, used);
914 if (targetm.sched.adjust_cost)
915 cost = targetm.sched.adjust_cost (used, link, insn, cost);
917 if (cost < 0)
918 cost = 0;
921 return cost;
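/* An example of the DFA cost rules above: if INSN has a default
   latency of 3 and feeds USED through a true dependence, the cost is
   3 (unless a bypass applies); through an anti dependence it is 0;
   through an output dependence it is 3 minus USED's latency, but
   never less than 1.  */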
924 /* Compute the priority number for INSN. */
926 static int
927 priority (rtx insn)
929 rtx link;
931 if (! INSN_P (insn))
932 return 0;
934 if (! INSN_PRIORITY_KNOWN (insn))
936 int this_priority = 0;
938 if (INSN_DEPEND (insn) == 0)
939 this_priority = insn_cost (insn, 0, 0);
940 else
942 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
944 rtx next;
945 int next_priority;
947 next = XEXP (link, 0);
949 /* Critical path is meaningful in block boundaries only. */
950 if (! (*current_sched_info->contributes_to_priority) (next, insn))
951 continue;
953 next_priority = insn_cost (insn, link, next) + priority (next);
954 if (next_priority > this_priority)
955 this_priority = next_priority;
958 INSN_PRIORITY (insn) = this_priority;
959 INSN_PRIORITY_KNOWN (insn) = 1;
962 return INSN_PRIORITY (insn);
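/* E.g. for a chain A -> B -> C in which each link costs 2 cycles and
   C has no successors, priority (C) == insn_cost (C, 0, 0) and
   priority (A) == 2 + 2 + priority (C): the length of the critical
   path from A to the end of the block.  */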
965 /* Macros and functions for keeping the priority queue sorted, and
966 dealing with queuing and dequeuing of instructions. */
968 #define SCHED_SORT(READY, N_READY) \
969 do { if ((N_READY) == 2) \
970 swap_sort (READY, N_READY); \
971 else if ((N_READY) > 2) \
972 qsort (READY, N_READY, sizeof (rtx), rank_for_schedule); } \
973 while (0)
975 /* Returns a positive value if x is preferred; returns a negative value if
976 y is preferred. Should never return 0, since that will make the sort
977 unstable. */
979 static int
980 rank_for_schedule (const void *x, const void *y)
982 rtx tmp = *(const rtx *) y;
983 rtx tmp2 = *(const rtx *) x;
984 rtx link;
985 int tmp_class, tmp2_class, depend_count1, depend_count2;
986 int val, priority_val, weight_val, info_val;
988 /* An insn in a schedule group should be issued first. */
989 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
990 return SCHED_GROUP_P (tmp2) ? 1 : -1;
992 /* Prefer insn with higher priority. */
993 priority_val = INSN_PRIORITY (tmp2) - INSN_PRIORITY (tmp);
995 if (priority_val)
996 return priority_val;
998 /* Prefer an insn with smaller contribution to register pressure. */
999 if (!reload_completed &&
1000 (weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
1001 return weight_val;
1003 info_val = (*current_sched_info->rank) (tmp, tmp2);
1004 if (info_val)
1005 return info_val;
1007 /* Compare insns based on their relation to the last-scheduled-insn. */
1008 if (last_scheduled_insn)
1010 /* Classify the instructions into three classes:
1011 1) Data dependent on last scheduled insn.
1012 2) Anti/Output dependent on last scheduled insn.
1013 3) Independent of last scheduled insn, or has latency of one.
1014 Choose the insn from the highest numbered class if different. */
1015 link = find_insn_list (tmp, INSN_DEPEND (last_scheduled_insn));
1016 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp) == 1)
1017 tmp_class = 3;
1018 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1019 tmp_class = 1;
1020 else
1021 tmp_class = 2;
1023 link = find_insn_list (tmp2, INSN_DEPEND (last_scheduled_insn));
1024 if (link == 0 || insn_cost (last_scheduled_insn, link, tmp2) == 1)
1025 tmp2_class = 3;
1026 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
1027 tmp2_class = 1;
1028 else
1029 tmp2_class = 2;
1031 if ((val = tmp2_class - tmp_class))
1032 return val;
1035 /* Prefer the insn which has more later insns that depend on it.
1036 This gives the scheduler more freedom when scheduling later
1037 instructions at the expense of added register pressure. */
1038 depend_count1 = 0;
1039 for (link = INSN_DEPEND (tmp); link; link = XEXP (link, 1))
1040 depend_count1++;
1042 depend_count2 = 0;
1043 for (link = INSN_DEPEND (tmp2); link; link = XEXP (link, 1))
1044 depend_count2++;
1046 val = depend_count2 - depend_count1;
1047 if (val)
1048 return val;
1050 /* If insns are equally good, sort by INSN_LUID (original insn order),
1051 so that we make the sort stable. This minimizes instruction movement,
1052 thus minimizing sched's effect on debugging and cross-jumping. */
1053 return INSN_LUID (tmp) - INSN_LUID (tmp2);
1056 /* Resort the array A, in which only the last element (at index N - 1) may be out of order. */
1058 HAIFA_INLINE static void
1059 swap_sort (rtx *a, int n)
1061 rtx insn = a[n - 1];
1062 int i = n - 2;
1064 while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
1066 a[i + 1] = a[i];
1067 i -= 1;
1069 a[i + 1] = insn;
1072 /* Add INSN to the insn queue so that it can be executed at least
1073 N_CYCLES after the currently executing insn. Preserve insns
1074 chain for debugging purposes. */
1076 HAIFA_INLINE static void
1077 queue_insn (rtx insn, int n_cycles)
1079 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
1080 rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
1081 insn_queue[next_q] = link;
1082 q_size += 1;
1084 if (sched_verbose >= 2)
1086 fprintf (sched_dump, ";;\t\tReady-->Q: insn %s: ",
1087 (*current_sched_info->print_insn) (insn, 0));
1089 fprintf (sched_dump, "queued for %d cycles.\n", n_cycles);
1093 /* Return a pointer to the bottom of the ready list, i.e. the insn
1094 with the lowest priority. */
1096 HAIFA_INLINE static rtx *
1097 ready_lastpos (struct ready_list *ready)
1099 if (ready->n_ready == 0)
1100 abort ();
1101 return ready->vec + ready->first - ready->n_ready + 1;
1104 /* Add an element INSN to the ready list so that it ends up with the lowest
1105 priority. */
1107 HAIFA_INLINE void
1108 ready_add (struct ready_list *ready, rtx insn)
1110 if (ready->first == ready->n_ready)
1112 memmove (ready->vec + ready->veclen - ready->n_ready,
1113 ready_lastpos (ready),
1114 ready->n_ready * sizeof (rtx));
1115 ready->first = ready->veclen - 1;
1117 ready->vec[ready->first - ready->n_ready] = insn;
1118 ready->n_ready++;
1121 /* Remove the element with the highest priority from the ready list and
1122 return it. */
1124 HAIFA_INLINE static rtx
1125 ready_remove_first (struct ready_list *ready)
1127 rtx t;
1128 if (ready->n_ready == 0)
1129 abort ();
1130 t = ready->vec[ready->first--];
1131 ready->n_ready--;
1132 /* If the ready list becomes empty, reset it. */
1133 if (ready->n_ready == 0)
1134 ready->first = ready->veclen - 1;
1135 return t;
1138 /* The following code implements multi-pass scheduling for the first
1139 cycle. In other words, we will try to choose the ready insn that
1140 permits starting the maximum number of insns on the same cycle. */
1142 /* Return a pointer to the element at position INDEX in the ready list.
1143 The insn with the highest priority has INDEX 0, and the one with the
1144 lowest priority has INDEX N_READY - 1. */
1146 HAIFA_INLINE static rtx
1147 ready_element (struct ready_list *ready, int index)
1149 #ifdef ENABLE_CHECKING
1150 if (ready->n_ready == 0 || index >= ready->n_ready)
1151 abort ();
1152 #endif
1153 return ready->vec[ready->first - index];
1156 /* Remove the element at position INDEX from the ready list and return
1157 it. The insn with the highest priority has INDEX 0, and the one with
1158 the lowest priority has INDEX N_READY - 1. */
1160 HAIFA_INLINE static rtx
1161 ready_remove (struct ready_list *ready, int index)
1163 rtx t;
1164 int i;
1166 if (index == 0)
1167 return ready_remove_first (ready);
1168 if (ready->n_ready == 0 || index >= ready->n_ready)
1169 abort ();
1170 t = ready->vec[ready->first - index];
1171 ready->n_ready--;
1172 for (i = index; i < ready->n_ready; i++)
1173 ready->vec[ready->first - i] = ready->vec[ready->first - i - 1];
1174 return t;
1178 /* Sort the ready list READY by ascending priority, using the SCHED_SORT
1179 macro. */
1181 HAIFA_INLINE static void
1182 ready_sort (struct ready_list *ready)
1184 rtx *first = ready_lastpos (ready);
1185 SCHED_SORT (first, ready->n_ready);
1188 /* PREV is an insn that is ready to execute. Adjust its priority if that
1189 will help shorten or lengthen register lifetimes as appropriate. Also
1190 provide a hook for the target to tweak itself. */
1192 HAIFA_INLINE static void
1193 adjust_priority (rtx prev)
1195 /* ??? There used to be code here to try and estimate how an insn
1196 affected register lifetimes, but it did it by looking at REG_DEAD
1197 notes, which we removed in schedule_region. Nor did it try to
1198 take into account register pressure or anything useful like that.
1200 Revisit when we have a machine model to work with and not before. */
1202 if (targetm.sched.adjust_priority)
1203 INSN_PRIORITY (prev) =
1204 targetm.sched.adjust_priority (prev, INSN_PRIORITY (prev));
1207 /* Advance time by one cycle. */
1208 HAIFA_INLINE static void
1209 advance_one_cycle (void)
1211 if (targetm.sched.use_dfa_pipeline_interface
1212 && targetm.sched.use_dfa_pipeline_interface ())
1214 if (targetm.sched.dfa_pre_cycle_insn)
1215 state_transition (curr_state,
1216 targetm.sched.dfa_pre_cycle_insn ());
1218 state_transition (curr_state, NULL);
1220 if (targetm.sched.dfa_post_cycle_insn)
1221 state_transition (curr_state,
1222 targetm.sched.dfa_post_cycle_insn ());
1226 /* Clock at which the previous instruction was issued. */
1227 static int last_clock_var;
1229 /* INSN is the "currently executing insn". Launch each insn which was
1230 waiting on INSN. READY is the ready list which contains the insns
1231 that are ready to fire. CLOCK is the current cycle. The function
1232 returns the necessary cycle advance after issuing the insn (it is not
1233 zero for insns in a schedule group). */
1235 static int
1236 schedule_insn (rtx insn, struct ready_list *ready, int clock)
1238 rtx link;
1239 int advance = 0;
1240 int unit = 0;
1241 int premature_issue = 0;
1243 if (!targetm.sched.use_dfa_pipeline_interface
1244 || !targetm.sched.use_dfa_pipeline_interface ())
1245 unit = insn_unit (insn);
1247 if (targetm.sched.use_dfa_pipeline_interface
1248 && targetm.sched.use_dfa_pipeline_interface ()
1249 && sched_verbose >= 1)
1251 char buf[2048];
1253 print_insn (buf, insn, 0);
1254 buf[40] = 0;
1255 fprintf (sched_dump, ";;\t%3i--> %-40s:", clock, buf);
1257 if (recog_memoized (insn) < 0)
1258 fprintf (sched_dump, "nothing");
1259 else
1260 print_reservation (sched_dump, insn);
1261 fputc ('\n', sched_dump);
1263 else if (sched_verbose >= 2)
1265 fprintf (sched_dump, ";;\t\t--> scheduling insn <<<%d>>> on unit ",
1266 INSN_UID (insn));
1267 insn_print_units (insn);
1268 fputc ('\n', sched_dump);
1271 if (!targetm.sched.use_dfa_pipeline_interface
1272 || !targetm.sched.use_dfa_pipeline_interface ())
1274 if (sched_verbose && unit == -1)
1275 visualize_no_unit (insn);
1278 if (MAX_BLOCKAGE > 1 || issue_rate > 1 || sched_verbose)
1279 schedule_unit (unit, insn, clock);
1281 if (INSN_DEPEND (insn) == 0)
1282 return 0;
1285 if (INSN_TICK (insn) > clock)
1287 /* 'insn' has been prematurely moved from the queue to the
1288 ready list. */
1289 premature_issue = INSN_TICK (insn) - clock;
1292 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
1294 rtx next = XEXP (link, 0);
1295 int cost = insn_cost (insn, link, next);
1297 INSN_TICK (next) = MAX (INSN_TICK (next), clock + cost + premature_issue);
1299 if ((INSN_DEP_COUNT (next) -= 1) == 0)
1301 int effective_cost = INSN_TICK (next) - clock;
1303 if (! (*current_sched_info->new_ready) (next))
1304 continue;
1306 if (sched_verbose >= 2)
1308 fprintf (sched_dump, ";;\t\tdependences resolved: insn %s ",
1309 (*current_sched_info->print_insn) (next, 0));
1311 if (effective_cost < 1)
1312 fprintf (sched_dump, "into ready\n");
1313 else
1314 fprintf (sched_dump, "into queue with cost=%d\n",
1315 effective_cost);
1318 /* Adjust the priority of NEXT and either put it on the ready
1319 list or queue it. */
1320 adjust_priority (next);
1321 if (effective_cost < 1)
1322 ready_add (ready, next);
1323 else
1325 queue_insn (next, effective_cost);
1327 if (SCHED_GROUP_P (next) && advance < effective_cost)
1328 advance = effective_cost;
1333 /* Annotate the instruction with issue information -- TImode
1334 indicates that the instruction is expected not to be able
1335 to issue on the same cycle as the previous insn. A machine
1336 may use this information to decide how the instruction should
1337 be aligned. */
1338 if (issue_rate > 1
1339 && GET_CODE (PATTERN (insn)) != USE
1340 && GET_CODE (PATTERN (insn)) != CLOBBER)
1342 if (reload_completed)
1343 PUT_MODE (insn, clock > last_clock_var ? TImode : VOIDmode);
1344 last_clock_var = clock;
1346 return advance;
1349 /* Functions for handling of notes. */
1351 /* Delete notes beginning with INSN and put them in the chain
1352 of notes ended by NOTE_LIST.
1353 Returns the insn following the notes. */
1355 static rtx
1356 unlink_other_notes (rtx insn, rtx tail)
1358 rtx prev = PREV_INSN (insn);
1360 while (insn != tail && NOTE_P (insn))
1362 rtx next = NEXT_INSN (insn);
1363 /* Delete the note from its current position. */
1364 if (prev)
1365 NEXT_INSN (prev) = next;
1366 if (next)
1367 PREV_INSN (next) = prev;
1369 /* See sched_analyze to see how these are handled. */
1370 if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG
1371 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END
1372 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK
1373 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
1374 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
1376 /* Insert the note at the end of the notes list. */
1377 PREV_INSN (insn) = note_list;
1378 if (note_list)
1379 NEXT_INSN (note_list) = insn;
1380 note_list = insn;
1383 insn = next;
1385 return insn;
1388 /* Delete line notes beginning with INSN. Record line-number notes so
1389 they can be reused. Returns the insn following the notes. */
1391 static rtx
1392 unlink_line_notes (rtx insn, rtx tail)
1394 rtx prev = PREV_INSN (insn);
1396 while (insn != tail && NOTE_P (insn))
1398 rtx next = NEXT_INSN (insn);
1400 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
1402 /* Delete the note from its current position. */
1403 if (prev)
1404 NEXT_INSN (prev) = next;
1405 if (next)
1406 PREV_INSN (next) = prev;
1408 /* Record line-number notes so they can be reused. */
1409 LINE_NOTE (insn) = insn;
1411 else
1412 prev = insn;
1414 insn = next;
1416 return insn;
1419 /* Return the head and tail pointers of BB. */
1421 void
1422 get_block_head_tail (int b, rtx *headp, rtx *tailp)
1424 /* HEAD and TAIL delimit the basic block being scheduled. */
1425 rtx head = BB_HEAD (BASIC_BLOCK (b));
1426 rtx tail = BB_END (BASIC_BLOCK (b));
1428 /* Don't include any notes or labels at the beginning of the
1429 basic block, or notes at the ends of basic blocks. */
1430 while (head != tail)
1432 if (NOTE_P (head))
1433 head = NEXT_INSN (head);
1434 else if (NOTE_P (tail))
1435 tail = PREV_INSN (tail);
1436 else if (LABEL_P (head))
1437 head = NEXT_INSN (head);
1438 else
1439 break;
1442 *headp = head;
1443 *tailp = tail;
1446 /* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
1448 int
1449 no_real_insns_p (rtx head, rtx tail)
1451 while (head != NEXT_INSN (tail))
1453 if (!NOTE_P (head) && !LABEL_P (head))
1454 return 0;
1455 head = NEXT_INSN (head);
1457 return 1;
1460 /* Delete line notes from one block. Save them so they can be later restored
1461 (in restore_line_notes). HEAD and TAIL are the boundaries of the
1462 block in which notes should be processed. */
1464 void
1465 rm_line_notes (rtx head, rtx tail)
1467 rtx next_tail;
1468 rtx insn;
1470 next_tail = NEXT_INSN (tail);
1471 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1473 rtx prev;
1475 /* Farm out notes, and maybe save them in NOTE_LIST.
1476 This is needed to keep the debugger from
1477 getting completely deranged. */
1478 if (NOTE_P (insn))
1480 prev = insn;
1481 insn = unlink_line_notes (insn, next_tail);
1483 if (prev == tail)
1484 abort ();
1485 if (prev == head)
1486 abort ();
1487 if (insn == next_tail)
1488 abort ();
1493 /* Save line number notes for each insn in block B. HEAD and TAIL are
1494 the boundaries of the block in which notes should be processed. */
1496 void
1497 save_line_notes (int b, rtx head, rtx tail)
1499 rtx next_tail;
1501 /* We must use the true line number for the first insn in the block
1502 that was computed and saved at the start of this pass. We can't
1503 use the current line number, because scheduling of the previous
1504 block may have changed the current line number. */
1506 rtx line = line_note_head[b];
1507 rtx insn;
1509 next_tail = NEXT_INSN (tail);
1511 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1512 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1513 line = insn;
1514 else
1515 LINE_NOTE (insn) = line;
1518 /* After a block has been scheduled, insert line notes into the insn list.
1519 HEAD and TAIL are the boundaries of the block in which notes should
1520 be processed. */
1522 void
1523 restore_line_notes (rtx head, rtx tail)
1525 rtx line, note, prev, new;
1526 int added_notes = 0;
1527 rtx next_tail, insn;
1529 head = head;
1530 next_tail = NEXT_INSN (tail);
1532 /* Determine the current line-number. We want to know the current
1533 line number of the first insn of the block here, in case it is
1534 different from the true line number that was saved earlier. If
1535 different, then we need a line number note before the first insn
1536 of this block. If it happens to be the same, then we don't want to
1537 emit another line number note here. */
1538 for (line = head; line; line = PREV_INSN (line))
1539 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
1540 break;
1542 /* Walk the insns keeping track of the current line-number and inserting
1543 the line-number notes as needed. */
1544 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1545 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1546 line = insn;
1547 /* This used to emit line number notes before every non-deleted note.
1548 However, this confuses a debugger, because line notes not separated
1549 by real instructions all end up at the same address. I can find no
1550 use for line number notes before other notes, so none are emitted. */
1551 else if (!NOTE_P (insn)
1552 && INSN_UID (insn) < old_max_uid
1553 && (note = LINE_NOTE (insn)) != 0
1554 && note != line
1555 && (line == 0
1556 #ifdef USE_MAPPED_LOCATION
1557 || NOTE_SOURCE_LOCATION (note) != NOTE_SOURCE_LOCATION (line)
1558 #else
1559 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
1560 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)
1561 #endif
1564 line = note;
1565 prev = PREV_INSN (insn);
1566 if (LINE_NOTE (note))
1568 /* Re-use the original line-number note. */
1569 LINE_NOTE (note) = 0;
1570 PREV_INSN (note) = prev;
1571 NEXT_INSN (prev) = note;
1572 PREV_INSN (insn) = note;
1573 NEXT_INSN (note) = insn;
1575 else
1577 added_notes++;
1578 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
1579 #ifndef USE_MAPPED_LOCATION
1580 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
1581 #endif
1584 if (sched_verbose && added_notes)
1585 fprintf (sched_dump, ";; added %d line-number notes\n", added_notes);
1588 /* After scheduling the function, delete redundant line notes from the
1589 insns list. */
1591 void
1592 rm_redundant_line_notes (void)
1594 rtx line = 0;
1595 rtx insn = get_insns ();
1596 int active_insn = 0;
1597 int notes = 0;
1599 /* Walk the insns deleting redundant line-number notes. Many of these
1600 are already present. The remainder tend to occur at basic
1601 block boundaries. */
1602 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1603 if (NOTE_P (insn) && NOTE_LINE_NUMBER (insn) > 0)
1605 /* If there are no active insns following, INSN is redundant. */
1606 if (active_insn == 0)
1608 notes++;
1609 SET_INSN_DELETED (insn);
1611 /* If the line number is unchanged, LINE is redundant. */
1612 else if (line
1613 #ifdef USE_MAPPED_LOCATION
1614 && NOTE_SOURCE_LOCATION (line) == NOTE_SOURCE_LOCATION (insn)
1615 #else
1616 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
1617 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn)
1618 #endif
1621 notes++;
1622 SET_INSN_DELETED (line);
1623 line = insn;
1625 else
1626 line = insn;
1627 active_insn = 0;
1629 else if (!((NOTE_P (insn)
1630 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
1631 || (NONJUMP_INSN_P (insn)
1632 && (GET_CODE (PATTERN (insn)) == USE
1633 || GET_CODE (PATTERN (insn)) == CLOBBER))))
1634 active_insn++;
1636 if (sched_verbose && notes)
1637 fprintf (sched_dump, ";; deleted %d line-number notes\n", notes);
1640 /* Delete notes between HEAD and TAIL and put them in the chain
1641 of notes ended by NOTE_LIST. */
1643 void
1644 rm_other_notes (rtx head, rtx tail)
1646 rtx next_tail;
1647 rtx insn;
1649 note_list = 0;
1650 if (head == tail && (! INSN_P (head)))
1651 return;
1653 next_tail = NEXT_INSN (tail);
1654 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1656 rtx prev;
1658 /* Farm out notes, and maybe save them in NOTE_LIST.
1659 This is needed to keep the debugger from
1660 getting completely deranged. */
1661 if (NOTE_P (insn))
1663 prev = insn;
1665 insn = unlink_other_notes (insn, next_tail);
1667 if (prev == tail)
1668 abort ();
1669 if (prev == head)
1670 abort ();
1671 if (insn == next_tail)
1672 abort ();
1677 /* Functions for computation of registers live/usage info. */
1679 /* This function looks for a new register being defined.
1680 If the destination register is already used by the source,
1681 a new register is not needed. */
1683 static int
1684 find_set_reg_weight (rtx x)
1686 if (GET_CODE (x) == CLOBBER
1687 && register_operand (SET_DEST (x), VOIDmode))
1688 return 1;
1689 if (GET_CODE (x) == SET
1690 && register_operand (SET_DEST (x), VOIDmode))
1692 if (REG_P (SET_DEST (x)))
1694 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
1695 return 1;
1696 else
1697 return 0;
1699 return 1;
1701 return 0;
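/* For instance, (set (reg A) (reg B)) has weight 1 when A is not
   mentioned in the source (a new value is born), while
   (set (reg A) (plus (reg A) (const_int 1))) has weight 0, since the
   destination is already live in the source.  */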
1704 /* Calculate INSN_REG_WEIGHT for all insns of a block. */
1706 static void
1707 find_insn_reg_weight (int b)
1709 rtx insn, next_tail, head, tail;
1711 get_block_head_tail (b, &head, &tail);
1712 next_tail = NEXT_INSN (tail);
1714 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
1716 int reg_weight = 0;
1717 rtx x;
1719 /* Handle register life information. */
1720 if (! INSN_P (insn))
1721 continue;
1723 /* Increment weight for each register born here. */
1724 x = PATTERN (insn);
1725 reg_weight += find_set_reg_weight (x);
1726 if (GET_CODE (x) == PARALLEL)
1728 int j;
1729 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
1731 x = XVECEXP (PATTERN (insn), 0, j);
1732 reg_weight += find_set_reg_weight (x);
1735 /* Decrement weight for each register that dies here. */
1736 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
1738 if (REG_NOTE_KIND (x) == REG_DEAD
1739 || REG_NOTE_KIND (x) == REG_UNUSED)
1740 reg_weight--;
1743 INSN_REG_WEIGHT (insn) = reg_weight;
1747 /* Scheduling clock, modified in schedule_block() and queue_to_ready (). */
1748 static int clock_var;
1750 /* Move insns that became ready to fire from queue to ready list. */
1752 static void
1753 queue_to_ready (struct ready_list *ready)
1755 rtx insn;
1756 rtx link;
1758 q_ptr = NEXT_Q (q_ptr);
1760 /* Add all pending insns that can be scheduled without stalls to the
1761 ready list. */
1762 for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
1764 insn = XEXP (link, 0);
1765 q_size -= 1;
1767 if (sched_verbose >= 2)
1768 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1769 (*current_sched_info->print_insn) (insn, 0));
1771 ready_add (ready, insn);
1772 if (sched_verbose >= 2)
1773 fprintf (sched_dump, "moving to ready without stalls\n");
1775 insn_queue[q_ptr] = 0;
1777 /* If there are no ready insns, stall until one is ready and add all
1778 of the pending insns at that point to the ready list. */
1779 if (ready->n_ready == 0)
1781 int stalls;
1783 for (stalls = 1; stalls <= MAX_INSN_QUEUE_INDEX; stalls++)
1785 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1787 for (; link; link = XEXP (link, 1))
1789 insn = XEXP (link, 0);
1790 q_size -= 1;
1792 if (sched_verbose >= 2)
1793 fprintf (sched_dump, ";;\t\tQ-->Ready: insn %s: ",
1794 (*current_sched_info->print_insn) (insn, 0));
1796 ready_add (ready, insn);
1797 if (sched_verbose >= 2)
1798 fprintf (sched_dump, "moving to ready with %d stalls\n", stalls);
1800 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
1802 advance_one_cycle ();
1804 break;
1807 advance_one_cycle ();
1810 if ((!targetm.sched.use_dfa_pipeline_interface
1811 || !targetm.sched.use_dfa_pipeline_interface ())
1812 && sched_verbose && stalls)
1813 visualize_stall_cycles (stalls);
1815 q_ptr = NEXT_Q_AFTER (q_ptr, stalls);
1816 clock_var += stalls;
1820 /* Used by early_queue_to_ready. Determines whether it is "ok" to
1821 prematurely move INSN from the queue to the ready list. Currently,
1822 if a target defines the hook 'is_costly_dependence', this function
1823 uses the hook to check whether there exist any dependences which are
1824 considered costly by the target, between INSN and other insns that
1825 have already been scheduled. Dependences are checked up to Y cycles
1826 back, with default Y=1; the flag -fsched-stalled-insns-dep=Y allows
1827 controlling this value.
1828 (Other considerations could be taken into account instead, or in
1829 addition, depending on user flags and target hooks.) */
1831 static bool
1832 ok_for_early_queue_removal (rtx insn)
1834 int n_cycles;
1835 rtx prev_insn = last_scheduled_insn;
1837 if (targetm.sched.is_costly_dependence)
1839 for (n_cycles = flag_sched_stalled_insns_dep; n_cycles; n_cycles--)
1841 for ( ; prev_insn; prev_insn = PREV_INSN (prev_insn))
1843 rtx dep_link = 0;
1844 int dep_cost;
1846 if (!NOTE_P (prev_insn))
1848 dep_link = find_insn_list (insn, INSN_DEPEND (prev_insn));
1849 if (dep_link)
1851 dep_cost = insn_cost (prev_insn, dep_link, insn) ;
1852 if (targetm.sched.is_costly_dependence (prev_insn, insn,
1853 dep_link, dep_cost,
1854 flag_sched_stalled_insns_dep - n_cycles))
1855 return false;
1859 if (GET_MODE (prev_insn) == TImode) /* end of dispatch group */
1860 break;
1863 if (!prev_insn)
1864 break;
1865 prev_insn = PREV_INSN (prev_insn);
1869 return true;
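/* Illustrative sketch (hypothetical toy_* names): the shape of the
   check above -- walk backwards over recently scheduled insns and
   reject the candidate if any of them carries a dependence on it
   that a cost predicate considers costly (here, simply latency > 1;
   the real test is the target's is_costly_dependence hook).  */
#if 0
#include <stdbool.h>

struct toy_sched_insn
{
  int cycle;                  /* cycle on which it was scheduled */
  bool feeds_candidate;       /* does the candidate depend on it? */
  int dep_cost;               /* latency of that dependence */
};

static bool
toy_ok_for_early_removal (const struct toy_sched_insn *sched,
                          int n_sched, int clock, int window)
{
  int i;
  for (i = n_sched - 1; i >= 0; i--)
    {
      if (sched[i].cycle < clock - window)
        break;                            /* outside the window */
      if (sched[i].feeds_candidate && sched[i].dep_cost > 1)
        return false;                     /* costly dependence */
    }
  return true;
}
#endif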
1873 /* Remove insns from the queue before they become "ready" with respect
1874 to FU latency considerations. */
1876 static int
1877 early_queue_to_ready (state_t state, struct ready_list *ready)
1879 rtx insn;
1880 rtx link;
1881 rtx next_link;
1882 rtx prev_link;
1883 bool move_to_ready;
1884 int cost;
1885 state_t temp_state = alloca (dfa_state_size);
1886 int stalls;
1887 int insns_removed = 0;
1890 /* Flag '-fsched-stalled-insns=X' determines the aggressiveness of this
1891 function:
1893 X == 0: There is no limit on how many queued insns can be removed
1894 prematurely. (flag_sched_stalled_insns = -1).
1896 X >= 1: Only X queued insns can be removed prematurely in each
1897 invocation. (flag_sched_stalled_insns = X).
1899 Otherwise: Early queue removal is disabled.
1900 (flag_sched_stalled_insns = 0) */
1903 if (! flag_sched_stalled_insns)
1904 return 0;
1906 for (stalls = 0; stalls <= MAX_INSN_QUEUE_INDEX; stalls++)
1908 if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
1910 if (sched_verbose > 6)
1911 fprintf (sched_dump, ";; look at index %d + %d\n", q_ptr, stalls);
1913 prev_link = 0;
1914 while (link)
1916 next_link = XEXP (link, 1);
1917 insn = XEXP (link, 0);
1918 if (insn && sched_verbose > 6)
1919 print_rtl_single (sched_dump, insn);
1921 memcpy (temp_state, state, dfa_state_size);
1922 if (recog_memoized (insn) < 0)
1923 /* Use a non-negative cost to indicate that the insn is not
1924 ready, avoiding an infinite Q->R->Q->R... loop. */
1925 cost = 0;
1926 else
1927 cost = state_transition (temp_state, insn);
1929 if (sched_verbose >= 6)
1930 fprintf (sched_dump, "transition cost = %d\n", cost);
1932 move_to_ready = false;
1933 if (cost < 0)
1935 move_to_ready = ok_for_early_queue_removal (insn);
1936 if (move_to_ready == true)
1938 /* move from Q to R */
1939 q_size -= 1;
1940 ready_add (ready, insn);
1942 if (prev_link)
1943 XEXP (prev_link, 1) = next_link;
1944 else
1945 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = next_link;
1947 free_INSN_LIST_node (link);
1949 if (sched_verbose >= 2)
1950 fprintf (sched_dump, ";;\t\tEarly Q-->Ready: insn %s\n",
1951 (*current_sched_info->print_insn) (insn, 0));
1953 insns_removed++;
1954 if (insns_removed == flag_sched_stalled_insns)
1955 /* Remove only one insn from Q at a time. */
1956 return insns_removed;
1960 if (move_to_ready == false)
1961 prev_link = link;
1963 link = next_link;
1964 } /* while link */
1965 } /* if link */
1967 } /* for stalls.. */
1969 return insns_removed;
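/* Illustrative sketch: the per-invocation budget implied by
   flag_sched_stalled_insns, as documented at the top of this
   function -- -1 means no limit, 0 disables early removal, and
   X >= 1 caps the number of insns removed per call.  */
#if 0
static int
toy_may_remove_more (int flag_value, int removed_so_far)
{
  if (flag_value == 0)
    return 0;                            /* feature disabled */
  if (flag_value < 0)
    return 1;                            /* no limit */
  return removed_so_far < flag_value;    /* bounded budget */
}
#endif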
1973 /* Print the ready list for debugging purposes. Callable from debugger. */
1975 static void
1976 debug_ready_list (struct ready_list *ready)
1978 rtx *p;
1979 int i;
1981 if (ready->n_ready == 0)
1983 fprintf (sched_dump, "\n");
1984 return;
1987 p = ready_lastpos (ready);
1988 for (i = 0; i < ready->n_ready; i++)
1989 fprintf (sched_dump, " %s", (*current_sched_info->print_insn) (p[i], 0));
1990 fprintf (sched_dump, "\n");
1993 /* move_insn1: Remove INSN from insn chain, and link it after LAST insn. */
1995 static rtx
1996 move_insn1 (rtx insn, rtx last)
1998 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
1999 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2001 NEXT_INSN (insn) = NEXT_INSN (last);
2002 PREV_INSN (NEXT_INSN (last)) = insn;
2004 NEXT_INSN (last) = insn;
2005 PREV_INSN (insn) = last;
2007 return insn;
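/* Illustrative sketch (hypothetical toy_node type): move_insn1 above
   is the classic doubly-linked-list splice -- unlink a node from its
   current position, then relink it after another node.  Like
   move_insn1, this assumes both neighbors and LAST->next exist.  */
#if 0
struct toy_node
{
  struct toy_node *prev, *next;
};

static void
toy_move_after (struct toy_node *node, struct toy_node *last)
{
  /* Unlink NODE.  */
  node->prev->next = node->next;
  node->next->prev = node->prev;

  /* Relink NODE between LAST and LAST->next.  */
  node->next = last->next;
  last->next->prev = node;
  last->next = node;
  node->prev = last;
}
#endif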
2010 /* Search INSN for REG_SAVE_NOTE note pairs for
2011 NOTE_INSN_{LOOP,EHREGION}_{BEG,END} and convert them back into
2012 NOTEs. The REG_SAVE_NOTE following the first one contains the
2013 saved value of NOTE_BLOCK_NUMBER, which is useful for
2014 NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
2015 output by the instruction scheduler. Return the new value of LAST. */
2017 static rtx
2018 reemit_notes (rtx insn, rtx last)
2020 rtx note, retval;
2022 retval = last;
2023 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2025 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2027 enum insn_note note_type = INTVAL (XEXP (note, 0));
2029 last = emit_note_before (note_type, last);
2030 remove_note (insn, note);
2031 note = XEXP (note, 1);
2032 if (note_type == NOTE_INSN_EH_REGION_BEG
2033 || note_type == NOTE_INSN_EH_REGION_END)
2034 NOTE_EH_HANDLER (last) = INTVAL (XEXP (note, 0));
2035 remove_note (insn, note);
2038 return retval;
2041 /* Move INSN. Reemit notes if needed.
2043 Return the last insn emitted by the scheduler, which is the
2044 return value from the first call to reemit_notes. */
2046 static rtx
2047 move_insn (rtx insn, rtx last)
2049 rtx retval = NULL;
2051 move_insn1 (insn, last);
2053 /* If this is the first call to reemit_notes, then record
2054 its return value. */
2055 if (retval == NULL_RTX)
2056 retval = reemit_notes (insn, insn);
2057 else
2058 reemit_notes (insn, insn);
2060 SCHED_GROUP_P (insn) = 0;
2062 return retval;
2065 /* The following structure describes an entry of the stack of choices. */
2066 struct choice_entry
2068 /* Ordinal number of the issued insn in the ready queue. */
2069 int index;
2071 /* The number of remaining insns whose issue we should still try. */
2071 int rest;
2072 /* The number of issued essential insns. */
2073 int n;
2074 /* State after issuing the insn. */
2075 state_t state;
2078 /* The following array is used to implement a stack of choices used in
2079 function max_issue. */
2080 static struct choice_entry *choice_stack;
2082 /* The following variable is the number of essential insns issued on
2083 the current cycle. An insn is essential if it changes the
2084 processor's state. */
2085 static int cycle_issued_insns;
2087 /* The following variable is the maximal number of tries of issuing
2088 insns for the first cycle multipass insn scheduling. We define
2089 this value as constant * (DFA_LOOKAHEAD ** ISSUE_RATE). We would
2090 not need this constraint if all real insns (with non-negative
2091 codes) had reservations, because then the algorithm complexity
2092 would be O(DFA_LOOKAHEAD ** ISSUE_RATE). Unfortunately, the DFA
2093 descriptions might be incomplete and such insns might occur. For
2094 such descriptions, the complexity of the algorithm (without the
2095 constraint) could reach DFA_LOOKAHEAD ** N, where N is the queue length. */
2096 static int max_lookahead_tries;
2098 /* The following is the value of the hook
2099 `first_cycle_multipass_dfa_lookahead' at the last call of
2100 `max_issue'. */
2101 static int cached_first_cycle_multipass_dfa_lookahead = 0;
2103 /* The following is the value of `issue_rate' at the last call of
2104 `sched_init'. */
2105 static int cached_issue_rate = 0;
2107 /* The following function returns the maximal (or close to maximal)
2108 number of insns which can be issued on the same cycle, one of
2109 which is the insn with the best rank (the first insn in READY).
2110 To do this, the function tries different samples of ready insns.
2111 READY is the current queue `ready'. The global array READY_TRY
2112 reflects which insns have already been issued in this try. INDEX
2113 will contain the index of the best insn in READY. This function
2114 is used only for first cycle multipass scheduling. */
2115 static int
2116 max_issue (struct ready_list *ready, int *index)
2118 int n, i, all, n_ready, best, delay, tries_num;
2119 struct choice_entry *top;
2120 rtx insn;
2122 best = 0;
2123 memcpy (choice_stack->state, curr_state, dfa_state_size);
2124 top = choice_stack;
2125 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2126 top->n = 0;
2127 n_ready = ready->n_ready;
2128 for (all = i = 0; i < n_ready; i++)
2129 if (!ready_try [i])
2130 all++;
2131 i = 0;
2132 tries_num = 0;
2133 for (;;)
2135 if (top->rest == 0 || i >= n_ready)
2137 if (top == choice_stack)
2138 break;
2139 if (best < top - choice_stack && ready_try [0])
2141 best = top - choice_stack;
2142 *index = choice_stack [1].index;
2143 if (top->n == issue_rate - cycle_issued_insns || best == all)
2144 break;
2146 i = top->index;
2147 ready_try [i] = 0;
2148 top--;
2149 memcpy (curr_state, top->state, dfa_state_size);
2151 else if (!ready_try [i])
2153 tries_num++;
2154 if (tries_num > max_lookahead_tries)
2155 break;
2156 insn = ready_element (ready, i);
2157 delay = state_transition (curr_state, insn);
2158 if (delay < 0)
2160 if (state_dead_lock_p (curr_state))
2161 top->rest = 0;
2162 else
2163 top->rest--;
2164 n = top->n;
2165 if (memcmp (top->state, curr_state, dfa_state_size) != 0)
2166 n++;
2167 top++;
2168 top->rest = cached_first_cycle_multipass_dfa_lookahead;
2169 top->index = i;
2170 top->n = n;
2171 memcpy (top->state, curr_state, dfa_state_size);
2172 ready_try [i] = 1;
2173 i = -1;
2176 i++;
2178 while (top != choice_stack)
2180 ready_try [top->index] = 0;
2181 top--;
2183 memcpy (curr_state, choice_stack->state, dfa_state_size);
2184 return best;
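/* Illustrative sketch (hypothetical toy_* names): max_issue above is
   a depth-first search over which ready insns to issue on the
   current cycle, bounded by max_lookahead_tries.  This toy version
   models the DFA as a vector of busy functional units and returns
   the size of the largest issuable group containing insn 0, echoing
   the ready_try[0] test above.  */
#if 0
#define TOY_N_UNITS 2

struct toy_ready_insn
{
  int unit;                     /* functional unit it occupies */
};

static int toy_tries;
static const int toy_max_tries = 64;   /* search budget */

static int
toy_max_issue (const struct toy_ready_insn *ready, int n_ready,
               int i, int used[TOY_N_UNITS], int issued, int have_insn0)
{
  int best = have_insn0 ? issued : 0;
  for (; i < n_ready; i++)
    {
      int sub;
      if (toy_tries++ > toy_max_tries)
        break;                           /* out of budget */
      if (used[ready[i].unit])
        continue;                        /* unit already busy */
      used[ready[i].unit] = 1;           /* tentatively issue I */
      sub = toy_max_issue (ready, n_ready, i + 1, used,
                           issued + 1, have_insn0 || i == 0);
      used[ready[i].unit] = 0;           /* backtrack */
      if (sub > best)
        best = sub;
    }
  return best;
}
#endif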
2187 /* The following function chooses an insn from READY, removing it
2188 and updating READY accordingly. It is used only for first
2189 cycle multipass scheduling. */
2191 static rtx
2192 choose_ready (struct ready_list *ready)
2194 int lookahead = 0;
2196 if (targetm.sched.first_cycle_multipass_dfa_lookahead)
2197 lookahead = targetm.sched.first_cycle_multipass_dfa_lookahead ();
2198 if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0)))
2199 return ready_remove_first (ready);
2200 else
2202 /* Try to choose the better insn. */
2203 int index = 0, i;
2204 rtx insn;
2206 if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
2208 cached_first_cycle_multipass_dfa_lookahead = lookahead;
2209 max_lookahead_tries = 100;
2210 for (i = 0; i < issue_rate; i++)
2211 max_lookahead_tries *= lookahead;
2213 insn = ready_element (ready, 0);
2214 if (INSN_CODE (insn) < 0)
2215 return ready_remove_first (ready);
2216 for (i = 1; i < ready->n_ready; i++)
2218 insn = ready_element (ready, i);
2219 ready_try [i]
2220 = (INSN_CODE (insn) < 0
2221 || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
2222 && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn)));
2224 if (max_issue (ready, &index) == 0)
2225 return ready_remove_first (ready);
2226 else
2227 return ready_remove (ready, index);
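/* Illustrative sketch: the tries budget computed in choose_ready
   above is constant * lookahead ** issue_rate.  For example, with a
   lookahead of 4 and an issue rate of 2, the budget is
   100 * 4 * 4 = 1600 lookahead tries.  */
#if 0
static int
toy_lookahead_budget (int lookahead, int issue_rate)
{
  int i, budget = 100;          /* the constant used above */
  for (i = 0; i < issue_rate; i++)
    budget *= lookahead;
  return budget;
}
#endif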
2231 /* Use forward list scheduling to rearrange insns of block B in region RGN,
2232 possibly bringing insns from subsequent blocks in the same region. */
2234 void
2235 schedule_block (int b, int rgn_n_insns)
2237 struct ready_list ready;
2238 int i, first_cycle_insn_p;
2239 int can_issue_more;
2240 state_t temp_state = NULL; /* It is used for multipass scheduling. */
2241 int sort_p, advance, start_clock_var;
2243 /* Head/tail info for this block. */
2244 rtx prev_head = current_sched_info->prev_head;
2245 rtx next_tail = current_sched_info->next_tail;
2246 rtx head = NEXT_INSN (prev_head);
2247 rtx tail = PREV_INSN (next_tail);
2249 /* We used to have code to avoid getting parameters moved from hard
2250 argument registers into pseudos.
2252 However, it was removed when it proved to be of marginal benefit
2253 and caused problems because schedule_block and compute_forward_dependences
2254 had different notions of what the "head" insn was. */
2256 if (head == tail && (! INSN_P (head)))
2257 abort ();
2259 /* Debug info. */
2260 if (sched_verbose)
2262 fprintf (sched_dump, ";; ======================================================\n");
2263 fprintf (sched_dump,
2264 ";; -- basic block %d from %d to %d -- %s reload\n",
2265 b, INSN_UID (head), INSN_UID (tail),
2266 (reload_completed ? "after" : "before"));
2267 fprintf (sched_dump, ";; ======================================================\n");
2268 fprintf (sched_dump, "\n");
2270 visualize_alloc ();
2271 init_block_visualization ();
2274 if (targetm.sched.use_dfa_pipeline_interface
2275 && targetm.sched.use_dfa_pipeline_interface ())
2276 state_reset (curr_state);
2277 else
2278 clear_units ();
2280 /* Allocate the ready list. */
2281 ready.veclen = rgn_n_insns + 1 + issue_rate;
2282 ready.first = ready.veclen - 1;
2283 ready.vec = xmalloc (ready.veclen * sizeof (rtx));
2284 ready.n_ready = 0;
2286 if (targetm.sched.use_dfa_pipeline_interface
2287 && targetm.sched.use_dfa_pipeline_interface ())
2289 /* It is used for first cycle multipass scheduling. */
2290 temp_state = alloca (dfa_state_size);
2291 ready_try = xcalloc ((rgn_n_insns + 1), sizeof (char));
2292 choice_stack = xmalloc ((rgn_n_insns + 1)
2293 * sizeof (struct choice_entry));
2294 for (i = 0; i <= rgn_n_insns; i++)
2295 choice_stack[i].state = xmalloc (dfa_state_size);
2298 (*current_sched_info->init_ready_list) (&ready);
2300 if (targetm.sched.md_init)
2301 targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
2303 /* We start inserting insns after PREV_HEAD. */
2304 last_scheduled_insn = prev_head;
2306 /* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
2307 queue. */
2308 q_ptr = 0;
2309 q_size = 0;
2311 if (!targetm.sched.use_dfa_pipeline_interface
2312 || !targetm.sched.use_dfa_pipeline_interface ())
2313 max_insn_queue_index_macro_value = INSN_QUEUE_SIZE - 1;
2314 else
2315 max_insn_queue_index_macro_value = max_insn_queue_index;
2317 insn_queue = alloca ((MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx));
2318 memset (insn_queue, 0, (MAX_INSN_QUEUE_INDEX + 1) * sizeof (rtx));
2319 last_clock_var = -1;
2321 /* Start just before the beginning of time. */
2322 clock_var = -1;
2323 advance = 0;
2325 sort_p = TRUE;
2326 /* Loop until all the insns in BB are scheduled. */
2327 while ((*current_sched_info->schedule_more_p) ())
2331 start_clock_var = clock_var;
2333 clock_var++;
2335 advance_one_cycle ();
2337 /* Add to the ready list all pending insns that can be issued now.
2338 If there are no ready insns, increment clock until one
2339 is ready and add all pending insns at that point to the ready
2340 list. */
2341 queue_to_ready (&ready);
2343 if (ready.n_ready == 0)
2344 abort ();
2346 if (sched_verbose >= 2)
2348 fprintf (sched_dump, ";;\t\tReady list after queue_to_ready: ");
2349 debug_ready_list (&ready);
2351 advance -= clock_var - start_clock_var;
2353 while (advance > 0);
2355 if (sort_p)
2357 /* Sort the ready list based on priority. */
2358 ready_sort (&ready);
2360 if (sched_verbose >= 2)
2362 fprintf (sched_dump, ";;\t\tReady list after ready_sort: ");
2363 debug_ready_list (&ready);
2367 /* Allow the target to reorder the list, typically for
2368 better instruction bundling. */
2369 if (sort_p && targetm.sched.reorder
2370 && (ready.n_ready == 0
2371 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2372 can_issue_more =
2373 targetm.sched.reorder (sched_dump, sched_verbose,
2374 ready_lastpos (&ready),
2375 &ready.n_ready, clock_var);
2376 else
2377 can_issue_more = issue_rate;
2379 first_cycle_insn_p = 1;
2380 cycle_issued_insns = 0;
2381 for (;;)
2383 rtx insn;
2384 int cost;
2385 bool asm_p = false;
2387 if (sched_verbose >= 2)
2389 fprintf (sched_dump, ";;\tReady list (t =%3d): ",
2390 clock_var);
2391 debug_ready_list (&ready);
2394 if (!targetm.sched.use_dfa_pipeline_interface
2395 || !targetm.sched.use_dfa_pipeline_interface ())
2397 if (ready.n_ready == 0 || !can_issue_more
2398 || !(*current_sched_info->schedule_more_p) ())
2399 break;
2400 insn = ready_remove_first (&ready);
2401 cost = actual_hazard (insn_unit (insn), insn, clock_var, 0);
2403 else
2405 if (ready.n_ready == 0
2406 && can_issue_more
2407 && reload_completed)
2409 /* Allow scheduling insns directly from the queue in case
2410 there's nothing better to do (ready list is empty) but
2411 there are still vacant dispatch slots in the current cycle. */
2412 if (sched_verbose >= 6)
2413 fprintf (sched_dump, ";;\t\tSecond chance\n");
2414 memcpy (temp_state, curr_state, dfa_state_size);
2415 if (early_queue_to_ready (temp_state, &ready))
2416 ready_sort (&ready);
2419 if (ready.n_ready == 0 || !can_issue_more
2420 || state_dead_lock_p (curr_state)
2421 || !(*current_sched_info->schedule_more_p) ())
2422 break;
2424 /* Select and remove the insn from the ready list. */
2425 if (sort_p)
2426 insn = choose_ready (&ready);
2427 else
2428 insn = ready_remove_first (&ready);
2430 if (targetm.sched.dfa_new_cycle
2431 && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
2432 insn, last_clock_var,
2433 clock_var, &sort_p))
2435 ready_add (&ready, insn);
2436 break;
2439 sort_p = TRUE;
2440 memcpy (temp_state, curr_state, dfa_state_size);
2441 if (recog_memoized (insn) < 0)
2443 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
2444 || asm_noperands (PATTERN (insn)) >= 0);
2445 if (!first_cycle_insn_p && asm_p)
2446 /* This is an asm insn that we are trying to issue on a
2447 cycle other than the first. Issue it on the next cycle. */
2448 cost = 1;
2449 else
2450 /* A USE insn, or something else we don't need to
2451 understand. We can't pass these directly to
2452 state_transition because it will trigger a
2453 fatal error for unrecognizable insns. */
2454 cost = 0;
2456 else
2458 cost = state_transition (temp_state, insn);
2459 if (cost < 0)
2460 cost = 0;
2461 else if (cost == 0)
2462 cost = 1;
2467 if (cost >= 1)
2469 queue_insn (insn, cost);
2470 continue;
2473 if (! (*current_sched_info->can_schedule_ready_p) (insn))
2474 goto next;
2476 last_scheduled_insn = move_insn (insn, last_scheduled_insn);
2478 if (targetm.sched.use_dfa_pipeline_interface
2479 && targetm.sched.use_dfa_pipeline_interface ())
2481 if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
2482 cycle_issued_insns++;
2483 memcpy (curr_state, temp_state, dfa_state_size);
2486 if (targetm.sched.variable_issue)
2487 can_issue_more =
2488 targetm.sched.variable_issue (sched_dump, sched_verbose,
2489 insn, can_issue_more);
2490 /* A naked CLOBBER or USE generates no instruction, so do
2491 not count them against the issue rate. */
2492 else if (GET_CODE (PATTERN (insn)) != USE
2493 && GET_CODE (PATTERN (insn)) != CLOBBER)
2494 can_issue_more--;
2496 advance = schedule_insn (insn, &ready, clock_var);
2498 /* After issuing an asm insn we should start a new cycle. */
2499 if (advance == 0 && asm_p)
2500 advance = 1;
2501 if (advance != 0)
2502 break;
2504 next:
2505 first_cycle_insn_p = 0;
2507 /* Sort the ready list based on priority. This must be
2508 redone here, as schedule_insn may have readied additional
2509 insns that will not be sorted correctly. */
2510 if (ready.n_ready > 0)
2511 ready_sort (&ready);
2513 if (targetm.sched.reorder2
2514 && (ready.n_ready == 0
2515 || !SCHED_GROUP_P (ready_element (&ready, 0))))
2517 can_issue_more =
2518 targetm.sched.reorder2 (sched_dump, sched_verbose,
2519 ready.n_ready
2520 ? ready_lastpos (&ready) : NULL,
2521 &ready.n_ready, clock_var);
2525 if ((!targetm.sched.use_dfa_pipeline_interface
2526 || !targetm.sched.use_dfa_pipeline_interface ())
2527 && sched_verbose)
2528 /* Debug info. */
2529 visualize_scheduled_insns (clock_var);
2532 if (targetm.sched.md_finish)
2533 targetm.sched.md_finish (sched_dump, sched_verbose);
2535 /* Debug info. */
2536 if (sched_verbose)
2538 fprintf (sched_dump, ";;\tReady list (final): ");
2539 debug_ready_list (&ready);
2540 if (!targetm.sched.use_dfa_pipeline_interface
2541 || !targetm.sched.use_dfa_pipeline_interface ())
2542 print_block_visualization ("");
2545 /* Sanity check -- queue must be empty now. Meaningless if region has
2546 multiple bbs. */
2547 if (current_sched_info->queue_must_finish_empty && q_size != 0)
2548 abort ();
2550 /* Update head/tail boundaries. */
2551 head = NEXT_INSN (prev_head);
2552 tail = last_scheduled_insn;
2554 if (!reload_completed)
2556 rtx insn, link, next;
2558 /* INSN_TICK (minimum clock tick at which the insn becomes
2559 ready) may not be correct for the insns in the subsequent
2560 blocks of the region. We should use a correct value of
2561 `clock_var' or modify INSN_TICK. It is better to keep
2562 clock_var value equal to 0 at the start of a basic block.
2563 Therefore we modify INSN_TICK here. */
2564 for (insn = head; insn != tail; insn = NEXT_INSN (insn))
2565 if (INSN_P (insn))
2567 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
2569 next = XEXP (link, 0);
2570 INSN_TICK (next) -= clock_var;
2575 /* Restore-other-notes: NOTE_LIST is the end of a chain of notes
2576 previously found among the insns. Insert them at the beginning
2577 of the insns. */
2578 if (note_list != 0)
2580 rtx note_head = note_list;
2582 while (PREV_INSN (note_head))
2584 note_head = PREV_INSN (note_head);
2587 PREV_INSN (note_head) = PREV_INSN (head);
2588 NEXT_INSN (PREV_INSN (head)) = note_head;
2589 PREV_INSN (head) = note_list;
2590 NEXT_INSN (note_list) = head;
2591 head = note_head;
2594 /* Debugging. */
2595 if (sched_verbose)
2597 fprintf (sched_dump, ";; total time = %d\n;; new head = %d\n",
2598 clock_var, INSN_UID (head));
2599 fprintf (sched_dump, ";; new tail = %d\n\n",
2600 INSN_UID (tail));
2601 visualize_free ();
2604 current_sched_info->head = head;
2605 current_sched_info->tail = tail;
2607 free (ready.vec);
2609 if (targetm.sched.use_dfa_pipeline_interface
2610 && targetm.sched.use_dfa_pipeline_interface ())
2612 free (ready_try);
2613 for (i = 0; i <= rgn_n_insns; i++)
2614 free (choice_stack [i].state);
2615 free (choice_stack);
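/* Illustrative sketch (hypothetical toy_* names): the overall shape
   of the main loop of schedule_block above, with target hooks,
   visualization, and note bookkeeping stripped away.  */
#if 0
extern int  toy_more_to_schedule (void);
extern void toy_queue_to_ready (void);
extern int  toy_n_ready (void);
extern void toy_sort_ready (void);
extern int  toy_cycle_full (void);
extern int  toy_choose_ready (void);
extern int  toy_state_transition (int insn);
extern void toy_queue_insn (int insn, int delay);
extern void toy_commit (int insn, int clock);

void
toy_schedule_block (void)
{
  int clock = -1;

  while (toy_more_to_schedule ())
    {
      /* Advance the clock until something becomes ready.  */
      do
        {
          clock++;
          toy_queue_to_ready ();
        }
      while (toy_n_ready () == 0);

      toy_sort_ready ();

      /* Issue insns until the cycle is full or nothing fits.  */
      for (;;)
        {
          int insn, cost;
          if (toy_n_ready () == 0 || toy_cycle_full ())
            break;
          insn = toy_choose_ready ();
          cost = toy_state_transition (insn);   /* DFA check */
          if (cost >= 1)
            toy_queue_insn (insn, cost);        /* stalls: requeue */
          else
            toy_commit (insn, clock);           /* schedule now */
        }
    }
}
#endif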
2619 /* Set_priorities: compute priority of each insn in the block. */
2621 int
2622 set_priorities (rtx head, rtx tail)
2624 rtx insn;
2625 int n_insn;
2626 int sched_max_insns_priority =
2627 current_sched_info->sched_max_insns_priority;
2628 rtx prev_head;
2630 prev_head = PREV_INSN (head);
2632 if (head == tail && (! INSN_P (head)))
2633 return 0;
2635 n_insn = 0;
2636 sched_max_insns_priority = 0;
2637 for (insn = tail; insn != prev_head; insn = PREV_INSN (insn))
2639 if (NOTE_P (insn))
2640 continue;
2642 n_insn++;
2643 (void) priority (insn);
2645 if (INSN_PRIORITY_KNOWN (insn))
2646 sched_max_insns_priority =
2647 MAX (sched_max_insns_priority, INSN_PRIORITY (insn));
2649 sched_max_insns_priority += 1;
2650 current_sched_info->sched_max_insns_priority =
2651 sched_max_insns_priority;
2653 return n_insn;
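/* Illustrative sketch (hypothetical toy_* names): one simple model of
   the priorities computed above -- an insn's priority is the length
   of its longest dependence path to the end of the block.  The toy
   DAG is an adjacency matrix; cost[i][j] is the latency of the
   dependence i -> j, or -1 if there is none.  */
#if 0
#define TOY_N 4

static int toy_priority[TOY_N];

static void
toy_set_priorities (int cost[TOY_N][TOY_N])
{
  int i, j;

  /* Insns are numbered in source order, so all dependences point
     forward and one backward sweep suffices.  */
  for (i = TOY_N - 1; i >= 0; i--)
    {
      toy_priority[i] = 0;
      for (j = i + 1; j < TOY_N; j++)
        if (cost[i][j] >= 0
            && cost[i][j] + toy_priority[j] > toy_priority[i])
          toy_priority[i] = cost[i][j] + toy_priority[j];
    }
}
#endif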
2656 /* Initialize some global state for the scheduler. DUMP_FILE is to be used
2657 for debugging output. */
2659 void
2660 sched_init (FILE *dump_file)
2662 int luid;
2663 basic_block b;
2664 rtx insn;
2665 int i;
2667 /* If the target has cc0, disable speculative loads in its presence. */
2668 #ifdef HAVE_cc0
2669 flag_schedule_speculative_load = 0;
2670 #endif
2672 /* Set dump and sched_verbose for the desired debugging output. If no
2673 dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
2674 For -fsched-verbose=N, N>=10, print everything to stderr. */
2675 sched_verbose = sched_verbose_param;
2676 if (sched_verbose_param == 0 && dump_file)
2677 sched_verbose = 1;
2678 sched_dump = ((sched_verbose_param >= 10 || !dump_file)
2679 ? stderr : dump_file);
2681 /* Initialize issue_rate. */
2682 if (targetm.sched.issue_rate)
2683 issue_rate = targetm.sched.issue_rate ();
2684 else
2685 issue_rate = 1;
2687 if (cached_issue_rate != issue_rate)
2689 cached_issue_rate = issue_rate;
2690 /* To invalidate max_lookahead_tries: */
2691 cached_first_cycle_multipass_dfa_lookahead = 0;
2694 /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
2695 pseudos which do not cross calls. */
2696 old_max_uid = get_max_uid () + 1;
2698 h_i_d = xcalloc (old_max_uid, sizeof (*h_i_d));
2700 for (i = 0; i < old_max_uid; i++)
2701 h_i_d [i].cost = -1;
2703 if (targetm.sched.use_dfa_pipeline_interface
2704 && targetm.sched.use_dfa_pipeline_interface ())
2706 if (targetm.sched.init_dfa_pre_cycle_insn)
2707 targetm.sched.init_dfa_pre_cycle_insn ();
2709 if (targetm.sched.init_dfa_post_cycle_insn)
2710 targetm.sched.init_dfa_post_cycle_insn ();
2712 dfa_start ();
2713 dfa_state_size = state_size ();
2714 curr_state = xmalloc (dfa_state_size);
2717 h_i_d[0].luid = 0;
2718 luid = 1;
2719 FOR_EACH_BB (b)
2720 for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
2722 INSN_LUID (insn) = luid;
2724 /* Increment the next luid, unless this is a note. We don't
2725 really need separate IDs for notes and we don't want to
2726 schedule differently depending on whether or not there are
2727 line-number notes, i.e., depending on whether or not we're
2728 generating debugging information. */
2729 if (!NOTE_P (insn))
2730 ++luid;
2732 if (insn == BB_END (b))
2733 break;
2736 init_dependency_caches (luid);
2738 init_alias_analysis ();
2740 if (write_symbols != NO_DEBUG)
2742 rtx line;
2744 line_note_head = xcalloc (last_basic_block, sizeof (rtx));
2746 /* Save-line-note-head:
2747 Determine the line-number at the start of each basic block.
2748 This must be computed and saved now, because after a basic block's
2749 predecessor has been scheduled, it is impossible to accurately
2750 determine the correct line number for the first insn of the block. */
2752 FOR_EACH_BB (b)
2754 for (line = BB_HEAD (b); line; line = PREV_INSN (line))
2755 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
2757 line_note_head[b->index] = line;
2758 break;
2760 /* Do a forward search as well, since we won't get to see the first
2761 notes in a basic block. */
2762 for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
2764 if (INSN_P (line))
2765 break;
2766 if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
2767 line_note_head[b->index] = line;
2772 if ((!targetm.sched.use_dfa_pipeline_interface
2773 || !targetm.sched.use_dfa_pipeline_interface ())
2774 && sched_verbose)
2775 /* Find units used in this function, for visualization. */
2776 init_target_units ();
2778 /* ??? Add a NOTE after the last insn of the last basic block. It is not
2779 known why this is done. */
2781 insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
2782 if (NEXT_INSN (insn) == 0
2783 || (!NOTE_P (insn)
2784 && !LABEL_P (insn)
2785 /* Don't emit a NOTE if it would end up before a BARRIER. */
2786 && !BARRIER_P (NEXT_INSN (insn))))
2788 emit_note_after (NOTE_INSN_DELETED, BB_END (EXIT_BLOCK_PTR->prev_bb));
2789 /* Make the note appear outside the basic block. */
2790 BB_END (EXIT_BLOCK_PTR->prev_bb) = PREV_INSN (BB_END (EXIT_BLOCK_PTR->prev_bb));
2793 /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
2794 removing death notes. */
2795 FOR_EACH_BB_REVERSE (b)
2796 find_insn_reg_weight (b->index);
2798 if (targetm.sched.md_init_global)
2799 targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
2802 /* Free global data used during insn scheduling. */
2804 void
2805 sched_finish (void)
2807 free (h_i_d);
2809 if (targetm.sched.use_dfa_pipeline_interface
2810 && targetm.sched.use_dfa_pipeline_interface ())
2812 free (curr_state);
2813 dfa_finish ();
2815 free_dependency_caches ();
2816 end_alias_analysis ();
2817 if (write_symbols != NO_DEBUG)
2818 free (line_note_head);
2820 if (targetm.sched.md_finish_global)
2821 targetm.sched.md_finish_global (sched_dump, sched_verbose);
2823 #endif /* INSN_SCHEDULING */