/* Instruction scheduling pass.
   Copyright (C) 1992 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* Instruction scheduling pass.

   This pass implements list scheduling within basic blocks.  It is
   run after flow analysis, but before register allocation.  The
   scheduler works as follows:

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning
   values to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   Function unit conflicts are resolved during reverse list scheduling
   by tracking the time when each insn is committed to the schedule
   and from that, the time the function units it uses must be free.
   As insns on the ready list are considered for scheduling, those
   that would result in a blockage of the already committed insns are
   queued until no blockage will result.  Among the remaining insns on
   the ready list to be considered, the first one with the largest
   potential for causing a subsequent blockage is chosen.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

	1.  choose insn with lowest conflict cost, ties broken by
	2.  choose insn with the longest path to end of bb, ties broken by
	3.  choose insn that kills the most registers, ties broken by
	4.  choose insn that conflicts with the most ready insns, or finally
	5.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To a first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using LOG_LINKS.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not do any branch scheduling.  But
   that is OK, since we can use GNU's delayed branch scheduling
   pass to take care of this case.

   Also note that no further optimizations based on algebraic identities
   are performed, so this pass would be a good one to perform instruction
   splitting, such as breaking up a multiply instruction into shifts
   and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, basic_block_head,
   basic_block_end.

   The information in the line number notes is carefully retained by
   this pass.  All other NOTE insns are grouped in their same relative
   order at the beginning of basic blocks that have been scheduled.  */
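
/* As a small illustration of the process just described (hypothetical
   insns and latencies, for exposition only), consider the block

	insn 1:  r1 = mem[a]	; 2 cycle load
	insn 2:  r2 = r1 + 1
	insn 3:  r4 = r3 + 1	; independent of insns 1 and 2

   Working backwards, insn 2 is committed first; insn 1 must then issue
   at least two cycles before it, so insn 3 is drawn from the ready
   list to fill the intervening slot, and the final order 1, 3, 2
   hides the load latency.  */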

#include <stdio.h>
#include "config.h"
#include "rtl.h"
#include "basic-block.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"

#ifdef INSN_SCHEDULING
/* Arrays set up by scheduling for the same respective purposes as
   similar-named arrays set up by flow analysis.  We work with these
   arrays during the scheduling pass so we can compare values against
   unscheduled code.

   Values of these arrays are copied at the end of this pass into the
   arrays set up by flow analysis.  */
static short *sched_reg_n_deaths;
static int *sched_reg_n_calls_crossed;
static int *sched_reg_live_length;

/* Element N is the next insn that sets (hard or pseudo) register
   N within the current basic block; or zero, if there is no
   such insn.  Needed for new registers which may be introduced
   by splitting insns.  */
static rtx *reg_last_uses;
static rtx *reg_last_sets;

/* Vector indexed by INSN_UID giving the original ordering of the insns.  */
static int *insn_luid;
#define INSN_LUID(INSN) (insn_luid[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving each instruction a priority.  */
static int *insn_priority;
#define INSN_PRIORITY(INSN) (insn_priority[INSN_UID (INSN)])

static short *insn_costs;
#define INSN_COST(INSN) insn_costs[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the function units
   used.  */
static short *insn_units;
#define INSN_UNIT(INSN) insn_units[INSN_UID (INSN)]

/* Vector indexed by INSN_UID giving an encoding of the blockage range
   function.  The unit and the range are encoded.  */
static unsigned int *insn_blockage;
#define INSN_BLOCKAGE(INSN) insn_blockage[INSN_UID (INSN)]
#define UNIT_BITS 5
#define BLOCKAGE_MASK ((1 << BLOCKAGE_BITS) - 1)
#define ENCODE_BLOCKAGE(U,R)			\
  (((U) << BLOCKAGE_BITS			\
    | MIN_BLOCKAGE_COST (R)) << BLOCKAGE_BITS	\
   | MAX_BLOCKAGE_COST (R))
#define UNIT_BLOCKED(B) ((B) >> (2 * BLOCKAGE_BITS))
#define BLOCKAGE_RANGE(B) \
  (((((B) >> BLOCKAGE_BITS) & BLOCKAGE_MASK) << (HOST_BITS_PER_INT / 2)) \
   | ((B) & BLOCKAGE_MASK))

/* Encodings of the `<name>_unit_blockage_range' function.  */
#define MIN_BLOCKAGE_COST(R) ((R) >> (HOST_BITS_PER_INT / 2))
#define MAX_BLOCKAGE_COST(R) ((R) & ((1 << (HOST_BITS_PER_INT / 2)) - 1))
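
/* A worked example of the encoding above (hypothetical values;
   BLOCKAGE_BITS comes from the generated insn-attr.h):  take
   BLOCKAGE_BITS == 4 and HOST_BITS_PER_INT == 32, and suppose unit 2
   has a blockage range of 2..5 cycles.  The range word R is then
   (2 << 16) | 5, and caching it for unit 2 gives

	ENCODE_BLOCKAGE (2 + 1, R) == ((3 << 4 | 2) << 4) | 5 == 0x325

   from which UNIT_BLOCKED (0x325) == 3 recovers unit + 1, and
   BLOCKAGE_RANGE (0x325) == (2 << 16) | 5 recovers R.  */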

#define DONE_PRIORITY -1
#define MAX_PRIORITY 0x7fffffff
#define TAIL_PRIORITY 0x7ffffffe
#define LAUNCH_PRIORITY 0x7f000001
#define DONE_PRIORITY_P(INSN) (INSN_PRIORITY (INSN) < 0)
#define LOW_PRIORITY_P(INSN) ((INSN_PRIORITY (INSN) & 0x7f000000) == 0)

/* Vector indexed by INSN_UID giving number of insns referring to this insn.  */
static int *insn_ref_count;
#define INSN_REF_COUNT(INSN) (insn_ref_count[INSN_UID (INSN)])

/* Vector indexed by INSN_UID giving line-number note in effect for each
   insn.  For line-number notes, this indicates whether the note may be
   reused.  */
static rtx *line_note;
#define LINE_NOTE(INSN) (line_note[INSN_UID (INSN)])

/* Vector indexed by basic block number giving the starting line-number
   for each basic block.  */
static rtx *line_note_head;

/* List of important notes we must keep around.  This is a pointer to the
   last element in the list.  */
static rtx note_list;

/* Regsets telling whether a given register is live or dead before the last
   scheduled insn.  Must scan the instructions once before scheduling to
   determine what registers are live or dead at the end of the block.  */
static regset bb_dead_regs;
static regset bb_live_regs;

/* Regset telling whether a given register is live after the insn currently
   being scheduled.  Before processing an insn, this is equal to bb_live_regs
   above.  This is used so that we can find registers that are newly born/dead
   after processing an insn.  */
static regset old_live_regs;

/* The chain of REG_DEAD notes.  REG_DEAD notes are removed from all insns
   during the initial scan and reused later.  If there are not exactly as
   many REG_DEAD notes in the post-scheduled code as there were in the
   pre-scheduled code then we trigger an abort, because this indicates a
   bug.  */
static rtx dead_notes;

/* Queues, etc.  */

/* An instruction is ready to be scheduled when all insns following it
   have already been scheduled.  It is important to ensure that all
   insns which use its result will not be executed until its result
   has been computed.  An insn is maintained in one of four structures:

   (P) the "Pending" set of insns which cannot be scheduled until
       their dependencies have been satisfied.
   (Q) the "Queued" set of insns that can be scheduled when sufficient
       time has passed.
   (R) the "Ready" list of unscheduled, uncommitted insns.
   (S) the "Scheduled" list of insns.

   Initially, all insns are either "Pending" or "Ready" depending on
   whether their dependencies are satisfied.

   Insns move from the "Ready" list to the "Scheduled" list as they
   are committed to the schedule.  As this occurs, the insns in the
   "Pending" list have their dependencies satisfied and move to either
   the "Ready" list or the "Queued" set depending on whether
   sufficient time has passed to make them ready.  As time passes,
   insns move from the "Queued" set to the "Ready" list.  Insns may
   move from the "Ready" list to the "Queued" set if they are blocked
   due to a function unit conflict.

   The "Pending" list (P) are the insns in the LOG_LINKS of the unscheduled
   insns, i.e., those that are ready, queued, and pending.
   The "Queued" set (Q) is implemented by the variable `insn_queue'.
   The "Ready" list (R) is implemented by the variables `ready' and
   `n_ready'.
   The "Scheduled" list (S) is the new insn chain built by this pass.

   The transition (R->S) is implemented in the scheduling loop in
   `schedule_block' when the best insn to schedule is chosen.
   The transition (R->Q) is implemented in `schedule_select' when an
   insn is found to have a function unit conflict with the already
   committed insns.
   The transitions (P->R and P->Q) are implemented in `schedule_insn' as
   insns move from the ready list to the scheduled list.
   The transition (Q->R) is implemented at the top of the scheduling
   loop in `schedule_block' as time passes or stalls are introduced.  */

/* Implement a circular buffer to delay instructions until sufficient
   time has passed.  INSN_QUEUE_SIZE is a power of two larger than
   MAX_BLOCKAGE and MAX_READY_COST computed by genattr.c.  This is the
   longest time an insn may be queued.  */
static rtx insn_queue[INSN_QUEUE_SIZE];
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & (INSN_QUEUE_SIZE-1))
#define NEXT_Q_AFTER(X,C) (((X)+C) & (INSN_QUEUE_SIZE-1))
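
/* For example, with INSN_QUEUE_SIZE == 16, NEXT_Q (15) == 0 and
   NEXT_Q_AFTER (13, 5) == 2: an insn delayed 5 cycles from slot 13
   wraps around to slot 2.  Because INSN_QUEUE_SIZE is a power of two,
   the AND replaces a modulus operation.  A hedged sketch of how the
   scheduling loop might enqueue an insn COST cycles into the future
   (hypothetical variables INSN and COST):  */
#if 0
insn_queue[NEXT_Q_AFTER (q_ptr, cost)]
  = gen_rtx (INSN_LIST, VOIDmode, insn,
	     insn_queue[NEXT_Q_AFTER (q_ptr, cost)]);
q_size += 1;
#endif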

/* Vector indexed by INSN_UID giving the minimum clock tick at which
   the insn becomes ready.  This is used to note timing constraints for
   insns in the pending list.  */
static int *insn_tick;
#define INSN_TICK(INSN) (insn_tick[INSN_UID (INSN)])

/* Forward declarations.  */
static void sched_analyze_2 ();
static void schedule_block ();

/* Main entry point of this file.  */
void schedule_insns ();
#endif /* INSN_SCHEDULING */

#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Vector indexed by N giving the initial (unchanging) value known
   for pseudo-register N.  */
static rtx *reg_known_value;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.  */
/* ??? This is a problem only on the Convex.  The REG_EQUIV notes created in
   assign_parms mention the arg pointer, and there are explicit insns in the
   RTL that modify the arg pointer.  Thus we must ensure that such insns don't
   get scheduled across each other because that would invalidate the REG_EQUIV
   notes.  One could argue that the REG_EQUIV notes are wrong, but solving
   the problem in the scheduler will likely give better code, so we do it
   here.  */
static char *reg_known_equiv_p;

/* Indicates number of valid entries in reg_known_value.  */
static int reg_known_value_size;

static rtx
canon_rtx (x)
     rtx x;
{
  if (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER
      && REGNO (x) <= reg_known_value_size)
    return reg_known_value[REGNO (x)];
  else if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  /* We can tolerate LO_SUMs being offset here; these
	     rtl are used for nothing other than comparisons.  */
	  if (GET_CODE (x0) == CONST_INT)
	    return plus_constant_for_output (x1, INTVAL (x0));
	  else if (GET_CODE (x1) == CONST_INT)
	    return plus_constant_for_output (x0, INTVAL (x1));
	  return gen_rtx (PLUS, GET_MODE (x), x0, x1);
	}
    }
  return x;
}
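
/* For instance, if pseudo 97 is known to be equivalent to
   (symbol_ref "x") (a hypothetical example), canon_rtx maps

	(plus (reg 97) (const_int 4))

   to plus_constant_for_output (symbol_ref "x", 4), so two memory
   references made through different pseudos can still compare equal
   in rtx_equal_for_memref_p below.  */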

/* Set up all info needed to perform alias analysis on memory references.  */

void
init_alias_analysis ()
{
  int maxreg = max_reg_num ();
  rtx insn;
  rtx note;
  rtx set;

  reg_known_value_size = maxreg;

  reg_known_value
    = (rtx *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_value+FIRST_PSEUDO_REGISTER,
	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (rtx));

  reg_known_equiv_p
    = (char *) oballoc ((maxreg-FIRST_PSEUDO_REGISTER) * sizeof (char))
      - FIRST_PSEUDO_REGISTER;
  bzero (reg_known_equiv_p+FIRST_PSEUDO_REGISTER,
	 (maxreg-FIRST_PSEUDO_REGISTER) * sizeof (char));

  /* Fill in the entries with known constant values.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if ((set = single_set (insn)) != 0
	&& GET_CODE (SET_DEST (set)) == REG
	&& REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	&& (((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
	     && reg_n_sets[REGNO (SET_DEST (set))] == 1)
	    || (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != 0)
	&& GET_CODE (XEXP (note, 0)) != EXPR_LIST)
      {
	int regno = REGNO (SET_DEST (set));
	reg_known_value[regno] = XEXP (note, 0);
	reg_known_equiv_p[regno] = REG_NOTE_KIND (note) == REG_EQUIV;
      }

  /* Fill in the remaining entries.  */
  while (--maxreg >= FIRST_PSEUDO_REGISTER)
    if (reg_known_value[maxreg] == 0)
      reg_known_value[maxreg] = regno_reg_rtx[maxreg];
}

/* Return 1 if X and Y are identical-looking rtx's.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (x, y)
     rtx x, y;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;
  x = canon_rtx (x);
  y = canon_rtx (y);

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* REG, LABEL_REF, and SYMBOL_REF can be compared nonrecursively.  */

  if (code == REG)
    return REGNO (x) == REGNO (y);
  if (code == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);
  if (code == SYMBOL_REF)
    return XSTR (x, 0) == XSTR (y, 0);

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (XVECEXP (x, i, j),
					XVECEXP (y, i, j)) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
	    return 0;
	  break;

	case 'S':
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within
   X and return it, or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
	{
	  t = find_symbolic_term (XEXP (x, i));
	  if (t != 0)
	    return t;
	}
      else if (fmt[i] == 'E')
	break;
    }
  return 0;
}

/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

	(1) addresses involving the frame pointer cannot conflict
	    with addresses involving static variables.
	(2) static variables with different addresses cannot conflict.

   Nice to notice that varying addresses cannot conflict with fp if no
   local variables had their addresses taken, but that's too hard now.  */

/* ??? In Fortran, references to an array parameter can never conflict with
   another array parameter.  */
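
/* The overlap test that appears repeatedly below treats X as occupying
   bytes [0, xsize) and Y as occupying bytes [c, c+ysize) relative to a
   common base; the references are disjoint exactly when one ends before
   the other begins.  E.g. (hypothetical sizes) a 4-byte reference and a
   second 4-byte reference at offset c == 4 do not conflict, because
   `xsize > c' is false.  */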

static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     HOST_WIDE_INT c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0 ||
	    (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
	return 0;

      if (GET_CODE (y) == PLUS
	  && canon_rtx (XEXP (y, 0)) == x
	  && (y1 = canon_rtx (XEXP (y, 1)))
	  && GET_CODE (y1) == CONST_INT)
	{
	  c += INTVAL (y1);
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (y) == PLUS
	  && (y1 = canon_rtx (XEXP (y, 0)))
	  && CONSTANT_P (y1))
	return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
	{
	  /* The fact that Y is canonicalized means that this
	     PLUS rtx is canonicalized.  */
	  rtx y0 = XEXP (y, 0);
	  rtx y1 = XEXP (y, 1);

	  if (rtx_equal_for_memref_p (x1, y1))
	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	  if (rtx_equal_for_memref_p (x0, y0))
	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
	  if (GET_CODE (x1) == CONST_INT)
	    {
	      if (GET_CODE (y1) == CONST_INT)
		return memrefs_conflict_p (xsize, x0, ysize, y0,
					   c - INTVAL (x1) + INTVAL (y1));
	      else
		return memrefs_conflict_p (xsize, x0, ysize, y,
					   c - INTVAL (x1));
	    }
	  else if (GET_CODE (y1) == CONST_INT)
	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));

	  /* Handle case where we cannot understand iteration operators,
	     but we notice that the base addresses are distinct objects.  */
	  x = find_symbolic_term (x);
	  if (x == 0)
	    return 1;
	  y = find_symbolic_term (y);
	  if (y == 0)
	    return 1;
	  return rtx_equal_for_memref_p (x, y);
	}
      else if (GET_CODE (x1) == CONST_INT)
	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
	 PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
	return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
	{
	  /* Handle cases where we expect the second operands to be the
	     same, and check only whether the first operand would conflict
	     or not.  */
	  rtx x0, y0;
	  rtx x1 = canon_rtx (XEXP (x, 1));
	  rtx y1 = canon_rtx (XEXP (y, 1));
	  if (! rtx_equal_for_memref_p (x1, y1))
	    return 1;
	  x0 = canon_rtx (XEXP (x, 0));
	  y0 = canon_rtx (XEXP (y, 0));
	  if (rtx_equal_for_memref_p (x0, y0))
	    return (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));

	  /* Can't properly adjust our sizes.  */
	  if (GET_CODE (x1) != CONST_INT)
	    return 1;
	  xsize /= INTVAL (x1);
	  ysize /= INTVAL (x1);
	  c /= INTVAL (x1);
	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
	}
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
	{
	  c += (INTVAL (y) - INTVAL (x));
	  return (xsize == 0 || ysize == 0
		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
	}

      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (y) == CONST)
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, canon_rtx (XEXP (y, 0)), c);
	  else
	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
				       ysize, y, c);
	}
      if (GET_CODE (y) == CONST)
	return memrefs_conflict_p (xsize, x, ysize,
				   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
	return (rtx_equal_for_memref_p (x, y)
		&& (xsize == 0 || ysize == 0
		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0)));

      return 1;
    }
  return 1;
}

/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what registers
   are varying in known ways, and what registers are varying in unknown
   ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order cannot be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a non-QImode varying address can never
   conflict with a non-MEM_IN_STRUCT reference at a fixed address.  We must
   allow QImode aliasing because the ANSI C standard allows character
   pointers to alias anything.  We are assuming that characters are
   always QImode here.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If X is an unchanging read, then it can't possibly conflict with any
     non-unchanging store.  It may conflict with an unchanging write though,
     because there may be a single store to this address to initialize it.
     Just fall through to the code below to resolve the case where we have
     both an unchanging read and an unchanging write.  This won't handle all
     cases optimally, but the possible performance loss should be
     negligible.  */
  if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && GET_MODE (mem) != QImode
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && GET_MODE (x) != QImode
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If MEM is an unchanging read, then it can't possibly conflict with
     the store to X, because there is at most one store to MEM, and it must
     have occurred somewhere before MEM.  */
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && GET_MODE (mem) != QImode
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && GET_MODE (x) != QImode
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
	  || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
				  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
	      && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
		    && GET_MODE (mem) != QImode
		    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
	      && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
		    && GET_MODE (x) != QImode
		    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
	 for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
	 that has LABEL_PRESERVE_P set, which is represented by the same
	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
	 SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
	     && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
	next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
	 of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
	/* If this is a more restrictive type of dependence than the existing
	   one, then change the existing dependence to this type.  */
	if ((int) dep_type < (int) REG_NOTE_KIND (link))
	  PUT_REG_NOTE_KIND (link, dep_type);
	return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */
void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
	{
	  if (prev)
	    XEXP (prev, 1) = XEXP (link, 1);
	  else
	    LOG_LINKS (insn) = XEXP (link, 1);
	  found = 1;
	}
    }

  if (! found)
    abort ();
  return;
}

#ifndef INSN_SCHEDULING
void schedule_insns () {}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists.  */

static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */

static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */

static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non-constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */

static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */

static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */

static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */

static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
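
/* For example (hypothetical insns), in the sequence

	insn 1:  r1 = mem[a]
	insn 2:  mem[a] = r2
	insn 3:  mem[a] = r3

   insn 2 takes an anti dependence on insn 1 (write follows read) and
   insn 3 an output dependence on insn 2 (write follows write); no
   explicit link from insn 3 back to insn 1 is needed, since transitivity
   through insn 2 already orders them.  A hedged sketch of the matching
   add_dependence calls made by sched_analyze_1/2 below (the insn
   variables are hypothetical):  */
#if 0
add_dependence (insn2, insn1, REG_DEP_ANTI);	/* (2) anti dependence */
add_dependence (insn3, insn2, REG_DEP_OUTPUT);	/* (3) output dependence */
add_dependence (reader, writer, 0);		/* (1) true (data) dependence */
#endif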

/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value
   returned by function_units_used.  A function unit is encoded as the
   unit number if the value is non-negative and the complement of a
   mask if the value is negative.  A function unit index is the
   non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
	 We can't pass these directly to function_units_used because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
	unit = -1;
      else
	{
	  unit = function_units_used (insn);
	  /* Increment non-negative values so we can cache zero.  */
	  if (unit >= 0) unit++;
	}

      /* We only cache 16 bits of the result, so if the value is out of
	 range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
	  || unit >= 0
	  || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
	INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}
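
/* E.g. an insn that needs only unit 3 is encoded as the unit number 3,
   while an insn that needs both units 1 and 3 is encoded as
   ~((1 << 1) | (1 << 3)) == ~0xa, a negative value whose complement
   recovers the mask (see prepare_unit and schedule_unit below).  */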

/* Compute the blockage range for executing INSN on UNIT.  This caches
   the value returned by the blockage_range_function for the unit.
   These values are encoded in an int where the upper half gives the
   minimum value and the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if (UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
	 the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
	INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  int unit;

  bzero (unit_last_insn, sizeof (unit_last_insn));
  bzero (unit_tick, sizeof (unit_tick));
  bzero (unit_n_insns, sizeof (unit_n_insns));
}

/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int i;
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
	 insn and the unit's last insn is the candidate insn.  We want a
	 more exact measure of the blockage if we execute INSN at CLOCK
	 given when we committed the execution of the unit's last insn.

	 The blockage value is given by either the unit's max blockage
	 constant, blockage range function, or blockage function.  Use
	 the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
	{
	  if (function_units[unit].blockage_function)
	    tick += (function_units[unit].blockage_function
		     (insn, unit_last_insn[instance])
		     - function_units[unit].max_blockage);
	  else
	    tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
		     - function_units[unit].max_blockage);
	}
      if (tick - clock > cost)
	cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT at
   time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      int instance = unit;
#if MAX_MULTIPLICITY > 1
      /* Find the first free instance of the function unit and use that
	 one.  We assume that one is free.  */
      for (i = function_units[unit].multiplicity - 1; i > 0; i--)
	{
	  if (! actual_hazard_this_instance (unit, instance, insn, clock, 0))
	    break;
	  instance += FUNCTION_UNITS_SIZE;
	}
#endif
      unit_last_insn[instance] = insn;
      unit_tick[instance] = (clock + function_units[unit].max_blockage);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	schedule_unit (i, insn, clock);
}

/* Return the actual hazard cost of executing INSN on the units encoded by
   UNIT at time CLOCK if the previous actual hazard cost was COST.  */

__inline static int
actual_hazard (unit, insn, clock, cost)
     int unit, clock, cost;
     rtx insn;
{
  int i;

  if (unit >= 0)
    {
      /* Find the instance of the function unit with the minimum hazard.  */
      int instance = unit;
      int best = instance;
      int best_cost = actual_hazard_this_instance (unit, instance, insn,
						   clock, cost);
      int this_cost;

#if MAX_MULTIPLICITY > 1
      if (best_cost > cost)
	{
	  for (i = function_units[unit].multiplicity - 1; i > 0; i--)
	    {
	      instance += FUNCTION_UNITS_SIZE;
	      this_cost = actual_hazard_this_instance (unit, instance, insn,
						       clock, cost);
	      if (this_cost < best_cost)
		{
		  best = instance;
		  best_cost = this_cost;
		  if (this_cost <= cost)
		    break;
		}
	    }
	}
#endif
      cost = MAX (cost, best_cost);
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	cost = actual_hazard (i, insn, clock, cost);

  return cost;
}

/* Return the potential hazard cost of executing an instruction on the
   units encoded by UNIT if the previous potential hazard cost was COST.
   An insn with a large blockage time is chosen in preference to one
   with a smaller time; an insn that uses a unit that is more likely
   to be used is chosen in preference to one with a unit that is less
   used.  We are trying to minimize a subsequent actual hazard.  */

__inline static int
potential_hazard (unit, insn, cost)
     int unit, cost;
     rtx insn;
{
  int i, ncost;
  unsigned int minb, maxb;

  if (unit >= 0)
    {
      minb = maxb = function_units[unit].max_blockage;
      if (maxb > 1)
	{
	  if (function_units[unit].blockage_range_function)
	    {
	      maxb = minb = blockage_range (unit, insn);
	      maxb = MAX_BLOCKAGE_COST (maxb);
	      minb = MIN_BLOCKAGE_COST (minb);
	    }

	  if (maxb > 1)
	    {
	      /* Make the number of instructions left dominate.  Make the
		 minimum delay dominate the maximum delay.  If all these
		 are the same, use the unit number to add an arbitrary
		 ordering.  Other terms can be added.  */
	      ncost = minb * 0x40 + maxb;
	      ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit;
	      if (ncost > cost)
		cost = ncost;
	    }
	}
    }
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
	cost = potential_hazard (i, insn, cost);

  return cost;
}
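
/* A worked instance of the heuristic above (hypothetical numbers):  for
   minb == 2 and maxb == 5 on unit 1 with 4 insns left to use it,
   ncost == (2 * 0x40 + 5) * ((4 - 1) * 0x1000 + 1) == 0x85 * 0x3001,
   so a unit with more pending insns, or a longer minimum delay, yields
   the dominating cost.  */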

/* Compute cost of executing INSN given the dependence LINK on the insn USED.
   This is the number of virtual cycles taken between instruction issue and
   instruction results.  */

__inline static int
insn_cost (insn, link, used)
     rtx insn, link, used;
{
  register int cost = INSN_COST (insn);

  if (cost == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
	 We can't pass these directly to result_ready_cost because it will
	 trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
	{
	  INSN_COST (insn) = 1;
	  return 1;
	}
      else
	{
	  cost = result_ready_cost (insn);

	  if (cost < 1)
	    cost = 1;

	  INSN_COST (insn) = cost;
	}
    }

  /* A USE insn should never require the value used to be computed.  This
     allows the computation of a function's result and parameter values to
     overlap the return and call.  */
  recog_memoized (used);
  if (INSN_CODE (used) < 0)
    LINK_COST_FREE (link) = 1;

  /* If some dependencies vary the cost, compute the adjustment.  Most
     commonly, the adjustment is complete: either the cost is ignored
     (in the case of an output- or anti-dependence), or the cost is
     unchanged.  These values are cached in the link as LINK_COST_FREE
     and LINK_COST_ZERO.  */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
	LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
	LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif
  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
	return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
	{
	  prev = insn;
	  while (SCHED_GROUP_P (prev))
	    {
	      prev = PREV_INSN (prev);
	      INSN_REF_COUNT (prev) += 1;
	    }
	}

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
	{
	  rtx x = XEXP (prev, 0);

	  /* A dependence pointing to a note is always obsolete, because
	     sched_analyze_insn will have created any necessary new dependences
	     which replace it.  Notes can be created when instructions are
	     deleted by insn splitting, or by register allocation.  */
	  if (GET_CODE (x) == NOTE)
	    {
	      remove_dependence (insn, x);
	      continue;
	    }

	  /* Clear the link cost adjustment bits.  */
	  LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
	  LINK_COST_ZERO (prev) = 0;
#endif

	  /* This priority calculation was chosen because it results in the
	     least instruction movement, and does not hurt the performance
	     of the resulting code compared to the old algorithm.
	     This makes the sched algorithm more stable, which results
	     in better code, because there is less register pressure,
	     cross jumping is more likely to work, and debugging is easier.

	     When all instructions have a latency of 1, there is no need to
	     move any instructions.  Subtracting one here ensures that in such
	     cases all instructions will end up with a priority of one, and
	     hence no scheduling will be done.

	     The original code did not subtract the one, and added the
	     insn_cost of the current instruction to its priority (e.g.
	     move the insn_cost call down to the end).  */

	  if (REG_NOTE_KIND (prev) == 0)
	    /* Data dependence.  */
	    prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
	  else
	    /* Anti or output dependence.  Don't add the latency of this
	       insn's result, because it isn't being used.  */
	    prev_priority = priority (x);

	  if (prev_priority > max_priority)
	    max_priority = prev_priority;
	  INSN_REF_COUNT (x) += 1;
	}

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }
  return 0;
}
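
/* A short example of the calculation above (hypothetical chain):  for

	insn 1 -> insn 2 -> insn 3	(data dependences)

   where every insn_cost is 1, each link contributes cost - 1 == 0, so
   all three priorities are 1 and nothing is moved.  If insn 1 instead
   has a 3-cycle result latency, insns 2 and 3 get priority 3 while
   insn 1 keeps priority 1, recording the latency on the path into
   them.  */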

/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
	{
	  prev_link = link;
	  link = XEXP (link, 1);
	}

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies generated
   by the write to the destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (XEXP (dest, 1), insn);
	  sched_analyze_2 (XEXP (dest, 2), insn);
	}
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (--i >= 0)
	    {
	      rtx u;

	      for (u = reg_last_uses[regno+i]; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last_uses[regno + i] = 0;
	      if (reg_last_sets[regno + i])
		add_dependence (insn, reg_last_sets[regno + i],
				REG_DEP_OUTPUT);
	      reg_last_sets[regno + i] = insn;
	      if ((call_used_regs[regno + i] || global_regs[regno + i])
		  && last_function_call)
		/* Function calls clobber all call_used regs.  */
		add_dependence (insn, last_function_call, REG_DEP_ANTI);
	    }
	}
      else
	{
	  rtx u;

	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	  reg_last_uses[regno] = 0;
	  if (reg_last_sets[regno])
	    add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
	  reg_last_sets[regno] = insn;

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (! reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (reg_n_calls_crossed[regno] == 0 && last_function_call)
	    add_dependence (insn, last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  The number 32 was chosen because it
	     seems like a reasonable number.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the number 32.  */
	  flush_pending_lists (insn);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = pending_read_insns;
	  pending_mem = pending_read_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (anti_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = pending_write_insns;
	  pending_mem = pending_write_mems;
	  while (pending)
	    {
	      /* If a dependency already exists, don't create a new one.  */
	      if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
		if (output_dependence (XEXP (pending_mem, 0), dest))
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  if (last_pending_memory_flush)
	    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
				   insn, dest);
	}
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}
1725 /* Analyze the uses of memory and registers in rtx X in INSN. */
1727 static void
1728 sched_analyze_2 (x, insn)
1729 rtx x;
1730 rtx insn;
1732 register int i;
1733 register int j;
1734 register enum rtx_code code;
1735 register char *fmt;
1737 if (x == 0)
1738 return;
1740 code = GET_CODE (x);
1742 switch (code)
1744 case CONST_INT:
1745 case CONST_DOUBLE:
1746 case SYMBOL_REF:
1747 case CONST:
1748 case LABEL_REF:
1749 /* Ignore constants. Note that we must handle CONST_DOUBLE here
1750 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
1751 this does not mean that this insn is using cc0. */
1752 return;
1754 #ifdef HAVE_cc0
1755 case CC0:
1757 rtx link, prev;
1759 /* There may be a note before this insn now, but all notes will
1760 be removed before we actually try to schedule the insns, so
1761 it won't cause a problem later. We must avoid it here though. */
1763 /* User of CC0 depends on immediately preceding insn. */
1764 SCHED_GROUP_P (insn) = 1;
1766 /* Make a copy of all dependencies on the immediately previous insn,
1767 and add to this insn. This is so that all the dependencies will
1768 apply to the group. Remove an explicit dependence on this insn
1769 as SCHED_GROUP_P now represents it. */
1771 prev = PREV_INSN (insn);
1772 while (GET_CODE (prev) == NOTE)
1773 prev = PREV_INSN (prev);
1775 if (find_insn_list (prev, LOG_LINKS (insn)))
1776 remove_dependence (insn, prev);
1778 for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
1779 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1781 return;
1783 #endif
1785 case REG:
1787 int regno = REGNO (x);
1788 if (regno < FIRST_PSEUDO_REGISTER)
1790 int i;
1792 i = HARD_REGNO_NREGS (regno, GET_MODE (x));
1793 while (--i >= 0)
1795 reg_last_uses[regno + i]
1796 = gen_rtx (INSN_LIST, VOIDmode,
1797 insn, reg_last_uses[regno + i]);
1798 if (reg_last_sets[regno + i])
1799 add_dependence (insn, reg_last_sets[regno + i], 0);
1800 if ((call_used_regs[regno + i] || global_regs[regno + i])
1801 && last_function_call)
1802 /* Function calls clobber all call_used regs. */
1803 add_dependence (insn, last_function_call, REG_DEP_ANTI);
1806 else
1808 reg_last_uses[regno]
1809 = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
1810 if (reg_last_sets[regno])
1811 add_dependence (insn, reg_last_sets[regno], 0);
1813 /* Pseudos that are REG_EQUIV to something may be replaced
1814 by that during reloading. We need only add dependencies for
1815 the address in the REG_EQUIV note. */
1816 if (! reload_completed
1817 && reg_known_equiv_p[regno]
1818 && GET_CODE (reg_known_value[regno]) == MEM)
1819 sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
1821 /* If the register does not already cross any calls, then add this
1822 insn to the sched_before_next_call list so that it will still
1823 not cross calls after scheduling. */
1824 if (reg_n_calls_crossed[regno] == 0)
1825 add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
1827 return;
1830 case MEM:
1832 /* Reading memory. */
1834 rtx pending, pending_mem;
1836 pending = pending_read_insns;
1837 pending_mem = pending_read_mems;
1838 while (pending)
1840 /* If a dependency already exists, don't create a new one. */
1841 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1842 if (read_dependence (XEXP (pending_mem, 0), x))
1843 add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
1845 pending = XEXP (pending, 1);
1846 pending_mem = XEXP (pending_mem, 1);
1849 pending = pending_write_insns;
1850 pending_mem = pending_write_mems;
1851 while (pending)
1853 /* If a dependency already exists, don't create a new one. */
1854 if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
1855 if (true_dependence (XEXP (pending_mem, 0), x))
1856 add_dependence (insn, XEXP (pending, 0), 0);
1858 pending = XEXP (pending, 1);
1859 pending_mem = XEXP (pending_mem, 1);
1861 if (last_pending_memory_flush)
1862 add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);
1864 /* Always add these dependencies to pending_reads, since
1865 this insn may be followed by a write. */
1866 add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
1867 insn, x);
1869 /* Take advantage of tail recursion here. */
1870 sched_analyze_2 (XEXP (x, 0), insn);
1871 return;
1874 case ASM_OPERANDS:
1875 case ASM_INPUT:
1876 case UNSPEC_VOLATILE:
1877 case TRAP_IF:
1879 rtx u;
1881 /* Traditional and volatile asm instructions must be considered to use
1882 and clobber all hard registers and all of memory. So must
1883 TRAP_IF and UNSPEC_VOLATILE operations. */
1884 if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
1886 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1888 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
1889 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
1890 reg_last_uses[i] = 0;
1891 if (reg_last_sets[i])
1892 add_dependence (insn, reg_last_sets[i], 0);
1893 reg_last_sets[i] = insn;
1896 flush_pending_lists (insn);
1899 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
1900 We cannot just fall through here since then we would be confused
1901 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate a
1902 traditional asm, unlike its normal usage.  */
1904 if (code == ASM_OPERANDS)
1906 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
1907 sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
1908 return;
1910 break;
1913 case PRE_DEC:
1914 case POST_DEC:
1915 case PRE_INC:
1916 case POST_INC:
1917 /* These both read and modify the result. We must handle them as writes
1918 to get proper dependencies for following instructions. We must handle
1919 them as reads to get proper dependencies from this to previous
1920 instructions. Thus we need to pass them to both sched_analyze_1
1921 and sched_analyze_2. We must call sched_analyze_2 first in order
1922 to get the proper antecedent for the read. */
1923 sched_analyze_2 (XEXP (x, 0), insn);
1924 sched_analyze_1 (x, insn);
1925 return;
1928 /* Other cases: walk the insn. */
1929 fmt = GET_RTX_FORMAT (code);
1930 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1932 if (fmt[i] == 'e')
1933 sched_analyze_2 (XEXP (x, i), insn);
1934 else if (fmt[i] == 'E')
1935 for (j = 0; j < XVECLEN (x, i); j++)
1936 sched_analyze_2 (XVECEXP (x, i, j), insn);
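/* Expository addition (not part of the original pass): the format-string
   walk above is the generic rtl traversal idiom.  A hypothetical helper
   that merely counts REG references would use the identical skeleton;
   it is shown under "#if 0" purely as an illustration.  */
#if 0
static int
count_regs (x)
     rtx x;
{
  register int i, j, count = 0;
  register char *fmt;

  if (x == 0)
    return 0;
  if (GET_CODE (x) == REG)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')		/* single sub-expression */
	count += count_regs (XEXP (x, i));
      else if (fmt[i] == 'E')		/* vector of sub-expressions */
	for (j = 0; j < XVECLEN (x, i); j++)
	  count += count_regs (XVECEXP (x, i, j));
    }
  return count;
}
#endif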
1940 /* Analyze an INSN with pattern X to find all dependencies. */
1942 static void
1943 sched_analyze_insn (x, insn)
1944 rtx x, insn;
1946 register RTX_CODE code = GET_CODE (x);
1947 rtx link;
1949 if (code == SET || code == CLOBBER)
1950 sched_analyze_1 (x, insn);
1951 else if (code == PARALLEL)
1953 register int i;
1954 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1956 code = GET_CODE (XVECEXP (x, 0, i));
1957 if (code == SET || code == CLOBBER)
1958 sched_analyze_1 (XVECEXP (x, 0, i), insn);
1959 else
1960 sched_analyze_2 (XVECEXP (x, 0, i), insn);
1963 else
1964 sched_analyze_2 (x, insn);
1966 /* Handle function calls and function returns created by the epilogue
1967 threading code. */
1968 if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN)
1970 rtx dep_insn;
1971 rtx prev_dep_insn;
1973 /* When scheduling instructions, we make sure calls don't lose their
1974 accompanying USE insns by making them depend on one another in order.
1976 Also, we must do the same thing for returns created by the epilogue
1977 threading code. Note this code works only in this special case,
1978 because other passes make no guarantee that they will never emit
1979 an instruction between a USE and a RETURN. There is such a guarantee
1980 for USE instructions immediately before a call. */
1982 prev_dep_insn = insn;
1983 dep_insn = PREV_INSN (insn);
1984 while (GET_CODE (dep_insn) == INSN
1985 && GET_CODE (PATTERN (dep_insn)) == USE)
1987 SCHED_GROUP_P (prev_dep_insn) = 1;
1989 /* Make a copy of all dependencies on dep_insn, and add to insn.
1990 This is so that all of the dependencies will apply to the
1991 group. */
1993 for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
1994 add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
1996 prev_dep_insn = dep_insn;
1997 dep_insn = PREV_INSN (dep_insn);
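/* Worked example (expository addition; the insn numbers are
   hypothetical): given the sequence

       insn 10:  (use (reg:SI 4))
       insn 11:  (use (reg:SI 5))
       insn 12:  (call_insn ...)

   the loop above sets SCHED_GROUP_P on insns 11 and 12 and copies the
   LOG_LINKS of insns 10 and 11 onto insn 12, so the three insns are
   scheduled as one unit and nothing can be placed between the USEs and
   the call.  */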
2002 /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
2003 for every dependency. */
2005 static int
2006 sched_analyze (head, tail)
2007 rtx head, tail;
2009 register rtx insn;
2010 register int n_insns = 0;
2011 register rtx u;
2012 register int luid = 0;
2014 for (insn = head; ; insn = NEXT_INSN (insn))
2016 INSN_LUID (insn) = luid++;
2018 if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
2020 sched_analyze_insn (PATTERN (insn), insn);
2021 n_insns += 1;
2023 else if (GET_CODE (insn) == CALL_INSN)
2025 rtx dest = 0;
2026 rtx x;
2027 register int i;
2029 /* Any instruction using a hard register which may get clobbered
2030 by a call needs to be marked as dependent on this call.
2031 This prevents a use of a hard return reg from being moved
2032 past a void call (i.e. it does not explicitly set the hard
2033 return reg). */
2035 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2036 if (call_used_regs[i] || global_regs[i])
2038 for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
2039 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2040 reg_last_uses[i] = 0;
2041 if (reg_last_sets[i])
2042 add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
2043 reg_last_sets[i] = insn;
2044 /* Insn, being a CALL_INSN, magically depends on
2045 `last_function_call' already. */
2048 /* For each insn which shouldn't cross a call, add a dependence
2049 between that insn and this call insn. */
2050 x = LOG_LINKS (sched_before_next_call);
2051 while (x)
2053 add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
2054 x = XEXP (x, 1);
2056 LOG_LINKS (sched_before_next_call) = 0;
2058 sched_analyze_insn (PATTERN (insn), insn);
2060 /* We don't need to flush memory for a function call which does
2061 not involve memory. */
2062 if (! CONST_CALL_P (insn))
2064 /* In the absence of interprocedural alias analysis,
2065 we must flush all pending reads and writes, and
2066 start new dependencies starting from here. */
2067 flush_pending_lists (insn);
2070 /* Remember this function call; any later use of a hard register
2071 clobbered by calls will be made to depend on it. */
2072 last_function_call = insn;
2073 n_insns += 1;
2076 if (insn == tail)
2077 return n_insns;
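/* Expository addition: the dependencies created above are INSN_LIST
   nodes hanging off LOG_LINKS, with the dependence kind (0 for data,
   REG_DEP_ANTI, REG_DEP_OUTPUT) stored in the note-kind field.  A
   hypothetical debugging helper would walk them like this ("#if 0",
   illustration only):  */
#if 0
static void
dump_dependencies (insn, file)
     rtx insn;
     FILE *file;
{
  rtx link;

  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    fprintf (file, ";; insn %d depends on insn %d (kind %d)\n",
	     INSN_UID (insn), INSN_UID (XEXP (link, 0)),
	     (int) REG_NOTE_KIND (link));
}
#endif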
2081 /* Called when we see a set of a register. If death is true, then we are
2082 scanning backwards. Mark that register as unborn. If nobody says
2083 otherwise, that is how things will remain. If death is false, then we
2084 are scanning forwards. Mark that register as being born. */
2086 static void
2087 sched_note_set (b, x, death)
2088 int b;
2089 rtx x;
2090 int death;
2092 register int regno, j;
2093 register rtx reg = SET_DEST (x);
2094 int subreg_p = 0;
2096 if (reg == 0)
2097 return;
2099 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
2100 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
2102 /* Must treat modification of just one hardware register of a multi-reg
2103 value or just a byte field of a register exactly the same way that
2104 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
2105 does not kill the entire register. */
2106 if (GET_CODE (reg) != SUBREG
2107 || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
2108 subreg_p = 1;
2110 reg = SUBREG_REG (reg);
2113 if (GET_CODE (reg) != REG)
2114 return;
2116 /* Global registers are always live, so the code below does not apply
2117 to them. */
2119 regno = REGNO (reg);
2120 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2122 register int offset = regno / REGSET_ELT_BITS;
2123 register REGSET_ELT_TYPE bit
2124 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2126 if (death)
2128 /* If we only set part of the register, then this set does not
2129 kill it. */
2130 if (subreg_p)
2131 return;
2133 /* Try killing this register. */
2134 if (regno < FIRST_PSEUDO_REGISTER)
2136 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2137 while (--j >= 0)
2139 offset = (regno + j) / REGSET_ELT_BITS;
2140 bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2142 bb_live_regs[offset] &= ~bit;
2143 bb_dead_regs[offset] |= bit;
2146 else
2148 bb_live_regs[offset] &= ~bit;
2149 bb_dead_regs[offset] |= bit;
2152 else
2154 /* Make the register live again. */
2155 if (regno < FIRST_PSEUDO_REGISTER)
2157 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
2158 while (--j >= 0)
2160 offset = (regno + j) / REGSET_ELT_BITS;
2161 bit = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2163 bb_live_regs[offset] |= bit;
2164 bb_dead_regs[offset] &= ~bit;
2167 else
2169 bb_live_regs[offset] |= bit;
2170 bb_dead_regs[offset] &= ~bit;
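/* Worked example (expository addition): the offset/bit pair computed
   above addresses one bit per register in a regset.  Assuming
   REGSET_ELT_BITS == 32, register number 37 maps to

       offset = 37 / 32 = 1
       bit    = (REGSET_ELT_TYPE) 1 << (37 % 32) == 1 << 5

   so "bb_live_regs[1] |= bit" marks regno 37 born, while
   "bb_live_regs[1] &= ~bit" together with "bb_dead_regs[1] |= bit"
   marks it dead.  */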
2176 /* Macros and functions for keeping the priority queue sorted, and
2177 dealing with queueing and unqueueing of instructions. */
2179 #define SCHED_SORT(READY, NEW_READY, OLD_READY) \
2180 do { if ((NEW_READY) - (OLD_READY) == 1) \
2181 swap_sort (READY, NEW_READY); \
2182 else if ((NEW_READY) - (OLD_READY) > 1) \
2183 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); } \
2184 while (0)
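/* Expository note: SCHED_SORT re-sorts only as much as necessary.  If
   exactly one insn was added since the list was last sorted
   (NEW_READY - OLD_READY == 1), the single insertion step in swap_sort
   below suffices; if several were added, the whole vector is re-sorted
   with qsort; if none were added, the vector is already in order and
   is left alone.  */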
2186 /* Returns a positive value if y is preferred; returns a negative value if
2187 x is preferred. Should never return 0, since that will make the sort
2188 unstable. */
2190 static int
2191 rank_for_schedule (x, y)
2192 rtx *x, *y;
2194 rtx tmp = *y;
2195 rtx tmp2 = *x;
2196 rtx link;
2197 int tmp_class, tmp2_class;
2198 int value;
2200 /* Choose the instruction with the highest priority, if different. */
2201 if ((value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2)) != 0)
2202 return value;
2204 if (last_scheduled_insn)
2206 /* Classify the instructions into three classes:
2207 1) Data dependent on last scheduled insn.
2208 2) Anti/Output dependent on last scheduled insn.
2209 3) Independent of last scheduled insn, or has latency of one.
2210 Choose the insn from the highest numbered class if different. */
2211 link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
2212 if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
2213 tmp_class = 3;
2214 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
2215 tmp_class = 1;
2216 else
2217 tmp_class = 2;
2219 link = find_insn_list (tmp2, LOG_LINKS (last_scheduled_insn));
2220 if (link == 0 || insn_cost (tmp2, link, last_scheduled_insn) == 1)
2221 tmp2_class = 3;
2222 else if (REG_NOTE_KIND (link) == 0) /* Data dependence. */
2223 tmp2_class = 1;
2224 else
2225 tmp2_class = 2;
2227 if ((value = tmp_class - tmp2_class) != 0)
2228 return value;
2231 /* If insns are equally good, sort by INSN_LUID (original insn order),
2232 so that we make the sort stable. This minimizes instruction movement,
2233 thus minimizing sched's effect on debugging and cross-jumping. */
2234 return INSN_LUID (tmp) - INSN_LUID (tmp2);
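/* Worked example (expository addition): for ready-list entries X and Y
   with INSN_PRIORITY 3 and 5 respectively, the comparison above yields
   5 - 3 = 2, a positive value, so qsort places Y ahead of X; the
   highest-priority insn thus lands in ready[0], which is the insn the
   main loop schedules next.  Ties fall through to the INSN_LUID
   comparison, keeping the sort stable.  */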
2237 /* Resort the array A of length N, in which only the last element may be out of order. */
2239 __inline static void
2240 swap_sort (a, n)
2241 rtx *a;
2242 int n;
2244 rtx insn = a[n-1];
2245 int i = n-2;
2247 while (i >= 0 && rank_for_schedule (a+i, &insn) >= 0)
2249 a[i+1] = a[i];
2250 i -= 1;
2252 a[i+1] = insn;
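/* Worked example (expository addition): if A holds insns with
   priorities {9, 7, 2} in sorted order and a new insn of priority 5 is
   appended, swap_sort (a, 4) lifts out a[3], shifts the priority-2
   entry right, and reinserts the new insn, giving {9, 7, 5, 2} -- a
   single insertion-sort step rather than a full qsort.  */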
2255 static int max_priority;
2257 /* Add INSN to the insn queue so that it fires at least N_CYCLES
2258 before the currently executing insn. */
2260 __inline static void
2261 queue_insn (insn, n_cycles)
2262 rtx insn;
2263 int n_cycles;
2265 int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
2266 NEXT_INSN (insn) = insn_queue[next_q];
2267 insn_queue[next_q] = insn;
2268 q_size += 1;
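/* Worked example (expository addition): the insn queue is a circular
   buffer of INSN_QUEUE_SIZE buckets indexed by q_ptr, with NEXT_INSN
   reused as the within-bucket chain link.  Queueing an insn with
   n_cycles == 3 chains it into bucket NEXT_Q_AFTER (q_ptr, 3); since
   the main loop advances q_ptr one bucket per virtual cycle, the insn
   is dumped back onto the ready list exactly three cycles later.  */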
2271 /* Return nonzero if PAT is the pattern of an insn which makes a
2272 register live. */
2274 __inline static int
2275 birthing_insn_p (pat)
2276 rtx pat;
2278 int j;
2280 if (reload_completed == 1)
2281 return 0;
2283 if (GET_CODE (pat) == SET
2284 && GET_CODE (SET_DEST (pat)) == REG)
2286 rtx dest = SET_DEST (pat);
2287 int i = REGNO (dest);
2288 int offset = i / REGSET_ELT_BITS;
2289 REGSET_ELT_TYPE bit = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
2291 /* It would be more accurate to use refers_to_regno_p or
2292 reg_mentioned_p to determine when the dest is not live before this
2293 insn. */
2295 if (bb_live_regs[offset] & bit)
2296 return (reg_n_sets[i] == 1);
2298 return 0;
2300 if (GET_CODE (pat) == PARALLEL)
2302 for (j = 0; j < XVECLEN (pat, 0); j++)
2303 if (birthing_insn_p (XVECEXP (pat, 0, j)))
2304 return 1;
2306 return 0;
2309 /* PREV is an insn that is ready to execute. Adjust its priority if that
2310 will help shorten register lifetimes. */
2312 __inline static void
2313 adjust_priority (prev)
2314 rtx prev;
2316 /* Trying to shorten register lives after reload has completed
2317 is useless and wrong. It gives inaccurate schedules. */
2318 if (reload_completed == 0)
2320 rtx note;
2321 int n_deaths = 0;
2323 /* ??? This code has no effect, because REG_DEAD notes are removed
2324 before we ever get here. */
2325 for (note = REG_NOTES (prev); note; note = XEXP (note, 1))
2326 if (REG_NOTE_KIND (note) == REG_DEAD)
2327 n_deaths += 1;
2329 /* Defer scheduling insns which kill registers, since that
2330 shortens register lives. Prefer scheduling insns which
2331 make registers live for the same reason. */
2332 switch (n_deaths)
2334 default:
2335 INSN_PRIORITY (prev) >>= 3;
2336 break;
2337 case 3:
2338 INSN_PRIORITY (prev) >>= 2;
2339 break;
2340 case 2:
2341 case 1:
2342 INSN_PRIORITY (prev) >>= 1;
2343 break;
2344 case 0:
2345 if (birthing_insn_p (PATTERN (prev)))
2347 int max = max_priority;
2349 if (max > INSN_PRIORITY (prev))
2350 INSN_PRIORITY (prev) = max;
2352 break;
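/* Worked example (expository addition): an insn carrying one or two
   REG_DEAD notes has its priority halved (>>= 1), one with exactly
   three is quartered (>>= 2), and four or more cut it to an eighth
   (>>= 3); an insn that kills nothing but makes a register live is
   raised toward max_priority.  Register-killing insns are thus
   deferred and register-birthing insns preferred, shortening register
   lifetimes.  */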
2357 /* INSN is the "currently executing insn". Launch each insn which was
2358 waiting on INSN (in the backwards dataflow sense). READY is a
2359 vector of insns which are ready to fire. N_READY is the number of
2360 elements in READY. CLOCK is the current virtual cycle. */
2362 static int
2363 schedule_insn (insn, ready, n_ready, clock)
2364 rtx insn;
2365 rtx *ready;
2366 int n_ready;
2367 int clock;
2369 rtx link;
2370 int new_ready = n_ready;
2372 if (MAX_BLOCKAGE > 1)
2373 schedule_unit (insn_unit (insn), insn, clock);
2375 if (LOG_LINKS (insn) == 0)
2376 return n_ready;
2378 /* This is used by the function adjust_priority above. */
2379 if (n_ready > 0)
2380 max_priority = MAX (INSN_PRIORITY (ready[0]), INSN_PRIORITY (insn));
2381 else
2382 max_priority = INSN_PRIORITY (insn);
2384 for (link = LOG_LINKS (insn); link != 0; link = XEXP (link, 1))
2386 rtx prev = XEXP (link, 0);
2387 int cost = insn_cost (prev, link, insn);
2389 if ((INSN_REF_COUNT (prev) -= 1) != 0)
2391 /* We satisfied one requirement to fire PREV. Record the earliest
2392 time when PREV can fire. No need to do this if the cost is 1,
2393 because PREV can fire no sooner than the next cycle. */
2394 if (cost > 1)
2395 INSN_TICK (prev) = MAX (INSN_TICK (prev), clock + cost);
2397 else
2399 /* We satisfied the last requirement to fire PREV. Ensure that all
2400 timing requirements are satisfied. */
2401 if (INSN_TICK (prev) - clock > cost)
2402 cost = INSN_TICK (prev) - clock;
2404 /* Adjust the priority of PREV and either put it on the ready
2405 list or queue it. */
2406 adjust_priority (prev);
2407 if (cost <= 1)
2408 ready[new_ready++] = prev;
2409 else
2410 queue_insn (prev, cost);
2414 return new_ready;
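/* Worked example (expository addition): suppose PREV feeds three insns
   that have now been scheduled.  Each call here decrements
   INSN_REF_COUNT (prev); the first two decrements only record, via
   INSN_TICK, the earliest cycle at which PREV may fire (when the cost
   exceeds 1), and the decrement that reaches zero finally places PREV
   on the ready list -- or into the insn queue if its remaining latency
   forces a stall.  */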
2417 /* Given N_READY insns in the ready list READY at time CLOCK, queue
2418 those that are blocked due to function unit hazards and rearrange
2419 the remaining ones to minimize subsequent function unit hazards. */
2421 static int
2422 schedule_select (ready, n_ready, clock, file)
2423 rtx *ready;
2424 int n_ready, clock;
2425 FILE *file;
2427 int pri = INSN_PRIORITY (ready[0]);
2428 int i, j, k, q, cost, best_cost, best_insn = 0, new_ready = n_ready;
2429 rtx insn;
2431 /* Work down the ready list in groups of instructions with the same
2432 priority value. Queue insns in the group that are blocked and
2433 select among those that remain for the one with the largest
2434 potential hazard. */
2435 for (i = 0; i < n_ready; i = j)
2437 int opri = pri;
2438 for (j = i + 1; j < n_ready; j++)
2439 if ((pri = INSN_PRIORITY (ready[j])) != opri)
2440 break;
2442 /* Queue insns in the group that are blocked. */
2443 for (k = i, q = 0; k < j; k++)
2445 insn = ready[k];
2446 if ((cost = actual_hazard (insn_unit (insn), insn, clock, 0)) != 0)
2448 q++;
2449 ready[k] = 0;
2450 queue_insn (insn, cost);
2451 if (file)
2452 fprintf (file, "\n;; blocking insn %d for %d cycles",
2453 INSN_UID (insn), cost);
2456 new_ready -= q;
2458 /* Check the next group if all insns were queued. */
2459 if (j - i - q == 0)
2460 continue;
2462 /* If more than one remains, select the first one with the largest
2463 potential hazard. */
2464 else if (j - i - q > 1)
2466 best_cost = -1;
2467 for (k = i; k < j; k++)
2469 if ((insn = ready[k]) == 0)
2470 continue;
2471 if ((cost = potential_hazard (insn_unit (insn), insn, 0))
2472 > best_cost)
2474 best_cost = cost;
2475 best_insn = k;
2479 /* We have found a suitable insn to schedule. */
2480 break;
2483 /* Move the best insn to the front of the ready list. */
2484 if (best_insn != 0)
2486 if (file)
2488 fprintf (file, ", now");
2489 for (i = 0; i < n_ready; i++)
2490 if (ready[i])
2491 fprintf (file, " %d", INSN_UID (ready[i]));
2492 fprintf (file, "\n;; insn %d has a greater potential hazard",
2493 INSN_UID (ready[best_insn]));
2495 for (i = best_insn; i > 0; i--)
2497 insn = ready[i-1];
2498 ready[i-1] = ready[i];
2499 ready[i] = insn;
2503 /* Compact the ready list. */
2504 if (new_ready < n_ready)
2505 for (i = j = 0; i < n_ready; i++)
2506 if (ready[i])
2507 ready[j++] = ready[i];
2509 return new_ready;
2512 /* Add a REG_DEAD note for REG to INSN, reusing a REG_DEAD note from the
2513 dead_notes list. */
2515 static void
2516 create_reg_dead_note (reg, insn)
2517 rtx reg, insn;
2519 rtx link, backlink;
2521 /* The number of registers killed after scheduling must be the same as the
2522 number of registers killed before scheduling. The number of REG_DEAD
2523 notes may not be conserved, i.e. two SImode hard register REG_DEAD notes
2524 might become one DImode hard register REG_DEAD note, but the number of
2525 registers killed will be conserved.
2527 We carefully remove REG_DEAD notes from the dead_notes list, so that
2528 there will be none left at the end. If we run out early, then there
2529 is a bug somewhere in flow, combine and/or sched. */
2531 if (dead_notes == 0)
2533 #if 1
2534 abort ();
2535 #else
2536 link = rtx_alloc (EXPR_LIST);
2537 PUT_REG_NOTE_KIND (link, REG_DEAD);
2538 #endif
2540 else
2542 /* Number of regs killed by REG. */
2543 int regs_killed = (REGNO (reg) >= FIRST_PSEUDO_REGISTER ? 1
2544 : HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)));
2545 /* Number of regs killed by REG_DEAD notes taken off the list. */
2546 int reg_note_regs;
2548 link = dead_notes;
2549 reg_note_regs = (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2550 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2551 GET_MODE (XEXP (link, 0))));
2552 while (reg_note_regs < regs_killed)
2554 link = XEXP (link, 1);
2555 reg_note_regs += (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
2556 : HARD_REGNO_NREGS (REGNO (XEXP (link, 0)),
2557 GET_MODE (XEXP (link, 0))));
2559 dead_notes = XEXP (link, 1);
2561 /* If we took off too many register kills, put the extra ones back. */
2562 while (reg_note_regs > regs_killed)
2564 rtx temp_reg, temp_link;
2566 temp_reg = gen_rtx (REG, word_mode, 0);
2567 temp_link = rtx_alloc (EXPR_LIST);
2568 PUT_REG_NOTE_KIND (temp_link, REG_DEAD);
2569 XEXP (temp_link, 0) = temp_reg;
2570 XEXP (temp_link, 1) = dead_notes;
2571 dead_notes = temp_link;
2572 reg_note_regs--;
2576 XEXP (link, 0) = reg;
2577 XEXP (link, 1) = REG_NOTES (insn);
2578 REG_NOTES (insn) = link;
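/* Worked example (expository addition): if REG is a two-word DImode
   hard register (regs_killed == 2) and dead_notes holds only one-word
   notes, two notes are consumed from the list; conversely, satisfying
   a one-word kill from a two-word note (reg_note_regs == 2 >
   regs_killed == 1) makes the loop above manufacture a fresh one-word
   note and push it back, so the total count of register kills is
   conserved either way.  */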
2581 /* Subroutine of attach_deaths_insn--handles the recursive search
2582 through X in INSN.  If SET_P is true, then X is being modified by the insn. */
2584 static void
2585 attach_deaths (x, insn, set_p)
2586 rtx x;
2587 rtx insn;
2588 int set_p;
2590 register int i;
2591 register int j;
2592 register enum rtx_code code;
2593 register char *fmt;
2595 if (x == 0)
2596 return;
2598 code = GET_CODE (x);
2600 switch (code)
2602 case CONST_INT:
2603 case CONST_DOUBLE:
2604 case LABEL_REF:
2605 case SYMBOL_REF:
2606 case CONST:
2607 case CODE_LABEL:
2608 case PC:
2609 case CC0:
2610 /* Get rid of the easy cases first. */
2611 return;
2613 case REG:
2615 /* If the register dies in this insn, queue that note, and mark
2616 this register as needing to die. */
2617 /* This code is very similar to mark_used_1 (if set_p is false)
2618 and mark_set_1 (if set_p is true) in flow.c. */
2620 register int regno = REGNO (x);
2621 register int offset = regno / REGSET_ELT_BITS;
2622 register REGSET_ELT_TYPE bit
2623 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
2624 REGSET_ELT_TYPE all_needed = (old_live_regs[offset] & bit);
2625 REGSET_ELT_TYPE some_needed = (old_live_regs[offset] & bit);
2627 if (set_p)
2628 return;
2630 if (regno < FIRST_PSEUDO_REGISTER)
2632 int n;
2634 n = HARD_REGNO_NREGS (regno, GET_MODE (x));
2635 while (--n > 0)
2637 some_needed |= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2638 & ((REGSET_ELT_TYPE) 1
2639 << ((regno + n) % REGSET_ELT_BITS)));
2640 all_needed &= (old_live_regs[(regno + n) / REGSET_ELT_BITS]
2641 & ((REGSET_ELT_TYPE) 1
2642 << ((regno + n) % REGSET_ELT_BITS)));
2646 /* If it wasn't live before we started, then add a REG_DEAD note.
2647 We must check the previous lifetime info not the current info,
2648 because we may have to execute this code several times, e.g.
2649 once for a clobber (which doesn't add a note) and later
2650 for a use (which does add a note).
2652 Always make the register live. We must do this even if it was
2653 live before, because this may be an insn which sets and uses
2654 the same register, in which case the register has already been
2655 killed, so we must make it live again.
2657 Global registers are always live, and should never have a REG_DEAD
2658 note added for them, so none of the code below applies to them. */
2660 if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
2662 /* Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2663 STACK_POINTER_REGNUM, since these are always considered to be
2664 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2665 if (regno != FRAME_POINTER_REGNUM
2666 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2667 && ! (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
2668 #endif
2669 && regno != STACK_POINTER_REGNUM)
2671 if (! all_needed && ! dead_or_set_p (insn, x))
2673 /* If none of the words in X is needed, make a REG_DEAD
2674 note. Otherwise, we must make partial REG_DEAD
2675 notes. */
2676 if (! some_needed)
2677 create_reg_dead_note (x, insn);
2678 else
2680 int i;
2682 /* Don't make a REG_DEAD note for a part of a
2683 register that is set in the insn. */
2684 for (i = HARD_REGNO_NREGS (regno, GET_MODE (x)) - 1;
2685 i >= 0; i--)
2686 if ((old_live_regs[(regno + i) / REGSET_ELT_BITS]
2687 & ((REGSET_ELT_TYPE) 1
2688 << ((regno + i) % REGSET_ELT_BITS))) == 0
2689 && ! dead_or_set_regno_p (insn, regno + i))
2690 create_reg_dead_note (gen_rtx (REG, word_mode,
2691 regno + i),
2692 insn);
2697 if (regno < FIRST_PSEUDO_REGISTER)
2699 int j = HARD_REGNO_NREGS (regno, GET_MODE (x));
2700 while (--j >= 0)
2702 offset = (regno + j) / REGSET_ELT_BITS;
2703 bit
2704 = (REGSET_ELT_TYPE) 1 << ((regno + j) % REGSET_ELT_BITS);
2706 bb_dead_regs[offset] &= ~bit;
2707 bb_live_regs[offset] |= bit;
2710 else
2712 bb_dead_regs[offset] &= ~bit;
2713 bb_live_regs[offset] |= bit;
2716 return;
2719 case MEM:
2720 /* Handle tail-recursive case. */
2721 attach_deaths (XEXP (x, 0), insn, 0);
2722 return;
2724 case SUBREG:
2725 case STRICT_LOW_PART:
2726 /* These two cases preserve the value of SET_P, so handle them
2727 separately. */
2728 attach_deaths (XEXP (x, 0), insn, set_p);
2729 return;
2731 case ZERO_EXTRACT:
2732 case SIGN_EXTRACT:
2733 /* This case preserves the value of SET_P for the first operand, but
2734 clears it for the other two. */
2735 attach_deaths (XEXP (x, 0), insn, set_p);
2736 attach_deaths (XEXP (x, 1), insn, 0);
2737 attach_deaths (XEXP (x, 2), insn, 0);
2738 return;
2740 default:
2741 /* Other cases: walk the insn. */
2742 fmt = GET_RTX_FORMAT (code);
2743 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2745 if (fmt[i] == 'e')
2746 attach_deaths (XEXP (x, i), insn, 0);
2747 else if (fmt[i] == 'E')
2748 for (j = 0; j < XVECLEN (x, i); j++)
2749 attach_deaths (XVECEXP (x, i, j), insn, 0);
2754 /* After INSN has executed, add register death notes for each register
2755 that is dead after INSN. */
2757 static void
2758 attach_deaths_insn (insn)
2759 rtx insn;
2761 rtx x = PATTERN (insn);
2762 register RTX_CODE code = GET_CODE (x);
2764 if (code == SET)
2766 attach_deaths (SET_SRC (x), insn, 0);
2768 /* A register might die here even if it is the destination, e.g.
2769 it is the target of a volatile read and is otherwise unused.
2770 Hence we must always call attach_deaths for the SET_DEST. */
2771 attach_deaths (SET_DEST (x), insn, 1);
2773 else if (code == PARALLEL)
2775 register int i;
2776 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2778 code = GET_CODE (XVECEXP (x, 0, i));
2779 if (code == SET)
2781 attach_deaths (SET_SRC (XVECEXP (x, 0, i)), insn, 0);
2783 attach_deaths (SET_DEST (XVECEXP (x, 0, i)), insn, 1);
2785 /* Flow does not add REG_DEAD notes to registers that die in
2786 clobbers, so we can't either. */
2787 else if (code != CLOBBER)
2788 attach_deaths (XVECEXP (x, 0, i), insn, 0);
2791 /* Flow does not add REG_DEAD notes to registers that die in
2792 clobbers, so we can't either. */
2793 else if (code != CLOBBER)
2794 attach_deaths (x, insn, 0);
2797 /* Delete notes beginning with INSN and maybe put them in the chain
2798 of notes ended by NOTE_LIST.
2799 Returns the insn following the notes. */
2801 static rtx
2802 unlink_notes (insn, tail)
2803 rtx insn, tail;
2805 rtx prev = PREV_INSN (insn);
2807 while (insn != tail && GET_CODE (insn) == NOTE)
2809 rtx next = NEXT_INSN (insn);
2810 /* Delete the note from its current position. */
2811 if (prev)
2812 NEXT_INSN (prev) = next;
2813 if (next)
2814 PREV_INSN (next) = prev;
2816 if (write_symbols != NO_DEBUG && NOTE_LINE_NUMBER (insn) > 0)
2817 /* Record line-number notes so they can be reused. */
2818 LINE_NOTE (insn) = insn;
2819 else
2821 /* Insert the note at the end of the notes list. */
2822 PREV_INSN (insn) = note_list;
2823 if (note_list)
2824 NEXT_INSN (note_list) = insn;
2825 note_list = insn;
2828 insn = next;
2830 return insn;
2833 /* Data structure for keeping track of register information
2834 during that register's life. */
2836 struct sometimes
2837 {
2838 short offset; short bit;
2839 short live_length; short calls_crossed;
2840 };
2842 /* Constructor for `sometimes' data structure. */
2844 static int
2845 new_sometimes_live (regs_sometimes_live, offset, bit, sometimes_max)
2846 struct sometimes *regs_sometimes_live;
2847 int offset, bit;
2848 int sometimes_max;
2850 register struct sometimes *p;
2851 register int regno = offset * REGSET_ELT_BITS + bit;
2852 int i;
2854 /* There should never be a register greater than max_regno here. If there
2855 is, it means that a define_split has created a new pseudo reg. This
2856 is not allowed, since there will not be flow info available for any
2857 new register, so catch the error here. */
2858 if (regno >= max_regno)
2859 abort ();
2861 p = &regs_sometimes_live[sometimes_max];
2862 p->offset = offset;
2863 p->bit = bit;
2864 p->live_length = 0;
2865 p->calls_crossed = 0;
2866 sometimes_max++;
2867 return sometimes_max;
2870 /* At the end of a block, record the accumulated live length and
2871 calls-crossed counts for every register we are still tracking. */
2873 static void
2874 finish_sometimes_live (regs_sometimes_live, sometimes_max)
2875 struct sometimes *regs_sometimes_live;
2876 int sometimes_max;
2878 int i;
2880 for (i = 0; i < sometimes_max; i++)
2882 register struct sometimes *p = &regs_sometimes_live[i];
2883 int regno;
2885 regno = p->offset * REGSET_ELT_BITS + p->bit;
2887 sched_reg_live_length[regno] += p->live_length;
2888 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
2892 /* Use modified list scheduling to rearrange insns in basic block
2893 B. FILE, if nonzero, is where we dump interesting output about
2894 this pass. */
2896 static void
2897 schedule_block (b, file)
2898 int b;
2899 FILE *file;
2901 rtx insn, last;
2902 rtx last_note = 0;
2903 rtx *ready, link;
2904 int i, j, n_ready = 0, new_ready, n_insns = 0;
2905 int sched_n_insns = 0;
2906 int clock;
2907 #define NEED_NOTHING 0
2908 #define NEED_HEAD 1
2909 #define NEED_TAIL 2
2910 int new_needs;
2912 /* HEAD and TAIL delimit the region being scheduled. */
2913 rtx head = basic_block_head[b];
2914 rtx tail = basic_block_end[b];
2915 /* PREV_HEAD and NEXT_TAIL are the boundaries of the insns
2916 being scheduled. When the insns have been ordered,
2917 these insns delimit where the new insns are to be
2918 spliced back into the insn chain. */
2919 rtx next_tail;
2920 rtx prev_head;
2922 /* Keep life information accurate. */
2923 register struct sometimes *regs_sometimes_live;
2924 int sometimes_max;
2926 if (file)
2927 fprintf (file, ";;\t -- basic block number %d from %d to %d --\n",
2928 b, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
2930 i = max_reg_num ();
2931 reg_last_uses = (rtx *) alloca (i * sizeof (rtx));
2932 bzero (reg_last_uses, i * sizeof (rtx));
2933 reg_last_sets = (rtx *) alloca (i * sizeof (rtx));
2934 bzero (reg_last_sets, i * sizeof (rtx));
2935 clear_units ();
2937 /* Remove certain insns at the beginning from scheduling,
2938 by advancing HEAD. */
2940 /* At the start of a function, before reload has run, don't delay getting
2941 parameters from hard registers into pseudo registers. */
2942 if (reload_completed == 0 && b == 0)
2944 while (head != tail
2945 && GET_CODE (head) == NOTE
2946 && NOTE_LINE_NUMBER (head) != NOTE_INSN_FUNCTION_BEG)
2947 head = NEXT_INSN (head);
2948 while (head != tail
2949 && GET_CODE (head) == INSN
2950 && GET_CODE (PATTERN (head)) == SET)
2952 rtx src = SET_SRC (PATTERN (head));
2953 while (GET_CODE (src) == SUBREG
2954 || GET_CODE (src) == SIGN_EXTEND
2955 || GET_CODE (src) == ZERO_EXTEND
2956 || GET_CODE (src) == SIGN_EXTRACT
2957 || GET_CODE (src) == ZERO_EXTRACT)
2958 src = XEXP (src, 0);
2959 if (GET_CODE (src) != REG
2960 || REGNO (src) >= FIRST_PSEUDO_REGISTER)
2961 break;
2962 /* Keep this insn from ever being scheduled. */
2963 INSN_REF_COUNT (head) = 1;
2964 head = NEXT_INSN (head);
2968 /* Don't include any notes or labels at the beginning of the
2969 basic block, or notes at the ends of basic blocks. */
2970 while (head != tail)
2972 if (GET_CODE (head) == NOTE)
2973 head = NEXT_INSN (head);
2974 else if (GET_CODE (tail) == NOTE)
2975 tail = PREV_INSN (tail);
2976 else if (GET_CODE (head) == CODE_LABEL)
2977 head = NEXT_INSN (head);
2978 else break;
2980 /* If the only insn left is a NOTE or a CODE_LABEL, then there is no need
2981 to schedule this block. */
2982 if (head == tail
2983 && (GET_CODE (head) == NOTE || GET_CODE (head) == CODE_LABEL))
2984 return;
2986 #if 0
2987 /* This short-cut doesn't work. It does not count call insns crossed by
2988 registers in reg_sometimes_live. It does not mark these registers as
2989 dead if they die in this block. It does not mark these registers live
2990 (or create new reg_sometimes_live entries if necessary) if they are born
2991 in this block.
2993 The easy solution is to just always schedule a block. This block only
2994 has one insn, so this won't slow down this pass by much. */
2996 if (head == tail)
2997 return;
2998 #endif
3000 /* Now HEAD through TAIL are the insns actually to be rearranged;
3001 Let PREV_HEAD and NEXT_TAIL enclose them. */
3002 prev_head = PREV_INSN (head);
3003 next_tail = NEXT_INSN (tail);
3005 /* Initialize basic block data structures. */
3006 dead_notes = 0;
3007 pending_read_insns = 0;
3008 pending_read_mems = 0;
3009 pending_write_insns = 0;
3010 pending_write_mems = 0;
3011 pending_lists_length = 0;
3012 last_pending_memory_flush = 0;
3013 last_function_call = 0;
3014 last_scheduled_insn = 0;
3016 LOG_LINKS (sched_before_next_call) = 0;
3018 n_insns += sched_analyze (head, tail);
3019 if (n_insns == 0)
3021 free_pending_lists ();
3022 return;
3025 /* Allocate vector to hold insns to be rearranged (except those
3026 insns which are controlled by an insn with SCHED_GROUP_P set).
3027 All these insns are included between ORIG_HEAD and ORIG_TAIL,
3028 as those variables ultimately are set up. */
3029 ready = (rtx *) alloca ((n_insns+1) * sizeof (rtx));
3031 /* TAIL is now the last of the insns to be rearranged.
3032 Put those insns into the READY vector. */
3033 insn = tail;
3035 /* For all branches, calls, uses, and cc0 setters, force them to remain
3036 in order at the end of the block by adding dependencies and giving
3037 the last a high priority. There may be notes present, and prev_head
3038 may also be a note.
3040 Branches must obviously remain at the end. Calls should remain at the
3041 end since moving them results in worse register allocation. Uses remain
3042 at the end to ensure proper register allocation.  cc0 setters remain
3043 at the end because they can't be moved away from their cc0 user. */
3044 last = 0;
3045 while (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == JUMP_INSN
3046 || (GET_CODE (insn) == INSN
3047 && (GET_CODE (PATTERN (insn)) == USE
3048 #ifdef HAVE_cc0
3049 || sets_cc0_p (PATTERN (insn))
3050 #endif
3052 || GET_CODE (insn) == NOTE)
3054 if (GET_CODE (insn) != NOTE)
3056 priority (insn);
3057 if (last == 0)
3059 ready[n_ready++] = insn;
3060 INSN_PRIORITY (insn) = TAIL_PRIORITY - i;
3061 INSN_REF_COUNT (insn) = 0;
3063 else if (! find_insn_list (insn, LOG_LINKS (last)))
3065 add_dependence (last, insn, REG_DEP_ANTI);
3066 INSN_REF_COUNT (insn)++;
3068 last = insn;
3070 /* Skip over insns that are part of a group. */
3071 while (SCHED_GROUP_P (insn))
3073 insn = prev_nonnote_insn (insn);
3074 priority (insn);
3078 insn = PREV_INSN (insn);
3079 /* Don't overrun the bounds of the basic block. */
3080 if (insn == prev_head)
3081 break;
3084 /* Assign priorities to instructions. Also check whether they
3085 are in priority order already. If so then I will be nonnegative.
3086 We use this shortcut only before reloading. */
3087 #if 0
3088 i = reload_completed ? DONE_PRIORITY : MAX_PRIORITY;
3089 #endif
3091 for (; insn != prev_head; insn = PREV_INSN (insn))
3093 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3095 priority (insn);
3096 if (INSN_REF_COUNT (insn) == 0)
3098 if (last == 0)
3099 ready[n_ready++] = insn;
3100 else
3102 /* Make this dependent on the last of the instructions
3103 that must remain in order at the end of the block. */
3104 add_dependence (last, insn, REG_DEP_ANTI);
3105 INSN_REF_COUNT (insn) = 1;
3108 if (SCHED_GROUP_P (insn))
3110 while (SCHED_GROUP_P (insn))
3112 insn = PREV_INSN (insn);
3113 while (GET_CODE (insn) == NOTE)
3114 insn = PREV_INSN (insn);
3115 priority (insn);
3117 continue;
3119 #if 0
3120 if (i < 0)
3121 continue;
3122 if (INSN_PRIORITY (insn) < i)
3123 i = INSN_PRIORITY (insn);
3124 else if (INSN_PRIORITY (insn) > i)
3125 i = DONE_PRIORITY;
3126 #endif
3130 #if 0
3131 /* This short-cut doesn't work. It does not count call insns crossed by
3132 registers in reg_sometimes_live. It does not mark these registers as
3133 dead if they die in this block. It does not mark these registers live
3134 (or create new reg_sometimes_live entries if necessary) if they are born
3135 in this block.
3137 The easy solution is to just always schedule a block. These blocks tend
3138 to be very short, so this doesn't slow down this pass by much. */
3140 /* If existing order is good, don't bother to reorder. */
3141 if (i != DONE_PRIORITY)
3143 if (file)
3144 fprintf (file, ";; already scheduled\n");
3146 if (reload_completed == 0)
3148 for (i = 0; i < sometimes_max; i++)
3149 regs_sometimes_live[i].live_length += n_insns;
3151 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3153 free_pending_lists ();
3154 return;
3156 #endif
3158 /* Scan all the insns to be scheduled, removing NOTE insns
3159 and register death notes.
3160 Line number NOTE insns end up in NOTE_LIST.
3161 Register death notes end up in DEAD_NOTES.
3163 Recreate the register life information for the end of this basic
3164 block. */
3166 if (reload_completed == 0)
3168 bcopy (basic_block_live_at_start[b], bb_live_regs, regset_bytes);
3169 bzero (bb_dead_regs, regset_bytes);
3171 if (b == 0)
3173 /* This is the first block in the function. There may be insns
3174 before head that we can't schedule. We still need to examine
3175 them though for accurate register lifetime analysis. */
3177 /* We don't want to remove any REG_DEAD notes as the code below
3178 does. */
3180 for (insn = basic_block_head[b]; insn != head;
3181 insn = NEXT_INSN (insn))
3182 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3184 /* See if the register gets born here. */
3185 /* We must check for registers being born before we check for
3186 registers dying. It is possible for a register to be born
3187 and die in the same insn, e.g. reading from a volatile
3188 memory location into an otherwise unused register. Such
3189 a register must be marked as dead after this insn. */
3190 if (GET_CODE (PATTERN (insn)) == SET
3191 || GET_CODE (PATTERN (insn)) == CLOBBER)
3192 sched_note_set (b, PATTERN (insn), 0);
3193 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3195 int j;
3196 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3197 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3198 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3199 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3201 /* ??? This code is obsolete and should be deleted. It
3202 is harmless though, so we will leave it in for now. */
3203 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3204 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3205 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3208 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
3210 if ((REG_NOTE_KIND (link) == REG_DEAD
3211 || REG_NOTE_KIND (link) == REG_UNUSED)
3212 /* Verify that the REG_NOTE has a legal value. */
3213 && GET_CODE (XEXP (link, 0)) == REG)
3215 register int regno = REGNO (XEXP (link, 0));
3216 register int offset = regno / REGSET_ELT_BITS;
3217 register REGSET_ELT_TYPE bit
3218 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3220 if (regno < FIRST_PSEUDO_REGISTER)
3222 int j = HARD_REGNO_NREGS (regno,
3223 GET_MODE (XEXP (link, 0)));
3224 while (--j >= 0)
3226 offset = (regno + j) / REGSET_ELT_BITS;
3227 bit = ((REGSET_ELT_TYPE) 1
3228 << ((regno + j) % REGSET_ELT_BITS));
3230 bb_live_regs[offset] &= ~bit;
3231 bb_dead_regs[offset] |= bit;
3234 else
3236 bb_live_regs[offset] &= ~bit;
3237 bb_dead_regs[offset] |= bit;
3245 /* If debugging information is being produced, keep track of the line
3246 number notes for each insn. */
3247 if (write_symbols != NO_DEBUG)
3249 /* We must use the true line number for the first insn in the block
3250 that was computed and saved at the start of this pass. We can't
3251 use the current line number, because scheduling of the previous
3252 block may have changed the current line number. */
3253 rtx line = line_note_head[b];
3255 for (insn = basic_block_head[b];
3256 insn != next_tail;
3257 insn = NEXT_INSN (insn))
3258 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3259 line = insn;
3260 else
3261 LINE_NOTE (insn) = line;
3264 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3266 rtx prev, next, link;
3268 /* Farm out notes. This is needed to keep the debugger from
3269 getting completely deranged. */
3270 if (GET_CODE (insn) == NOTE)
3272 prev = insn;
3273 insn = unlink_notes (insn, next_tail);
3274 if (prev == tail)
3275 abort ();
3276 if (prev == head)
3277 abort ();
3278 if (insn == next_tail)
3279 abort ();
3282 if (reload_completed == 0
3283 && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3285 /* See if the register gets born here. */
3286 /* We must check for registers being born before we check for
3287 registers dying. It is possible for a register to be born and
3288 die in the same insn, e.g. reading from a volatile memory
3289 location into an otherwise unused register. Such a register
3290 must be marked as dead after this insn. */
3291 if (GET_CODE (PATTERN (insn)) == SET
3292 || GET_CODE (PATTERN (insn)) == CLOBBER)
3293 sched_note_set (b, PATTERN (insn), 0);
3294 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3296 int j;
3297 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3298 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3299 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3300 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3302 /* ??? This code is obsolete and should be deleted. It
3303 is harmless though, so we will leave it in for now. */
3304 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3305 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == USE)
3306 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 0);
3309 /* Need to know what registers this insn kills. */
3310 for (prev = 0, link = REG_NOTES (insn); link; link = next)
3312 int regno;
3314 next = XEXP (link, 1);
3315 if ((REG_NOTE_KIND (link) == REG_DEAD
3316 || REG_NOTE_KIND (link) == REG_UNUSED)
3317 /* Verify that the REG_NOTE has a legal value. */
3318 && GET_CODE (XEXP (link, 0)) == REG)
3320 register int regno = REGNO (XEXP (link, 0));
3321 register int offset = regno / REGSET_ELT_BITS;
3322 register REGSET_ELT_TYPE bit
3323 = (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);
3325 /* Only unlink REG_DEAD notes; leave REG_UNUSED notes
3326 alone. */
3327 if (REG_NOTE_KIND (link) == REG_DEAD)
3329 if (prev)
3330 XEXP (prev, 1) = next;
3331 else
3332 REG_NOTES (insn) = next;
3333 XEXP (link, 1) = dead_notes;
3334 dead_notes = link;
3336 else
3337 prev = link;
3339 if (regno < FIRST_PSEUDO_REGISTER)
3341 int j = HARD_REGNO_NREGS (regno,
3342 GET_MODE (XEXP (link, 0)));
3343 while (--j >= 0)
3345 offset = (regno + j) / REGSET_ELT_BITS;
3346 bit = ((REGSET_ELT_TYPE) 1
3347 << ((regno + j) % REGSET_ELT_BITS));
3349 bb_live_regs[offset] &= ~bit;
3350 bb_dead_regs[offset] |= bit;
3353 else
3355 bb_live_regs[offset] &= ~bit;
3356 bb_dead_regs[offset] |= bit;
3359 else
3360 prev = link;
3365 if (reload_completed == 0)
3367 /* Keep track of register lives. */
3368 old_live_regs = (regset) alloca (regset_bytes);
3369 regs_sometimes_live
3370 = (struct sometimes *) alloca (max_regno * sizeof (struct sometimes));
3371 sometimes_max = 0;
3373 /* Start with registers live at end. */
3374 for (j = 0; j < regset_size; j++)
3376 REGSET_ELT_TYPE live = bb_live_regs[j];
3377 old_live_regs[j] = live;
3378 if (live)
3380 register REGSET_ELT_TYPE bit;
3381 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3382 if (live & ((REGSET_ELT_TYPE) 1 << bit))
3383 sometimes_max = new_sometimes_live (regs_sometimes_live, j,
3384 bit, sometimes_max);
3389 SCHED_SORT (ready, n_ready, 1);
3391 if (file)
3393 fprintf (file, ";; ready list initially:\n;; ");
3394 for (i = 0; i < n_ready; i++)
3395 fprintf (file, "%d ", INSN_UID (ready[i]));
3396 fprintf (file, "\n\n");
3398 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3399 if (INSN_PRIORITY (insn) > 0)
3400 fprintf (file, ";; insn[%4d]: priority = %4d, ref_count = %4d\n",
3401 INSN_UID (insn), INSN_PRIORITY (insn),
3402 INSN_REF_COUNT (insn));
3405 /* Now HEAD and TAIL are going to become disconnected
3406 entirely from the insn chain. */
3407 tail = 0;
3409 /* Q_SIZE will always be zero here. */
3410 q_ptr = 0; clock = 0;
3411 bzero (insn_queue, sizeof (insn_queue));
3413 /* Now, perform list scheduling. */
3415 /* Where we start inserting insns is after TAIL. */
3416 last = next_tail;
3418 new_needs = (NEXT_INSN (prev_head) == basic_block_head[b]
3419 ? NEED_HEAD : NEED_NOTHING);
3420 if (PREV_INSN (next_tail) == basic_block_end[b])
3421 new_needs |= NEED_TAIL;
3423 new_ready = n_ready;
3424 while (sched_n_insns < n_insns)
3426 q_ptr = NEXT_Q (q_ptr); clock++;
3428 /* Add all pending insns that can be scheduled without stalls to the
3429 ready list. */
3430 for (insn = insn_queue[q_ptr]; insn; insn = NEXT_INSN (insn))
3432 if (file)
3433 fprintf (file, ";; launching %d before %d with no stalls at T-%d\n",
3434 INSN_UID (insn), INSN_UID (last), clock);
3435 ready[new_ready++] = insn;
3436 q_size -= 1;
3438 insn_queue[q_ptr] = 0;
3440 /* If there are no ready insns, stall until one is ready and add all
3441 of the pending insns at that point to the ready list. */
3442 if (new_ready == 0)
3444 register int stalls;
3446 for (stalls = 1; stalls < INSN_QUEUE_SIZE; stalls++)
3447 if ((insn = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]) != 0)
3449 for (; insn; insn = NEXT_INSN (insn))
3451 if (file)
3452 fprintf (file, ";; launching %d before %d with %d stalls at T-%d\n",
3453 INSN_UID (insn), INSN_UID (last), stalls, clock);
3454 ready[new_ready++] = insn;
3455 q_size -= 1;
3457 insn_queue[NEXT_Q_AFTER (q_ptr, stalls)] = 0;
3458 break;
3461 q_ptr = NEXT_Q_AFTER (q_ptr, stalls); clock += stalls;
3464 /* There should be some instructions waiting to fire. */
3465 if (new_ready == 0)
3466 abort ();
3468 if (file)
3470 fprintf (file, ";; ready list at T-%d:", clock);
3471 for (i = 0; i < new_ready; i++)
3472 fprintf (file, " %d (%x)",
3473 INSN_UID (ready[i]), INSN_PRIORITY (ready[i]));
3476 /* Sort the ready list and choose the best insn to schedule. Select
3477 which insn should issue in this cycle and queue those that are
3478 blocked by function unit hazards.
3480 N_READY holds the number of items that were scheduled the last time,
3481 minus the one instruction scheduled on the last loop iteration; it
3482 is not modified for any other reason in this loop. */
3484 SCHED_SORT (ready, new_ready, n_ready);
3485 if (MAX_BLOCKAGE > 1)
3487 new_ready = schedule_select (ready, new_ready, clock, file);
3488 if (new_ready == 0)
3490 if (file)
3491 fprintf (file, "\n");
3492 /* We must set n_ready here, to ensure that sorting always
3493 occurs when we come back to the SCHED_SORT line above. */
3494 n_ready = 0;
3495 continue;
3498 n_ready = new_ready;
3499 last_scheduled_insn = insn = ready[0];
3501 /* The first insn scheduled becomes the new tail. */
3502 if (tail == 0)
3503 tail = insn;
3505 if (file)
3507 fprintf (file, ", now");
3508 for (i = 0; i < n_ready; i++)
3509 fprintf (file, " %d", INSN_UID (ready[i]));
3510 fprintf (file, "\n");
3513 if (DONE_PRIORITY_P (insn))
3514 abort ();
3516 if (reload_completed == 0)
3518 /* Process this insn, and each insn linked to this one which must
3519 be immediately output after this insn. */
3522 /* First we kill registers set by this insn, and then we
3523 make registers used by this insn live. This is the opposite
3524 order used above because we are traversing the instructions
3525 backwards. */
3527 /* Strictly speaking, we should scan REG_UNUSED notes and make
3528 every register mentioned there live, however, we will just
3529 kill them again immediately below, so there doesn't seem to
3530 be any reason why we bother to do this. */
3532 /* See if this is the last notice we must take of a register. */
3533 if (GET_CODE (PATTERN (insn)) == SET
3534 || GET_CODE (PATTERN (insn)) == CLOBBER)
3535 sched_note_set (b, PATTERN (insn), 1);
3536 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3538 int j;
3539 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
3540 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
3541 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
3542 sched_note_set (b, XVECEXP (PATTERN (insn), 0, j), 1);
3545 /* This code keeps life analysis information up to date. */
3546 if (GET_CODE (insn) == CALL_INSN)
3548 register struct sometimes *p;
3550 /* A call kills all call used and global registers, except
3551 for those mentioned in the call pattern which will be
3552 made live again later. */
3553 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3554 if (call_used_regs[i] || global_regs[i])
3556 register int offset = i / REGSET_ELT_BITS;
3557 register REGSET_ELT_TYPE bit
3558 = (REGSET_ELT_TYPE) 1 << (i % REGSET_ELT_BITS);
3560 bb_live_regs[offset] &= ~bit;
3561 bb_dead_regs[offset] |= bit;
3564 /* Regs live at the time of a call instruction must not
3565 go in a register clobbered by calls. Record this for
3566 all regs now live. Note that insns which are born or
3567 die in a call do not cross a call, so this must be done
3568 after the killings (above) and before the births
3569 (below). */
3570 p = regs_sometimes_live;
3571 for (i = 0; i < sometimes_max; i++, p++)
3572 if (bb_live_regs[p->offset]
3573 & ((REGSET_ELT_TYPE) 1 << p->bit))
3574 p->calls_crossed += 1;
3577 /* Make every register used live, and add REG_DEAD notes for
3578 registers which were not live before we started. */
3579 attach_deaths_insn (insn);
3581 /* Find registers now made live by that instruction. */
3582 for (i = 0; i < regset_size; i++)
3584 REGSET_ELT_TYPE diff = bb_live_regs[i] & ~old_live_regs[i];
3585 if (diff)
3587 register int bit;
3588 old_live_regs[i] |= diff;
3589 for (bit = 0; bit < REGSET_ELT_BITS; bit++)
3590 if (diff & ((REGSET_ELT_TYPE) 1 << bit))
3591 sometimes_max
3592 = new_sometimes_live (regs_sometimes_live, i, bit,
3593 sometimes_max);
3597 /* Count lengths of all regs we are worrying about now,
3598 and handle registers no longer live. */
3600 for (i = 0; i < sometimes_max; i++)
3602 register struct sometimes *p = &regs_sometimes_live[i];
3603 int regno = p->offset * REGSET_ELT_BITS + p->bit;
3605 p->live_length += 1;
3607 if ((bb_live_regs[p->offset]
3608 & ((REGSET_ELT_TYPE) 1 << p->bit)) == 0)
3610 /* This is the end of one of this register's lifetime
3611 segments. Save the lifetime info collected so far,
3612 and clear its bit in the old_live_regs entry. */
3613 sched_reg_live_length[regno] += p->live_length;
3614 sched_reg_n_calls_crossed[regno] += p->calls_crossed;
3615 old_live_regs[p->offset]
3616 &= ~((REGSET_ELT_TYPE) 1 << p->bit);
3618 /* Delete the reg_sometimes_live entry for this reg by
3619 copying the last entry over top of it. */
3620 *p = regs_sometimes_live[--sometimes_max];
3621 /* ...and decrement i so that this newly copied entry
3622 will be processed. */
3623 i--;
3627 link = insn;
3628 insn = PREV_INSN (insn);
3630 while (SCHED_GROUP_P (link));
3632 /* Set INSN back to the insn we are scheduling now. */
3633 insn = ready[0];
3636 /* Schedule INSN. Remove it from the ready list. */
3637 ready += 1;
3638 n_ready -= 1;
3640 sched_n_insns += 1;
3641 NEXT_INSN (insn) = last;
3642 PREV_INSN (last) = insn;
3643 last = insn;
3645 /* Everything that precedes INSN now either becomes "ready", if
3646 it can execute immediately before INSN, or "pending", if
3647 there must be a delay. Give INSN high enough priority that
3648 at least one (maybe more) reg-killing insns can be launched
3649 ahead of all others. Mark INSN as scheduled by changing its
3650 priority to -1. */
3651 INSN_PRIORITY (insn) = LAUNCH_PRIORITY;
3652 new_ready = schedule_insn (insn, ready, n_ready, clock);
3653 INSN_PRIORITY (insn) = DONE_PRIORITY;
3655 /* Schedule all prior insns that must not be moved. */
3656 if (SCHED_GROUP_P (insn))
3658 /* Disable these insns from being launched. */
3659 link = insn;
3660 while (SCHED_GROUP_P (link))
3662 /* Disable these insns from being launched by anybody. */
3663 link = PREV_INSN (link);
3664 INSN_REF_COUNT (link) = 0;
3667 /* None of these insns can move forward into delay slots. */
3668 while (SCHED_GROUP_P (insn))
3670 insn = PREV_INSN (insn);
3671 new_ready = schedule_insn (insn, ready, new_ready, clock);
3672 INSN_PRIORITY (insn) = DONE_PRIORITY;
3674 sched_n_insns += 1;
3675 NEXT_INSN (insn) = last;
3676 PREV_INSN (last) = insn;
3677 last = insn;
3681 if (q_size != 0)
3682 abort ();
3684 if (reload_completed == 0)
3685 finish_sometimes_live (regs_sometimes_live, sometimes_max);
3687 /* HEAD is now the first insn in the chain of insns that
3688 have been scheduled by the loop above.
3689 TAIL is the last of those insns. */
3690 head = insn;
3692 /* NOTE_LIST is the end of a chain of notes previously found
3693 among the insns. Insert them at the beginning of the insns. */
3694 if (note_list != 0)
3696 rtx note_head = note_list;
3697 while (PREV_INSN (note_head))
3698 note_head = PREV_INSN (note_head);
3700 PREV_INSN (head) = note_list;
3701 NEXT_INSN (note_list) = head;
3702 head = note_head;
3705 /* There should be no REG_DEAD notes leftover at the end.
3706 In practice, this can occur as the result of bugs in flow, combine.c,
3707 and/or sched.c. The values of the REG_DEAD notes remaining are
3708 meaningless, because dead_notes is just used as a free list. */
3709 #if 1
3710 if (dead_notes != 0)
3711 abort ();
3712 #endif
3714 if (new_needs & NEED_HEAD)
3715 basic_block_head[b] = head;
3716 PREV_INSN (head) = prev_head;
3717 NEXT_INSN (prev_head) = head;
3719 if (new_needs & NEED_TAIL)
3720 basic_block_end[b] = tail;
3721 NEXT_INSN (tail) = next_tail;
3722 PREV_INSN (next_tail) = tail;
3724 /* Restore the line-number notes of each insn. */
3725 if (write_symbols != NO_DEBUG)
3727 rtx line, note, prev, new;
3728 int notes = 0;
3730 head = basic_block_head[b];
3731 next_tail = NEXT_INSN (basic_block_end[b]);
3733 /* Determine the current line-number. We want to know the current
3734 line number of the first insn of the block here, in case it is
3735 different from the true line number that was saved earlier. If
3736 different, then we need a line number note before the first insn
3737 of this block. If it happens to be the same, then we don't want to
3738 emit another line number note here. */
3739 for (line = head; line; line = PREV_INSN (line))
3740 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
3741 break;
3743 /* Walk the insns keeping track of the current line-number and inserting
3744 the line-number notes as needed. */
3745 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3746 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
3747 line = insn;
3748 /* This used to emit line number notes before every non-deleted note.
3749 However, this confuses a debugger, because line notes not separated
3750 by real instructions all end up at the same address. I can find no
3751 use for line number notes before other notes, so none are emitted. */
3752 else if (GET_CODE (insn) != NOTE
3753 && (note = LINE_NOTE (insn)) != 0
3754 && note != line
3755 && (line == 0
3756 || NOTE_LINE_NUMBER (note) != NOTE_LINE_NUMBER (line)
3757 || NOTE_SOURCE_FILE (note) != NOTE_SOURCE_FILE (line)))
3759 line = note;
3760 prev = PREV_INSN (insn);
3761 if (LINE_NOTE (note))
3763 /* Re-use the original line-number note. */
3764 LINE_NOTE (note) = 0;
3765 PREV_INSN (note) = prev;
3766 NEXT_INSN (prev) = note;
3767 PREV_INSN (insn) = note;
3768 NEXT_INSN (note) = insn;
3770 else
3772 notes++;
3773 new = emit_note_after (NOTE_LINE_NUMBER (note), prev);
3774 NOTE_SOURCE_FILE (new) = NOTE_SOURCE_FILE (note);
3777 if (file && notes)
3778 fprintf (file, ";; added %d line-number notes\n", notes);
3781 if (file)
3783 fprintf (file, ";; total time = %d\n;; new basic block head = %d\n;; new basic block end = %d\n\n",
3784 clock, INSN_UID (basic_block_head[b]), INSN_UID (basic_block_end[b]));
3787 /* Yow! We're done! */
3788 free_pending_lists ();
3790 return;
3793 /* Subroutine of split_hard_reg_notes. Searches X for any reference to
3794 REGNO, returning the rtx of the reference found if any. Otherwise,
3795 returns 0. */
3797 static rtx
3798 regno_use_in (regno, x)
3799 int regno;
3800 rtx x;
3802 register char *fmt;
3803 int i, j;
3804 rtx tem;
3806 if (GET_CODE (x) == REG && REGNO (x) == regno)
3807 return x;
3809 fmt = GET_RTX_FORMAT (GET_CODE (x));
3810 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3812 if (fmt[i] == 'e')
3814 if (tem = regno_use_in (regno, XEXP (x, i)))
3815 return tem;
3817 else if (fmt[i] == 'E')
3818 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3819 if (tem = regno_use_in (regno, XVECEXP (x, i, j)))
3820 return tem;
3823 return 0;
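/* A hedged usage sketch of the routine above; the function name below is
   hypothetical and nothing here is compiled.  */
#if 0
static void
example_find_hard_reg_use (insn)
     rtx insn;
{
  /* Look for any reference to hard register 0 inside INSN's pattern;
     regno_use_in returns the referencing rtx, or 0 if none.  */
  rtx use = regno_use_in (0, PATTERN (insn));

  if (use != 0)
    {
      /* USE is the (reg ...) expression found; GET_MODE (use) shows how
	 many words the reference covers.  */
    }
}
#endif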
3826 /* Subroutine of update_flow_info. Determines whether any new REG_NOTEs are
3827 needed for the hard register mentioned in the note. This can happen
3828 if the reference to the hard register in the original insn was split into
3829 several smaller hard register references in the split insns. */
3831 static void
3832 split_hard_reg_notes (note, first, last, orig_insn)
3833 rtx note, first, last, orig_insn;
3835 rtx reg, temp, link;
3836 int n_regs, i, new_reg;
3837 rtx insn;
3839 /* Assume that this is a REG_DEAD note. */
3840 if (REG_NOTE_KIND (note) != REG_DEAD)
3841 abort ();
3843 reg = XEXP (note, 0);
3845 n_regs = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
3847 for (i = 0; i < n_regs; i++)
3849 new_reg = REGNO (reg) + i;
3851 /* Check for references to new_reg in the split insns. */
3852 for (insn = last; ; insn = PREV_INSN (insn))
3854 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
3855 && (temp = regno_use_in (new_reg, PATTERN (insn))))
3857 /* Create a new reg dead note here. */
3858 link = rtx_alloc (EXPR_LIST);
3859 PUT_REG_NOTE_KIND (link, REG_DEAD);
3860 XEXP (link, 0) = temp;
3861 XEXP (link, 1) = REG_NOTES (insn);
3862 REG_NOTES (insn) = link;
3864 /* If killed multiple registers here, then add in the excess. */
3865 i += HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) - 1;
3867 break;
3869 /* It isn't mentioned anywhere, so no new reg note is needed for
3870 this register. */
3871 if (insn == first)
3872 break;
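/* An illustrative case (not from the original source): if ORIG_INSN
   carried REG_DEAD (reg:DI 2), a two-word hard register, and splitting
   produced insns that reference (reg:SI 2) and (reg:SI 3) separately,
   the loop above attaches a separate REG_DEAD note to the last split
   insn mentioning each word.  */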
3877 /* Subroutine of update_flow_info. Determines whether a SET or CLOBBER in an
3878 insn created by splitting needs a REG_DEAD or REG_UNUSED note added. */
3880 static void
3881 new_insn_dead_notes (pat, insn, last, orig_insn)
3882 rtx pat, insn, last, orig_insn;
3884 rtx dest, tem, set;
3886 /* PAT is either a CLOBBER or a SET here. */
3887 dest = XEXP (pat, 0);
3889 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
3890 || GET_CODE (dest) == STRICT_LOW_PART
3891 || GET_CODE (dest) == SIGN_EXTRACT)
3892 dest = XEXP (dest, 0);
3894 if (GET_CODE (dest) == REG)
3896 for (tem = last; tem != insn; tem = PREV_INSN (tem))
3898 if (GET_RTX_CLASS (GET_CODE (tem)) == 'i'
3899 && reg_overlap_mentioned_p (dest, PATTERN (tem))
3900 && (set = single_set (tem)))
3902 rtx tem_dest = SET_DEST (set);
3904 while (GET_CODE (tem_dest) == ZERO_EXTRACT
3905 || GET_CODE (tem_dest) == SUBREG
3906 || GET_CODE (tem_dest) == STRICT_LOW_PART
3907 || GET_CODE (tem_dest) == SIGN_EXTRACT)
3908 tem_dest = XEXP (tem_dest, 0);
3910 if (tem_dest != dest)
3912 /* Use the same scheme as combine.c: don't put both REG_DEAD
3913 and REG_UNUSED notes on the same insn. */
3914 if (! find_regno_note (tem, REG_UNUSED, REGNO (dest))
3915 && ! find_regno_note (tem, REG_DEAD, REGNO (dest)))
3917 rtx note = rtx_alloc (EXPR_LIST);
3918 PUT_REG_NOTE_KIND (note, REG_DEAD);
3919 XEXP (note, 0) = dest;
3920 XEXP (note, 1) = REG_NOTES (tem);
3921 REG_NOTES (tem) = note;
3923 /* The reg only dies in one insn, the last one that uses
3924 it. */
3925 break;
3927 else if (reg_overlap_mentioned_p (dest, SET_SRC (set)))
3928 /* We found an instruction that both uses the register,
3929 and sets it, so no new REG_NOTE is needed for this set. */
3930 break;
3933 /* If this is a set, it must die somewhere, unless it is the dest of
3934 the original insn, and hence is live after the original insn. Abort
3935 if it isn't supposed to be live after the original insn.
3937 If this is a clobber, then just add a REG_UNUSED note. */
3938 if (tem == insn)
3940 int live_after_orig_insn = 0;
3941 rtx pattern = PATTERN (orig_insn);
3942 int i;
3944 if (GET_CODE (pat) == CLOBBER)
3946 rtx note = rtx_alloc (EXPR_LIST);
3947 PUT_REG_NOTE_KIND (note, REG_UNUSED);
3948 XEXP (note, 0) = dest;
3949 XEXP (note, 1) = REG_NOTES (insn);
3950 REG_NOTES (insn) = note;
3951 return;
3954 /* The original insn could have multiple sets, so search the
3955 insn for all sets. */
3956 if (GET_CODE (pattern) == SET)
3958 if (reg_overlap_mentioned_p (dest, SET_DEST (pattern)))
3959 live_after_orig_insn = 1;
3961 else if (GET_CODE (pattern) == PARALLEL)
3963 for (i = 0; i < XVECLEN (pattern, 0); i++)
3964 if (GET_CODE (XVECEXP (pattern, 0, i)) == SET
3965 && reg_overlap_mentioned_p (dest,
3966 SET_DEST (XVECEXP (pattern,
3967 0, i))))
3968 live_after_orig_insn = 1;
3971 if (! live_after_orig_insn)
3972 abort ();
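/* A hedged illustration: if splitting rewrote  a = b op c op d  as
   t = b op c;  a = t op d  then the temporary T is set by the first
   new insn and last used by the second, so the walk above puts a
   REG_DEAD note for T on the second insn.  */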
3977 /* Subroutine of update_flow_info. Update the value of reg_n_sets for all
3978 registers modified by X. INC is -1 if the containing insn is being deleted,
3979 and is 1 if the containing insn is a newly generated insn. */
3981 static void
3982 update_n_sets (x, inc)
3983 rtx x;
3984 int inc;
3986 rtx dest = SET_DEST (x);
3988 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
3989 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
3990 dest = SUBREG_REG (dest);
3992 if (GET_CODE (dest) == REG)
3994 int regno = REGNO (dest);
3996 if (regno < FIRST_PSEUDO_REGISTER)
3998 register int i;
3999 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (dest));
4001 for (i = regno; i < endregno; i++)
4002 reg_n_sets[i] += inc;
4004 else
4005 reg_n_sets[regno] += inc;
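/* Usage sketch: update_flow_info (below) counts every SET or CLOBBER of
   the deleted ORIG_INSN out with INC == -1, then counts every SET or
   CLOBBER of the replacement insns back in with INC == 1.  */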
4009 /* Updates all flow-analysis related quantities (including REG_NOTES) for
4010 the insns from FIRST to LAST inclusive that were created by splitting
4011 ORIG_INSN. NOTES are the original REG_NOTES. */
4013 static void
4014 update_flow_info (notes, first, last, orig_insn)
4015 rtx notes;
4016 rtx first, last;
4017 rtx orig_insn;
4019 rtx insn, note;
4020 rtx next;
4021 rtx orig_dest, temp;
4022 rtx set;
4024 /* Get and save the destination set by the original insn. */
4026 orig_dest = single_set (orig_insn);
4027 if (orig_dest)
4028 orig_dest = SET_DEST (orig_dest);
4030 /* Move REG_NOTES from the original insn to where they now belong. */
4032 for (note = notes; note; note = next)
4034 next = XEXP (note, 1);
4035 switch (REG_NOTE_KIND (note))
4037 case REG_DEAD:
4038 case REG_UNUSED:
4039 /* Move these notes from the original insn to the last new insn where
4040 the register is now set. */
4042 for (insn = last; ; insn = PREV_INSN (insn))
4044 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4045 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
4047 /* If this note refers to a multiple word hard register, it
4048 may have been split into several smaller hard register
4049 references, so handle it specially. */
4050 temp = XEXP (note, 0);
4051 if (REG_NOTE_KIND (note) == REG_DEAD
4052 && GET_CODE (temp) == REG
4053 && REGNO (temp) < FIRST_PSEUDO_REGISTER
4054 && HARD_REGNO_NREGS (REGNO (temp), GET_MODE (temp)) > 1)
4055 split_hard_reg_notes (note, first, last, orig_insn);
4056 else
4058 XEXP (note, 1) = REG_NOTES (insn);
4059 REG_NOTES (insn) = note;
4062 /* We sometimes need to convert REG_UNUSED notes to REG_DEAD
4063 notes. */
4064 /* ??? This won't handle multiple word registers correctly,
4065 but should be good enough for now. */
4066 if (REG_NOTE_KIND (note) == REG_UNUSED
4067 && ! dead_or_set_p (insn, XEXP (note, 0)))
4068 PUT_REG_NOTE_KIND (note, REG_DEAD);
4070 /* The reg only dies in one insn, the last one that uses
4071 it. */
4072 break;
4074 /* It must die somewhere; fail if we couldn't find where it died.
4076 If this is a REG_UNUSED note, then it must be a temporary
4077 register that was not needed by this instantiation of the
4078 pattern, so we can safely ignore it. */
4079 if (insn == first)
4081 if (REG_NOTE_KIND (note) != REG_UNUSED)
4082 abort ();
4084 break;
4087 break;
4089 case REG_WAS_0:
4090 /* This note applies to the dest of the original insn. Find the
4091 first new insn that now has the same dest, and move the note
4092 there. */
4094 if (! orig_dest)
4095 abort ();
4097 for (insn = first; ; insn = NEXT_INSN (insn))
4099 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4100 && (temp = single_set (insn))
4101 && rtx_equal_p (SET_DEST (temp), orig_dest))
4103 XEXP (note, 1) = REG_NOTES (insn);
4104 REG_NOTES (insn) = note;
4105 /* The reg is only zero before one insn, the first that
4106 sets it. */
4107 break;
4109 /* It must be set somewhere; fail if we couldn't find where it
4110 was set. */
4111 if (insn == last)
4112 abort ();
4114 break;
4116 case REG_EQUAL:
4117 case REG_EQUIV:
4118 /* A REG_EQUIV or REG_EQUAL note on an insn with more than one
4119 set is meaningless. Just drop the note. */
4120 if (! orig_dest)
4121 break;
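	/* Otherwise fall through and treat the note like REG_NO_CONFLICT.  */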
4123 case REG_NO_CONFLICT:
4124 /* These notes apply to the dest of the original insn. Find the last
4125 new insn that now has the same dest, and move the note there. */
4127 if (! orig_dest)
4128 abort ();
4130 for (insn = last; ; insn = PREV_INSN (insn))
4132 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4133 && (temp = single_set (insn))
4134 && rtx_equal_p (SET_DEST (temp), orig_dest))
4136 XEXP (note, 1) = REG_NOTES (insn);
4137 REG_NOTES (insn) = note;
4138 /* Only put this note on one of the new insns. */
4139 break;
4142 /* The original dest must still be set someplace. Abort if we
4143 couldn't find it. */
4144 if (insn == first)
4145 abort ();
4147 break;
4149 case REG_LIBCALL:
4150 /* Move a REG_LIBCALL note to the first insn created, and update
4151 the corresponding REG_RETVAL note. */
4152 XEXP (note, 1) = REG_NOTES (first);
4153 REG_NOTES (first) = note;
4155 insn = XEXP (note, 0);
4156 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4157 if (note)
4158 XEXP (note, 0) = first;
4159 break;
4161 case REG_RETVAL:
4162 /* Move a REG_RETVAL note to the last insn created, and update
4163 the corresponding REG_LIBCALL note. */
4164 XEXP (note, 1) = REG_NOTES (last);
4165 REG_NOTES (last) = note;
4167 insn = XEXP (note, 0);
4168 note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4169 if (note)
4170 XEXP (note, 0) = last;
4171 break;
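      /* (The two cases above are symmetric: the REG_LIBCALL note on the
	 first insn of a libcall sequence and the REG_RETVAL note on the
	 last each point at the insn carrying the other, so both ends are
	 updated together.)  */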
4173 case REG_NONNEG:
4174 /* This should be moved to whichever instruction is a JUMP_INSN. */
4176 for (insn = last; ; insn = PREV_INSN (insn))
4178 if (GET_CODE (insn) == JUMP_INSN)
4180 XEXP (note, 1) = REG_NOTES (insn);
4181 REG_NOTES (insn) = note;
4182 /* Only put this note on one of the new insns. */
4183 break;
4185 /* Fail if we couldn't find a JUMP_INSN. */
4186 if (insn == first)
4187 abort ();
4189 break;
4191 case REG_INC:
4192 /* This should be moved to whichever instruction now has the
4193 increment operation. */
4194 abort ();
4196 case REG_LABEL:
4197 /* Should be moved to the new insn(s) which use the label. */
4198 for (insn = first; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
4199 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4200 && reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
4201 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_LABEL,
4202 XEXP (note, 0), REG_NOTES (insn));
4203 break;
4205 case REG_CC_SETTER:
4206 case REG_CC_USER:
4207 /* These two notes will never appear until after reorg, so we don't
4208 have to handle them here. */
4209 default:
4210 abort ();
4214 /* Each new insn created, except the last, has a new set. If the destination
4215 is a register, then this reg is now live across several insns, whereas
4216 previously the dest reg was born and died within the same insn. To
4217 reflect this, we now need a REG_DEAD note on the insn where this
4218 dest reg dies.
4220 Similarly, the new insns may have clobbers that need REG_UNUSED notes. */
4222 for (insn = first; insn != last; insn = NEXT_INSN (insn))
4224 rtx pat;
4225 int i;
4227 pat = PATTERN (insn);
4228 if (GET_CODE (pat) == SET || GET_CODE (pat) == CLOBBER)
4229 new_insn_dead_notes (pat, insn, last, orig_insn);
4230 else if (GET_CODE (pat) == PARALLEL)
4232 for (i = 0; i < XVECLEN (pat, 0); i++)
4233 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
4234 || GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER)
4235 new_insn_dead_notes (XVECEXP (pat, 0, i), insn, last, orig_insn);
4239 /* If any insn, except the last, uses the register set by the last insn,
4240 then we need a new REG_DEAD note on that insn. In this case, there
4241 would not have been a REG_DEAD note for this register in the original
4242 insn because it was used and set within one insn.
4244 There is no new REG_DEAD note needed if the last insn uses the register
4245 that it is setting. */
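  /* (Illustrative case: ORIG_INSN both read and wrote the same register,
     say r4 = r4 + 8 split into t = r4 + 8; r4 = t. The early read of
     r4 now needs its own REG_DEAD note, placed by the loop below.)  */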
4247 set = single_set (last);
4248 if (set)
4250 rtx dest = SET_DEST (set);
4252 while (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG
4253 || GET_CODE (dest) == STRICT_LOW_PART
4254 || GET_CODE (dest) == SIGN_EXTRACT)
4255 dest = XEXP (dest, 0);
4257 if (GET_CODE (dest) == REG
4258 && ! reg_overlap_mentioned_p (dest, SET_SRC (set)))
4260 for (insn = PREV_INSN (last); ; insn = PREV_INSN (insn))
4262 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4263 && reg_mentioned_p (dest, PATTERN (insn))
4264 && (set = single_set (insn)))
4266 rtx insn_dest = SET_DEST (set);
4268 while (GET_CODE (insn_dest) == ZERO_EXTRACT
4269 || GET_CODE (insn_dest) == SUBREG
4270 || GET_CODE (insn_dest) == STRICT_LOW_PART
4271 || GET_CODE (insn_dest) == SIGN_EXTRACT)
4272 insn_dest = XEXP (insn_dest, 0);
4274 if (insn_dest != dest)
4276 note = rtx_alloc (EXPR_LIST);
4277 PUT_REG_NOTE_KIND (note, REG_DEAD);
4278 XEXP (note, 0) = dest;
4279 XEXP (note, 1) = REG_NOTES (insn);
4280 REG_NOTES (insn) = note;
4281 /* The reg only dies in one insn, the last one
4282 that uses it. */
4283 break;
4286 if (insn == first)
4287 break;
4292 /* If the original dest is modifying a multiple register target, and the
4293 original instruction was split such that the original dest is now set
4294 by two or more SUBREG sets, then the split insns no longer kill the
4295 destination of the original insn.
4297 In this case, if there exists an instruction in the same basic block,
4298 before the split insn, which uses the original dest, and this use is
4299 killed by the original insn, then we must remove the REG_DEAD note on
4300 this insn, because it is now superfluous.
4302 This does not apply when a hard register gets split, because the code
4303 knows how to handle overlapping hard registers properly. */
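  /* (Illustrative: ORIG_INSN set (reg:DI P) for a pseudo P and was split
     into sets of (subreg:SI (reg:DI P) 0) and (subreg:SI (reg:DI P) 1);
     the SUBREG sets no longer kill P outright, so a REG_DEAD note for P
     on an earlier insn is now wrong and must be removed.)  */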
4304 if (orig_dest && GET_CODE (orig_dest) == REG)
4306 int found_orig_dest = 0;
4307 int found_split_dest = 0;
4309 for (insn = first; ; insn = NEXT_INSN (insn))
4311 set = single_set (insn);
4312 if (set)
4314 if (GET_CODE (SET_DEST (set)) == REG
4315 && REGNO (SET_DEST (set)) == REGNO (orig_dest))
4317 found_orig_dest = 1;
4318 break;
4320 else if (GET_CODE (SET_DEST (set)) == SUBREG
4321 && SUBREG_REG (SET_DEST (set)) == orig_dest)
4323 found_split_dest = 1;
4324 break;
4328 if (insn == last)
4329 break;
4332 if (found_split_dest)
4334 /* Search backwards from FIRST, looking for the first insn that uses
4335 the original dest. Stop if we pass a CODE_LABEL or a JUMP_INSN.
4336 If we find an insn, and it has a REG_DEAD note, then delete the
4337 note. */
4339 for (insn = first; insn; insn = PREV_INSN (insn))
4341 if (GET_CODE (insn) == CODE_LABEL
4342 || GET_CODE (insn) == JUMP_INSN)
4343 break;
4344 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
4345 && reg_mentioned_p (orig_dest, insn))
4347 note = find_regno_note (insn, REG_DEAD, REGNO (orig_dest));
4348 if (note)
4349 remove_note (insn, note);
4353 else if (! found_orig_dest)
4355 /* This should never happen. */
4356 abort ();
4360 /* Update reg_n_sets. This is necessary to prevent local alloc from
4361 converting REG_EQUAL notes to REG_EQUIV when splitting has modified
4362 a reg from set once to set multiple times. */
4365 rtx x = PATTERN (orig_insn);
4366 RTX_CODE code = GET_CODE (x);
4368 if (code == SET || code == CLOBBER)
4369 update_n_sets (x, -1);
4370 else if (code == PARALLEL)
4372 int i;
4373 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4375 code = GET_CODE (XVECEXP (x, 0, i));
4376 if (code == SET || code == CLOBBER)
4377 update_n_sets (XVECEXP (x, 0, i), -1);
4381 for (insn = first; ; insn = NEXT_INSN (insn))
4383 x = PATTERN (insn);
4384 code = GET_CODE (x);
4386 if (code == SET || code == CLOBBER)
4387 update_n_sets (x, 1);
4388 else if (code == PARALLEL)
4390 int i;
4391 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
4393 code = GET_CODE (XVECEXP (x, 0, i));
4394 if (code == SET || code == CLOBBER)
4395 update_n_sets (XVECEXP (x, 0, i), 1);
4399 if (insn == last)
4400 break;
4405 /* The one entry point in this file. DUMP_FILE is the dump file for
4406 this pass. */
4408 void
4409 schedule_insns (dump_file)
4410 FILE *dump_file;
4412 int max_uid = MAX_INSNS_PER_SPLIT * (get_max_uid () + 1);
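  /* Splitting (below) can replace one insn with up to MAX_INSNS_PER_SPLIT
     new insns, so the per-uid arrays allocated below are sized for that
     worst case.  */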
4413 int i, b;
4414 rtx insn;
4416 /* Taking care of this degenerate case makes the rest of
4417 this code simpler. */
4418 if (n_basic_blocks == 0)
4419 return;
4421 /* Create an insn here so that we can hang dependencies off of it later. */
4422 sched_before_next_call
4423 = gen_rtx (INSN, VOIDmode, 0, NULL_RTX, NULL_RTX,
4424 NULL_RTX, 0, NULL_RTX, 0);
4426 /* Initialize the unused_*_lists. We can't use the ones left over from
4427 the previous function, because gcc has freed that memory. We can,
4428 however, use the ones left over from the first sched pass in the second
4429 pass, so only clear them on the first sched pass. The first pass is before
4430 reload if flag_schedule_insns is set, otherwise it is afterwards. */
4432 if (reload_completed == 0 || ! flag_schedule_insns)
4434 unused_insn_list = 0;
4435 unused_expr_list = 0;
4438 /* We create no insns here, only reorder them, so we
4439 remember how far we can cut back the stack on exit. */
4441 /* Allocate data for this pass. See comments, above,
4442 for what these vectors do. */
4443 insn_luid = (int *) alloca (max_uid * sizeof (int));
4444 insn_priority = (int *) alloca (max_uid * sizeof (int));
4445 insn_tick = (int *) alloca (max_uid * sizeof (int));
4446 insn_costs = (short *) alloca (max_uid * sizeof (short));
4447 insn_units = (short *) alloca (max_uid * sizeof (short));
4448 insn_blockage = (unsigned int *) alloca (max_uid * sizeof (unsigned int));
4449 insn_ref_count = (int *) alloca (max_uid * sizeof (int));
4451 if (reload_completed == 0)
4453 sched_reg_n_deaths = (short *) alloca (max_regno * sizeof (short));
4454 sched_reg_n_calls_crossed = (int *) alloca (max_regno * sizeof (int));
4455 sched_reg_live_length = (int *) alloca (max_regno * sizeof (int));
4456 bb_dead_regs = (regset) alloca (regset_bytes);
4457 bb_live_regs = (regset) alloca (regset_bytes);
4458 bzero (sched_reg_n_calls_crossed, max_regno * sizeof (int));
4459 bzero (sched_reg_live_length, max_regno * sizeof (int));
4460 bcopy (reg_n_deaths, sched_reg_n_deaths, max_regno * sizeof (short));
4461 init_alias_analysis ();
4463 else
4465 sched_reg_n_deaths = 0;
4466 sched_reg_n_calls_crossed = 0;
4467 sched_reg_live_length = 0;
4468 bb_dead_regs = 0;
4469 bb_live_regs = 0;
4470 if (! flag_schedule_insns)
4471 init_alias_analysis ();
4474 if (write_symbols != NO_DEBUG)
4476 rtx line;
4478 line_note = (rtx *) alloca (max_uid * sizeof (rtx));
4479 bzero (line_note, max_uid * sizeof (rtx));
4480 line_note_head = (rtx *) alloca (n_basic_blocks * sizeof (rtx));
4481 bzero (line_note_head, n_basic_blocks * sizeof (rtx));
4483 /* Determine the line-number at the start of each basic block.
4484 This must be computed and saved now, because after a basic block's
4485 predecessor has been scheduled, it is impossible to accurately
4486 determine the correct line number for the first insn of the block. */
4488 for (b = 0; b < n_basic_blocks; b++)
4489 for (line = basic_block_head[b]; line; line = PREV_INSN (line))
4490 if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
4492 line_note_head[b] = line;
4493 break;
4497 bzero (insn_luid, max_uid * sizeof (int));
4498 bzero (insn_priority, max_uid * sizeof (int));
4499 bzero (insn_tick, max_uid * sizeof (int));
4500 bzero (insn_costs, max_uid * sizeof (short));
4501 bzero (insn_units, max_uid * sizeof (short));
4502 bzero (insn_blockage, max_uid * sizeof (unsigned int));
4503 bzero (insn_ref_count, max_uid * sizeof (int));
4505 /* Schedule each basic block, block by block. */
4507 /* ??? Add a NOTE after the last insn of the last basic block. It is not
4508 known why this is done. */
4510 insn = basic_block_end[n_basic_blocks-1];
4511 if (NEXT_INSN (insn) == 0
4512 || (GET_CODE (insn) != NOTE
4513 && GET_CODE (insn) != CODE_LABEL
4514 /* Don't emit a NOTE if it would end up between an unconditional
4515 jump and a BARRIER. */
4516 && ! (GET_CODE (insn) == JUMP_INSN
4517 && GET_CODE (NEXT_INSN (insn)) == BARRIER)))
4518 emit_note_after (NOTE_INSN_DELETED, basic_block_end[n_basic_blocks-1]);
4520 for (b = 0; b < n_basic_blocks; b++)
4522 rtx insn, next;
4523 rtx insns;
4525 note_list = 0;
4527 for (insn = basic_block_head[b]; ; insn = next)
4529 rtx prev;
4530 rtx set;
4532 /* Can't use `next_real_insn' because that
4533 might go across CODE_LABELS and short-circuit basic blocks. */
4534 next = NEXT_INSN (insn);
4535 if (GET_CODE (insn) != INSN)
4537 if (insn == basic_block_end[b])
4538 break;
4540 continue;
4543 /* Don't split no-op move insns. These should silently disappear
4544 later in final. Splitting such insns would break the code
4545 that handles REG_NO_CONFLICT blocks. */
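	  /* (A no-op move here means a single_set whose source and
	     destination are identical, e.g. (set (reg:SI 5) (reg:SI 5)).)  */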
4546 set = single_set (insn);
4547 if (set && rtx_equal_p (SET_SRC (set), SET_DEST (set)))
4549 if (insn == basic_block_end[b])
4550 break;
4552 /* Nops get in the way while scheduling, so delete them now if
4553 register allocation has already been done. It is too risky
4554 to try to do this before register allocation, and there are
4555 unlikely to be very many nops then anyway. */
4556 if (reload_completed)
4558 PUT_CODE (insn, NOTE);
4559 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4560 NOTE_SOURCE_FILE (insn) = 0;
4563 continue;
4566 /* Split insns here to get max fine-grain parallelism. */
4567 prev = PREV_INSN (insn);
4568 if (reload_completed == 0)
4570 rtx last, first = PREV_INSN (insn);
4571 rtx notes = REG_NOTES (insn);
4573 last = try_split (PATTERN (insn), insn, 1);
4574 if (last != insn)
4576 /* try_split returns the NOTE that INSN became. */
4577 first = NEXT_INSN (first);
4578 update_flow_info (notes, first, last, insn);
4580 PUT_CODE (insn, NOTE);
4581 NOTE_SOURCE_FILE (insn) = 0;
4582 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4583 if (insn == basic_block_head[b])
4584 basic_block_head[b] = first;
4585 if (insn == basic_block_end[b])
4587 basic_block_end[b] = last;
4588 break;
4593 if (insn == basic_block_end[b])
4594 break;
4597 schedule_block (b, dump_file);
4599 #ifdef USE_C_ALLOCA
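      /* With the C emulation of alloca, an explicit alloca (0) call
	 reclaims the storage of frames that have since been exited.  */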
4600 alloca (0);
4601 #endif
4604 /* Reposition the prologue and epilogue notes in case we moved the
4605 prologue/epilogue insns. */
4606 if (reload_completed)
4607 reposition_prologue_and_epilogue_notes (get_insns ());
4609 if (write_symbols != NO_DEBUG)
4611 rtx line = 0;
4612 rtx insn = get_insns ();
4613 int active_insn = 0;
4614 int notes = 0;
4616 /* Walk the insns deleting redundant line-number notes. Many of these
4617 were already present before scheduling; the remainder tend to occur
4618 at basic block boundaries. */
4619 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
4620 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
4622 /* If there are no active insns following, INSN is redundant. */
4623 if (active_insn == 0)
4625 notes++;
4626 NOTE_SOURCE_FILE (insn) = 0;
4627 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4629 /* If the line number is unchanged, LINE is redundant. */
4630 else if (line
4631 && NOTE_LINE_NUMBER (line) == NOTE_LINE_NUMBER (insn)
4632 && NOTE_SOURCE_FILE (line) == NOTE_SOURCE_FILE (insn))
4634 notes++;
4635 NOTE_SOURCE_FILE (line) = 0;
4636 NOTE_LINE_NUMBER (line) = NOTE_INSN_DELETED;
4637 line = insn;
4639 else
4640 line = insn;
4641 active_insn = 0;
4643 else if (! ((GET_CODE (insn) == NOTE
4644 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED)
4645 || (GET_CODE (insn) == INSN
4646 && (GET_CODE (PATTERN (insn)) == USE
4647 || GET_CODE (PATTERN (insn)) == CLOBBER))))
4648 active_insn++;
4650 if (dump_file && notes)
4651 fprintf (dump_file, ";; deleted %d line-number notes\n", notes);
4654 if (reload_completed == 0)
4656 int regno;
4657 for (regno = 0; regno < max_regno; regno++)
4658 if (sched_reg_live_length[regno])
4660 if (dump_file)
4662 if (reg_live_length[regno] > sched_reg_live_length[regno])
4663 fprintf (dump_file,
4664 ";; register %d life shortened from %d to %d\n",
4665 regno, reg_live_length[regno],
4666 sched_reg_live_length[regno]);
4667 /* Negative values are special; don't overwrite the current
4668 reg_live_length value if it is negative. */
4669 else if (reg_live_length[regno] < sched_reg_live_length[regno]
4670 && reg_live_length[regno] >= 0)
4671 fprintf (dump_file,
4672 ";; register %d life extended from %d to %d\n",
4673 regno, reg_live_length[regno],
4674 sched_reg_live_length[regno]);
4676 if (reg_n_calls_crossed[regno]
4677 && ! sched_reg_n_calls_crossed[regno])
4678 fprintf (dump_file,
4679 ";; register %d no longer crosses calls\n", regno);
4680 else if (! reg_n_calls_crossed[regno]
4681 && sched_reg_n_calls_crossed[regno])
4682 fprintf (dump_file,
4683 ";; register %d now crosses calls\n", regno);
4685 /* Negative values are special; don't overwrite the current
4686 reg_live_length value if it is negative. */
4687 if (reg_live_length[regno] >= 0)
4688 reg_live_length[regno] = sched_reg_live_length[regno];
4689 reg_n_calls_crossed[regno] = sched_reg_n_calls_crossed[regno];
4693 #endif /* INSN_SCHEDULING */