/* Swing Modulo Scheduling implementation.
   Copyright (C) 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
#include "cfglayout.h"
#include "tree-pass.h"

#ifdef INSN_SCHEDULING
/* This file contains the implementation of the Swing Modulo Scheduler,
   described in the following references:
   [1] J. Llosa, A. Gonzalez, E. Ayguade, M. Valero., and J. Eckhardt.
       Lifetime--sensitive modulo scheduling in a production environment.
       IEEE Trans. on Comps., 50(3), March 2001
   [2] J. Llosa, A. Gonzalez, E. Ayguade, and M. Valero.
       Swing Modulo Scheduling: A Lifetime Sensitive Approach.
       PACT '96 , pages 80-87, October 1996 (Boston - Massachusetts - USA).

   The basic structure is:
   1. Build a data-dependence graph (DDG) for each loop.
   2. Use the DDG to order the insns of a loop (not in topological order
      necessarily, but rather) trying to place each insn after all its
      predecessors _or_ after all its successors.
   3. Compute MII: a lower bound on the number of cycles to schedule the loop.
   4. Use the ordering to perform list-scheduling of the loop:
      1. Set II = MII.  We will try to schedule the loop within II cycles.
      2. Try to schedule the insns one by one according to the ordering.
         For each insn compute an interval of cycles by considering already-
         scheduled preds and succs (and associated latencies); try to place
         the insn in the cycles of this window checking for potential
         resource conflicts (using the DFA interface).
         Note: this is different from the cycle-scheduling of schedule_insns;
         here the insns are not scheduled monotonically top-down (nor bottom-
         up).
      3. If failed in scheduling all insns - bump II++ and try again, unless
         II reaches an upper bound MaxII, in which case report failure.
   5. If we succeeded in scheduling the loop within II cycles, we now
      generate prolog and epilog, decrease the counter of the loop, and
      perform modulo variable expansion for live ranges that span more than
      II cycles (i.e. use register copies to prevent a def from overwriting
      itself before reaching the use).  */
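/* Illustrative example (not from the original sources): with II = 2, a
   register defined at cycle 0 and used at cycle 5 is still live when the
   def of the next overlapped iteration executes at cycle 2; modulo
   variable expansion breaks this overwrite by giving each in-flight
   iteration its own register copy.  */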
/* This page defines partial-schedule structures and functions for
   modulo scheduling.  */

typedef struct partial_schedule *partial_schedule_ptr;
typedef struct ps_insn *ps_insn_ptr;
/* The minimum (absolute) cycle that a node of ps was scheduled in.  */
#define PS_MIN_CYCLE(ps) (((partial_schedule_ptr)(ps))->min_cycle)

/* The maximum (absolute) cycle that a node of ps was scheduled in.  */
#define PS_MAX_CYCLE(ps) (((partial_schedule_ptr)(ps))->max_cycle)

/* Perform signed modulo, always returning a non-negative value.  */
#define SMODULO(x,y) ((x) % (y) < 0 ? ((x) % (y) + (y)) : (x) % (y))
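/* Illustrative use of SMODULO: with ii = 4, SMODULO (-6, 4) yields 2,
   whereas the raw C expression -6 % 4 may yield -2; rows of the partial
   schedule are therefore always indexed by the non-negative result.  */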
/* The number of different iterations the nodes in ps span, assuming
   the stage boundaries are placed efficiently.  */
#define PS_STAGE_COUNT(ps) ((PS_MAX_CYCLE (ps) - PS_MIN_CYCLE (ps) \
                             + 1 + (ps)->ii - 1) / (ps)->ii)
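/* Worked example (illustrative): min_cycle = 1, max_cycle = 9 and ii = 4
   span 9 - 1 + 1 = 9 cycles, so PS_STAGE_COUNT = (9 + 4 - 1) / 4 = 3
   stages.  */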
/* A single instruction in the partial schedule.  */
struct ps_insn
{
  /* The corresponding DDG_NODE.  */
  ddg_node_ptr node;

  /* The (absolute) cycle in which the PS instruction is scheduled.
     Same as SCHED_TIME (node).  */
  int cycle;

  /* The next/prev PS_INSN in the same row.  */
  ps_insn_ptr next_in_row,
              prev_in_row;

  /* The number of nodes in the same row that come after this node.  */
  int row_rest_count;
};
/* Holds the partial schedule as an array of II rows.  Each entry of the
   array points to a linked list of PS_INSNs, which represents the
   instructions that are scheduled for that row.  */
struct partial_schedule
{
  int ii;       /* Number of rows in the partial schedule.  */
  int history;  /* Threshold for conflict checking using DFA.  */

  /* rows[i] points to linked list of insns scheduled in row i (0<=i<ii).  */
  ps_insn_ptr *rows;

  /* The earliest absolute cycle of an insn in the partial schedule.  */
  int min_cycle;

  /* The latest absolute cycle of an insn in the partial schedule.  */
  int max_cycle;

  ddg_ptr g;    /* The DDG of the insns in the partial schedule.  */
};

/* We use this to record all the register replacements we do in
   the kernel so we can undo SMS if it is not profitable.  */
struct undo_replace_buff_elem
{
  rtx insn;
  rtx orig_reg;
  rtx new_reg;
  struct undo_replace_buff_elem *next;
};
static partial_schedule_ptr create_partial_schedule (int ii, ddg_ptr, int history);
static void free_partial_schedule (partial_schedule_ptr);
static void reset_partial_schedule (partial_schedule_ptr, int new_ii);
void print_partial_schedule (partial_schedule_ptr, FILE *);
static int kernel_number_of_cycles (rtx first_insn, rtx last_insn);
static ps_insn_ptr ps_add_node_check_conflicts (partial_schedule_ptr,
						ddg_node_ptr node, int cycle,
						sbitmap must_precede,
						sbitmap must_follow);
static void rotate_partial_schedule (partial_schedule_ptr, int);
void set_row_column_for_ps (partial_schedule_ptr);
static bool ps_unschedule_node (partial_schedule_ptr, ddg_node_ptr);
/* This page defines constants and structures for the modulo scheduling
   driver.  */

/* As in haifa-sched.c:  */
/* issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  */

static int issue_rate;
static int sms_order_nodes (ddg_ptr, int, int * result);
static void set_node_sched_params (ddg_ptr);
static partial_schedule_ptr sms_schedule_by_order (ddg_ptr, int, int, int *);
static void permute_partial_schedule (partial_schedule_ptr ps, rtx last);
static void generate_prolog_epilog (partial_schedule_ptr, struct loop *loop,
				    rtx);
static void duplicate_insns_of_cycles (partial_schedule_ptr ps,
				       int from_stage, int to_stage,
				       int for_prolog);
#define SCHED_ASAP(x) (((node_sched_params_ptr)(x)->aux.info)->asap)
#define SCHED_TIME(x) (((node_sched_params_ptr)(x)->aux.info)->time)
#define SCHED_FIRST_REG_MOVE(x) \
	(((node_sched_params_ptr)(x)->aux.info)->first_reg_move)
#define SCHED_NREG_MOVES(x) \
	(((node_sched_params_ptr)(x)->aux.info)->nreg_moves)
#define SCHED_ROW(x) (((node_sched_params_ptr)(x)->aux.info)->row)
#define SCHED_STAGE(x) (((node_sched_params_ptr)(x)->aux.info)->stage)
#define SCHED_COLUMN(x) (((node_sched_params_ptr)(x)->aux.info)->column)
/* The scheduling parameters held for each node.  */
typedef struct node_sched_params
{
  int asap;	/* A lower-bound on the absolute scheduling cycle.  */
  int time;	/* The absolute scheduling cycle (time >= asap).  */

  /* The following field (first_reg_move) is a pointer to the first
     register-move instruction added to handle the modulo-variable-expansion
     of the register defined by this node.  This register-move copies the
     original register defined by the node.  */
  rtx first_reg_move;

  /* The number of register-move instructions added, immediately preceding
     first_reg_move.  */
  int nreg_moves;

  int row;    /* Holds time % ii.  */
  int stage;  /* Holds time / ii.  */

  /* The column of a node inside the ps.  If nodes u, v are on the same row,
     u will precede v if column (u) < column (v).  */
  int column;
} *node_sched_params_ptr;
/* The following three functions are copied from the current scheduler
   code in order to use sched_analyze() for computing the dependencies.
   They are used when initializing the sched_info structure.  */

static const char *
sms_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  sprintf (tmp, "i%4d", INSN_UID (insn));
  return tmp;
}

static void
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
			       regset cond_exec ATTRIBUTE_UNUSED,
			       regset used ATTRIBUTE_UNUSED,
			       regset set ATTRIBUTE_UNUSED)
{
}
static struct sched_info sms_sched_info =
{
  NULL,
  NULL,
  NULL,
  NULL,
  NULL,
  sms_print_insn,
  NULL,
  compute_jump_reg_dependencies,
  NULL, NULL,
  NULL, NULL,
  0, 0, 0,

  NULL, NULL, NULL, NULL, NULL,
#ifdef ENABLE_CHECKING
  NULL,
#endif
  0
};
/* Return the register decremented and tested in INSN,
   or zero if it is not a decrement-and-branch insn.  */

static rtx
doloop_register_get (rtx insn ATTRIBUTE_UNUSED)
{
#ifdef HAVE_doloop_end
  rtx pattern, reg, condition;

  if (! JUMP_P (insn))
    return NULL_RTX;

  pattern = PATTERN (insn);
  condition = doloop_condition_get (pattern);
  if (! condition)
    return NULL_RTX;

  if (REG_P (XEXP (condition, 0)))
    reg = XEXP (condition, 0);
  else if (GET_CODE (XEXP (condition, 0)) == PLUS
	   && REG_P (XEXP (XEXP (condition, 0), 0)))
    reg = XEXP (XEXP (condition, 0), 0);
  else
    gcc_unreachable ();

  return reg;
#else
  return NULL_RTX;
#endif
}
/* Check if COUNT_REG is set to a constant in the PRE_HEADER block, so
   that the number of iterations is a compile-time constant.  If so,
   return the rtx that sets COUNT_REG to a constant, and set COUNT to
   this constant.  Otherwise return 0.  */
static rtx
const_iteration_count (rtx count_reg, basic_block pre_header,
		       HOST_WIDEST_INT * count)
{
  rtx insn;
  rtx head, tail;

  if (! pre_header)
    return NULL_RTX;

  get_ebb_head_tail (pre_header, pre_header, &head, &tail);

  for (insn = tail; insn != PREV_INSN (head); insn = PREV_INSN (insn))
    if (INSN_P (insn) && single_set (insn) &&
	rtx_equal_p (count_reg, SET_DEST (single_set (insn))))
      {
	rtx pat = single_set (insn);

	if (GET_CODE (SET_SRC (pat)) == CONST_INT)
	  {
	    *count = INTVAL (SET_SRC (pat));
	    return insn;
	  }

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* A very simple resource-based lower bound on the initiation interval.
   ??? Improve the accuracy of this bound by considering the
   utilization of various units.  */
static int
res_MII (ddg_ptr g)
{
  return (g->num_nodes / issue_rate);
}
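/* Illustrative bound: a DDG with 7 nodes on a machine with issue_rate 2
   yields 7 / 2 = 3; the truncating division makes this lower bound
   optimistic rather than a true ceiling.  */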
/* Points to the array that contains the sched data for each node.  */
static node_sched_params_ptr node_sched_params;

/* Allocate sched_params for each node and initialize it.  Assumes that
   the aux field of each node contains the asap bound (computed earlier),
   and copies it into the sched_params field.  */
static void
set_node_sched_params (ddg_ptr g)
{
  int i;

  /* Allocate for each node in the DDG a place to hold the "sched_data".  */
  /* Initialize ASAP/ALAP/HEIGHT to zero.  */
  node_sched_params = (node_sched_params_ptr)
		       xcalloc (g->num_nodes,
				sizeof (struct node_sched_params));

  /* Set the pointer of the general data of the node to point to the
     appropriate sched_params structure.  */
  for (i = 0; i < g->num_nodes; i++)
    {
      /* Watch out for aliasing problems?  */
      node_sched_params[i].asap = g->nodes[i].aux.count;
      g->nodes[i].aux.info = &node_sched_params[i];
    }
}
static void
print_node_sched_params (FILE * file, int num_nodes)
{
  int i;

  if (! file)
    return;
  for (i = 0; i < num_nodes; i++)
    {
      node_sched_params_ptr nsp = &node_sched_params[i];
      rtx reg_move = nsp->first_reg_move;
      int j;

      fprintf (file, "Node %d:\n", i);
      fprintf (file, " asap = %d:\n", nsp->asap);
      fprintf (file, " time = %d:\n", nsp->time);
      fprintf (file, " nreg_moves = %d:\n", nsp->nreg_moves);
      for (j = 0; j < nsp->nreg_moves; j++)
	{
	  fprintf (file, " reg_move = ");
	  print_rtl_single (file, reg_move);
	  reg_move = PREV_INSN (reg_move);
	}
    }
}
/* Calculate an upper bound for II.  SMS should not schedule the loop if it
   requires more cycles than this bound.  Currently set to the sum of the
   longest latency edge for each node.  Reset based on experiments.  */
static int
calculate_maxii (ddg_ptr g)
{
  int i;
  int maxii = 0;

  for (i = 0; i < g->num_nodes; i++)
    {
      ddg_node_ptr u = &g->nodes[i];
      ddg_edge_ptr e;
      int max_edge_latency = 0;

      for (e = u->out; e; e = e->next_out)
	max_edge_latency = MAX (max_edge_latency, e->latency);

      maxii += max_edge_latency;
    }
  return maxii;
}
/*
   Breaking intra-loop register anti-dependences:
   Each intra-loop register anti-dependence implies a cross-iteration true
   dependence of distance 1.  Therefore, we can remove such false dependencies
   and figure out if the partial schedule broke them by checking if (for a
   true-dependence of distance 1): SCHED_TIME (def) < SCHED_TIME (use) and
   if so generate a register move.  The number of such moves is equal to:
              SCHED_TIME (use) - SCHED_TIME (def)       { 0 broken
   nreg_moves = ----------------------------------- + 1 - {   dependence.
                            ii                            { 1 if not.
*/
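/* Worked example (illustrative): with ii = 4, SCHED_TIME (def) = 2 and
   SCHED_TIME (use) = 11, the value must survive (11 - 2) / 4 = 2 extra
   kernel iterations, so two register moves are generated; if the use
   belongs to the next iteration (a broken anti-dependence, distance 1)
   the formula above adds one more.  */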
static struct undo_replace_buff_elem *
generate_reg_moves (partial_schedule_ptr ps)
{
  ddg_ptr g = ps->g;
  int ii = ps->ii;
  int i;
  struct undo_replace_buff_elem *reg_move_replaces = NULL;

  for (i = 0; i < g->num_nodes; i++)
    {
      ddg_node_ptr u = &g->nodes[i];
      ddg_edge_ptr e;
      int nreg_moves = 0, i_reg_move;
      sbitmap *uses_of_defs;
      rtx last_reg_move;
      rtx prev_reg, old_reg;

      /* Compute the number of reg_moves needed for u, by looking at life
	 ranges started at u (excluding self-loops).  */
      for (e = u->out; e; e = e->next_out)
	if (e->type == TRUE_DEP && e->dest != e->src)
	  {
	    int nreg_moves4e = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii;

	    if (e->distance == 1)
	      nreg_moves4e = (SCHED_TIME (e->dest) - SCHED_TIME (e->src) + ii) / ii;

	    /* If dest precedes src in the schedule of the kernel, then dest
	       will read before src writes and we can save one reg_copy.  */
	    if (SCHED_ROW (e->dest) == SCHED_ROW (e->src)
		&& SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src))
	      nreg_moves4e--;

	    nreg_moves = MAX (nreg_moves, nreg_moves4e);
	  }

      if (nreg_moves == 0)
	continue;

      /* Every use of the register defined by node may require a different
	 copy of this register, depending on the time the use is scheduled.
	 Set a bitmap vector, telling which nodes use each copy of this
	 register.  */
      uses_of_defs = sbitmap_vector_alloc (nreg_moves, g->num_nodes);
      sbitmap_vector_zero (uses_of_defs, nreg_moves);
      for (e = u->out; e; e = e->next_out)
	if (e->type == TRUE_DEP && e->dest != e->src)
	  {
	    int dest_copy = (SCHED_TIME (e->dest) - SCHED_TIME (e->src)) / ii;

	    if (e->distance == 1)
	      dest_copy = (SCHED_TIME (e->dest) - SCHED_TIME (e->src) + ii) / ii;

	    if (SCHED_ROW (e->dest) == SCHED_ROW (e->src)
		&& SCHED_COLUMN (e->dest) < SCHED_COLUMN (e->src))
	      dest_copy--;

	    if (dest_copy)
	      SET_BIT (uses_of_defs[dest_copy - 1], e->dest->cuid);
	  }

      /* Now generate the reg_moves, attaching relevant uses to them.  */
      SCHED_NREG_MOVES (u) = nreg_moves;
      old_reg = prev_reg = copy_rtx (SET_DEST (single_set (u->insn)));
      last_reg_move = u->insn;

      for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++)
	{
	  unsigned int i_use = 0;
	  rtx new_reg = gen_reg_rtx (GET_MODE (prev_reg));
	  rtx reg_move = gen_move_insn (new_reg, prev_reg);
	  sbitmap_iterator sbi;

	  add_insn_before (reg_move, last_reg_move);
	  last_reg_move = reg_move;

	  if (!SCHED_FIRST_REG_MOVE (u))
	    SCHED_FIRST_REG_MOVE (u) = reg_move;

	  EXECUTE_IF_SET_IN_SBITMAP (uses_of_defs[i_reg_move], 0, i_use, sbi)
	    {
	      struct undo_replace_buff_elem *rep;

	      rep = (struct undo_replace_buff_elem *)
		    xcalloc (1, sizeof (struct undo_replace_buff_elem));
	      rep->insn = g->nodes[i_use].insn;
	      rep->orig_reg = old_reg;
	      rep->new_reg = new_reg;

	      if (! reg_move_replaces)
		reg_move_replaces = rep;
	      else
		{
		  rep->next = reg_move_replaces;
		  reg_move_replaces = rep;
		}

	      replace_rtx (g->nodes[i_use].insn, old_reg, new_reg);
	    }

	  prev_reg = new_reg;
	}
      sbitmap_vector_free (uses_of_defs);
    }
  return reg_move_replaces;
}
/* We call this when we want to undo the SMS schedule for a given loop.
   One of the things that we do is to delete the register moves generated
   for the sake of SMS; this function deletes the register move instructions
   recorded in the undo buffer.  */
static void
undo_generate_reg_moves (partial_schedule_ptr ps,
			 struct undo_replace_buff_elem *reg_move_replaces)
{
  int i, j;

  for (i = 0; i < ps->g->num_nodes; i++)
    {
      ddg_node_ptr u = &ps->g->nodes[i];
      rtx prev;
      rtx crr = SCHED_FIRST_REG_MOVE (u);

      for (j = 0; j < SCHED_NREG_MOVES (u); j++)
	{
	  prev = PREV_INSN (crr);
	  delete_insn (crr);
	  crr = prev;
	}
      SCHED_FIRST_REG_MOVE (u) = NULL_RTX;
    }

  while (reg_move_replaces)
    {
      struct undo_replace_buff_elem *rep = reg_move_replaces;

      reg_move_replaces = reg_move_replaces->next;
      replace_rtx (rep->insn, rep->new_reg, rep->orig_reg);
    }
}
/* Free memory allocated for the undo buffer.  */
static void
free_undo_replace_buff (struct undo_replace_buff_elem *reg_move_replaces)
{
  while (reg_move_replaces)
    {
      struct undo_replace_buff_elem *rep = reg_move_replaces;

      reg_move_replaces = reg_move_replaces->next;
      free (rep);
    }
}
/* Bump the SCHED_TIMEs of all nodes to start from zero.  Set the values
   of SCHED_ROW and SCHED_STAGE.  */
static void
normalize_sched_times (partial_schedule_ptr ps)
{
  int i;
  ddg_ptr g = ps->g;
  int ii = ps->ii;
  int amount = PS_MIN_CYCLE (ps);

  /* Don't include the closing branch assuming that it is the last node.  */
  for (i = 0; i < g->num_nodes - 1; i++)
    {
      ddg_node_ptr u = &g->nodes[i];
      int normalized_time = SCHED_TIME (u) - amount;

      gcc_assert (normalized_time >= 0);

      SCHED_TIME (u) = normalized_time;
      SCHED_ROW (u) = normalized_time % ii;
      SCHED_STAGE (u) = normalized_time / ii;
    }
}
/* Set SCHED_COLUMN of each node according to its position in PS.  */
static void
set_columns_for_ps (partial_schedule_ptr ps)
{
  int row;

  for (row = 0; row < ps->ii; row++)
    {
      ps_insn_ptr cur_insn = ps->rows[row];
      int column = 0;

      for (; cur_insn; cur_insn = cur_insn->next_in_row)
	SCHED_COLUMN (cur_insn->node) = column++;
    }
}
/* Permute the insns according to their order in PS, from row 0 to
   row ii-1, and position them right before LAST.  This schedules
   the insns of the loop kernel.  */
static void
permute_partial_schedule (partial_schedule_ptr ps, rtx last)
{
  int row;
  ps_insn_ptr ps_ij;
  int ii = ps->ii;

  for (row = 0; row < ii; row++)
    for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row)
      if (PREV_INSN (last) != ps_ij->node->insn)
	reorder_insns_nobb (ps_ij->node->first_note, ps_ij->node->insn,
			    PREV_INSN (last));
}
/* As part of undoing SMS we return to the original ordering of the
   instructions inside the loop kernel.  Given the partial schedule PS, this
   function restores the ordering of the instructions according to their CUID
   in the DDG (PS->G), which is the original order of the instructions before
   performing SMS.  */
static void
undo_permute_partial_schedule (partial_schedule_ptr ps, rtx last)
{
  int i;

  for (i = 0 ; i < ps->g->num_nodes; i++)
    if (last == ps->g->nodes[i].insn
	|| last == ps->g->nodes[i].first_note)
      break;
    else if (PREV_INSN (last) != ps->g->nodes[i].insn)
      reorder_insns_nobb (ps->g->nodes[i].first_note, ps->g->nodes[i].insn,
			  PREV_INSN (last));
}
/* Used to generate the prologue & epilogue.  Duplicate the subset of
   nodes whose stages are between FROM_STAGE and TO_STAGE (inclusive
   of both), together with a prefix/suffix of their reg_moves.  */
static void
duplicate_insns_of_cycles (partial_schedule_ptr ps, int from_stage,
			   int to_stage, int for_prolog)
{
  int row;
  ps_insn_ptr ps_ij;

  for (row = 0; row < ps->ii; row++)
    for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row)
      {
	ddg_node_ptr u_node = ps_ij->node;
	int j, i_reg_moves;
	rtx reg_move = NULL_RTX;

	if (for_prolog)
	  {
	    /* SCHED_STAGE (u_node) >= from_stage == 0.  Generate increasing
	       number of reg_moves starting with the second occurrence of
	       u_node, which is generated if its SCHED_STAGE <= to_stage.  */
	    i_reg_moves = to_stage - SCHED_STAGE (u_node) + 1;
	    i_reg_moves = MAX (i_reg_moves, 0);
	    i_reg_moves = MIN (i_reg_moves, SCHED_NREG_MOVES (u_node));

	    /* The reg_moves start from the *first* reg_move backwards.  */
	    if (i_reg_moves)
	      {
		reg_move = SCHED_FIRST_REG_MOVE (u_node);
		for (j = 1; j < i_reg_moves; j++)
		  reg_move = PREV_INSN (reg_move);
	      }
	  }
	else /* It's for the epilog.  */
	  {
	    /* SCHED_STAGE (u_node) <= to_stage.  Generate all reg_moves,
	       starting to decrease one stage after u_node no longer occurs;
	       that is, generate all reg_moves until
	       SCHED_STAGE (u_node) == from_stage - 1.  */
	    i_reg_moves = SCHED_NREG_MOVES (u_node)
			  - (from_stage - SCHED_STAGE (u_node) - 1);
	    i_reg_moves = MAX (i_reg_moves, 0);
	    i_reg_moves = MIN (i_reg_moves, SCHED_NREG_MOVES (u_node));

	    /* The reg_moves start from the *last* reg_move forwards.  */
	    if (i_reg_moves)
	      {
		reg_move = SCHED_FIRST_REG_MOVE (u_node);
		for (j = 1; j < SCHED_NREG_MOVES (u_node); j++)
		  reg_move = PREV_INSN (reg_move);
	      }
	  }

	for (j = 0; j < i_reg_moves; j++, reg_move = NEXT_INSN (reg_move))
	  emit_insn (copy_rtx (PATTERN (reg_move)));
	if (SCHED_STAGE (u_node) >= from_stage
	    && SCHED_STAGE (u_node) <= to_stage)
	  duplicate_insn_chain (u_node->first_note, u_node->insn);
      }
}
/* Generate the instructions (including reg_moves) for prolog & epilog.  */
static void
generate_prolog_epilog (partial_schedule_ptr ps, struct loop * loop, rtx count_reg)
{
  int i;
  int last_stage = PS_STAGE_COUNT (ps) - 1;
  edge e;

  /* Generate the prolog, inserting its insns on the loop-entry edge.  */
  start_sequence ();

  if (count_reg)
    /* Generate a subtract instruction at the beginning of the prolog to
       adjust the loop count by STAGE_COUNT.  */
    emit_insn (gen_sub2_insn (count_reg, GEN_INT (last_stage)));

  for (i = 0; i < last_stage; i++)
    duplicate_insns_of_cycles (ps, 0, i, 1);

  /* Put the prolog on the entry edge.  */
  e = loop_preheader_edge (loop);
  split_edge_and_insert (e, get_insns ());

  end_sequence ();

  /* Generate the epilog, inserting its insns on the loop-exit edge.  */
  start_sequence ();

  for (i = 0; i < last_stage; i++)
    duplicate_insns_of_cycles (ps, i + 1, last_stage, 0);

  /* Put the epilogue on the exit edge.  */
  gcc_assert (single_exit (loop));
  e = single_exit (loop);
  split_edge_and_insert (e, get_insns ());
  end_sequence ();
}
/* Return true if all the BBs of the loop are empty except the
   loop header.  */
static bool
loop_single_full_bb_p (struct loop *loop)
{
  unsigned i;
  basic_block *bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      rtx head, tail;
      bool empty_bb = true;

      if (bbs[i] == loop->header)
	continue;

      /* Make sure that basic blocks other than the header
	 have only notes, labels or jumps.  */
      get_ebb_head_tail (bbs[i], bbs[i], &head, &tail);
      for (; head != NEXT_INSN (tail); head = NEXT_INSN (head))
	{
	  if (NOTE_P (head) || LABEL_P (head)
	      || (INSN_P (head) && JUMP_P (head)))
	    continue;
	  empty_bb = false;
	  break;
	}

      if (! empty_bb)
	{
	  free (bbs);
	  return false;
	}
    }
  free (bbs);
  return true;
}
/* A simple loop from SMS point of view; it is a loop that is composed of
   either a single basic block or two BBs - a header and a latch.  */
#define SIMPLE_SMS_LOOP_P(loop) ((loop->num_nodes < 3 )			\
				  && (EDGE_COUNT (loop->latch->preds) == 1) \
				  && (EDGE_COUNT (loop->latch->succs) == 1))

/* Return true if the loop is in its canonical form and false if not.
   i.e. SIMPLE_SMS_LOOP_P and have one preheader block, and single exit.  */
static bool
loop_canon_p (struct loop *loop)
{
  if (loop->inner || ! loop->outer)
    return false;

  if (!single_exit (loop))
    {
      if (dump_file)
	{
	  rtx insn = BB_END (loop->header);

	  fprintf (dump_file, "SMS loop many exits ");
	  fprintf (dump_file, " %s %d (file, line)\n",
		   insn_file (insn), insn_line (insn));
	}
      return false;
    }

  if (! SIMPLE_SMS_LOOP_P (loop) && ! loop_single_full_bb_p (loop))
    {
      if (dump_file)
	{
	  rtx insn = BB_END (loop->header);

	  fprintf (dump_file, "SMS loop many BBs. ");
	  fprintf (dump_file, " %s %d (file, line)\n",
		   insn_file (insn), insn_line (insn));
	}
      return false;
    }

  return true;
}
/* If there are more than one entry for the loop,
   make it one by splitting the first entry edge and
   redirecting the others to the new BB.  */
static void
canon_loop (struct loop *loop)
{
  edge e;
  edge_iterator i;

  /* Avoid annoying special cases of edges going to exit
     block.  */
  FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR->preds)
    if ((e->flags & EDGE_FALLTHRU) && (EDGE_COUNT (e->src->succs) > 1))
      split_edge (e);

  if (loop->latch == loop->header
      || EDGE_COUNT (loop->latch->succs) > 1)
    {
      FOR_EACH_EDGE (e, i, loop->header->preds)
	if (e->src == loop->latch)
	  break;
      split_edge (e);
    }
}
/* Main entry point, perform SMS scheduling on the loops of the function
   that consist of single basic blocks.  */
static void
sms_schedule (void)
{
  static int passes = 0;
  rtx insn;
  ddg_ptr *g_arr, g;
  int * node_order;
  int maxii;
  loop_iterator li;
  partial_schedule_ptr ps;
  struct df *df;
  basic_block bb = NULL;
  struct loop *loop, *nloop;
  basic_block condition_bb = NULL;
  edge latch_edge;
  gcov_type trip_count = 0;

  loop_optimizer_init (LOOPS_HAVE_PREHEADERS
		       | LOOPS_HAVE_MARKED_SINGLE_EXITS);
  if (number_of_loops () <= 1)
    {
      loop_optimizer_finalize ();
      return; /* There are no loops to schedule.  */
    }

  /* Initialize issue_rate.  */
  if (targetm.sched.issue_rate)
    {
      int temp = reload_completed;

      reload_completed = 1;
      issue_rate = targetm.sched.issue_rate ();
      reload_completed = temp;
    }
  else
    issue_rate = 1;

  /* Initialize the scheduler.  */
  current_sched_info = &sms_sched_info;
  sched_init ();

  /* Init Data Flow analysis, to be used in interloop dep calculation.  */
  df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES | DF_SUBREGS);
  df_rd_add_problem (df, 0);
  df_ru_add_problem (df, 0);
  df_chain_add_problem (df, DF_DU_CHAIN | DF_UD_CHAIN);
  df_analyze (df);

  if (dump_file)
    df_dump (df, dump_file);

  /* Allocate memory to hold the DDG array one entry for each loop.
     We use loop->num as index into this array.  */
  g_arr = XCNEWVEC (ddg_ptr, number_of_loops ());

  /* Build DDGs for all the relevant loops and hold them in G_ARR
     indexed by the loop index.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      rtx head, tail;
      rtx count_reg;

      /* For debugging.  */
      if ((passes++ > MAX_SMS_LOOP_NUMBER) && (MAX_SMS_LOOP_NUMBER != -1))
	{
	  if (dump_file)
	    fprintf (dump_file, "SMS reached MAX_PASSES... \n");
	  break;
	}

      if (! loop_canon_p (loop))
	continue;

      if (! loop_single_full_bb_p (loop))
	continue;

      bb = loop->header;

      get_ebb_head_tail (bb, bb, &head, &tail);
      latch_edge = loop_latch_edge (loop);
      gcc_assert (single_exit (loop));
      if (single_exit (loop)->count)
	trip_count = latch_edge->count / single_exit (loop)->count;

      /* Perform SMS only on loops whose average count is above threshold.  */

      if ( latch_edge->count
	   && (latch_edge->count < single_exit (loop)->count * SMS_LOOP_AVERAGE_COUNT_THRESHOLD))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, " %s %d (file, line)\n",
		       insn_file (tail), insn_line (tail));
	      fprintf (dump_file, "SMS single-bb-loop\n");
	      if (profile_info && flag_branch_probabilities)
		{
		  fprintf (dump_file, "SMS loop-count ");
		  fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
			   (HOST_WIDEST_INT) bb->count);
		  fprintf (dump_file, "\n");
		  fprintf (dump_file, "SMS trip-count ");
		  fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
			   (HOST_WIDEST_INT) trip_count);
		  fprintf (dump_file, "\n");
		  fprintf (dump_file, "SMS profile-sum-max ");
		  fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
			   (HOST_WIDEST_INT) profile_info->sum_max);
		  fprintf (dump_file, "\n");
		}
	    }
	  continue;
	}

      /* Make sure this is a doloop.  */
      if ( !(count_reg = doloop_register_get (tail)))
	continue;

      /* Don't handle BBs with calls or barriers, or !single_set insns.  */
      for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
	if (CALL_P (insn)
	    || BARRIER_P (insn)
	    || (INSN_P (insn) && !JUMP_P (insn)
		&& !single_set (insn) && GET_CODE (PATTERN (insn)) != USE))
	  break;

      if (insn != NEXT_INSN (tail))
	{
	  if (dump_file)
	    {
	      if (CALL_P (insn))
		fprintf (dump_file, "SMS loop-with-call\n");
	      else if (BARRIER_P (insn))
		fprintf (dump_file, "SMS loop-with-barrier\n");
	      else
		fprintf (dump_file, "SMS loop-with-not-single-set\n");
	      print_rtl_single (dump_file, insn);
	    }

	  continue;
	}

      if (! (g = create_ddg (bb, df, 0)))
	{
	  if (dump_file)
	    fprintf (dump_file, "SMS doloop\n");
	  continue;
	}

      g_arr[loop->num] = g;
    }

  /* Release Data Flow analysis data structures.  */
  df_finish (df);
  df = NULL;
  /* We don't want to perform SMS on new loops - created by versioning.  */
  FOR_EACH_LOOP (li, loop, LI_ONLY_OLD)
    {
      rtx head, tail;
      rtx count_reg, count_init;
      int mii, rec_mii;
      unsigned stage_count = 0;
      HOST_WIDEST_INT loop_count = 0;

      if (! (g = g_arr[loop->num]))
	continue;

      if (dump_file)
	print_ddg (dump_file, g);

      get_ebb_head_tail (loop->header, loop->header, &head, &tail);

      latch_edge = loop_latch_edge (loop);
      gcc_assert (single_exit (loop));
      if (single_exit (loop)->count)
	trip_count = latch_edge->count / single_exit (loop)->count;

      if (dump_file)
	{
	  fprintf (dump_file, " %s %d (file, line)\n",
		   insn_file (tail), insn_line (tail));
	  fprintf (dump_file, "SMS single-bb-loop\n");
	  if (profile_info && flag_branch_probabilities)
	    {
	      fprintf (dump_file, "SMS loop-count ");
	      fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
		       (HOST_WIDEST_INT) bb->count);
	      fprintf (dump_file, "\n");
	      fprintf (dump_file, "SMS profile-sum-max ");
	      fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
		       (HOST_WIDEST_INT) profile_info->sum_max);
	      fprintf (dump_file, "\n");
	    }
	  fprintf (dump_file, "SMS doloop\n");
	  fprintf (dump_file, "SMS built-ddg %d\n", g->num_nodes);
	  fprintf (dump_file, "SMS num-loads %d\n", g->num_loads);
	  fprintf (dump_file, "SMS num-stores %d\n", g->num_stores);
	}

      /* In case the loop has a doloop register, it gets special
	 handling.  */
      count_init = NULL_RTX;
      if ((count_reg = doloop_register_get (tail)))
	{
	  basic_block pre_header;

	  pre_header = loop_preheader_edge (loop)->src;
	  count_init = const_iteration_count (count_reg, pre_header,
					      &loop_count);
	}
      gcc_assert (count_reg);

      if (dump_file && count_init)
	{
	  fprintf (dump_file, "SMS const-doloop ");
	  fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
		   loop_count);
	  fprintf (dump_file, "\n");
	}

      node_order = XNEWVEC (int, g->num_nodes);

      mii = 1; /* Need to pass some estimate of mii.  */
      rec_mii = sms_order_nodes (g, mii, node_order);
      mii = MAX (res_MII (g), rec_mii);
      maxii = (calculate_maxii (g) * SMS_MAX_II_FACTOR) / 100;

      if (dump_file)
	fprintf (dump_file, "SMS iis %d %d %d (rec_mii, mii, maxii)\n",
		 rec_mii, mii, maxii);

      /* After sms_order_nodes and before sms_schedule_by_order, to copy over
	 ASAP.  */
      set_node_sched_params (g);

      ps = sms_schedule_by_order (g, mii, maxii, node_order);

      if (ps)
	stage_count = PS_STAGE_COUNT (ps);

      /* Stage count of 1 means that there is no interleaving between
	 iterations, let the scheduling passes do the job.  */
      if (stage_count < 1
	  || (count_init && (loop_count <= stage_count))
	  || (flag_branch_probabilities && (trip_count <= stage_count)))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "SMS failed... \n");
	      fprintf (dump_file, "SMS sched-failed (stage-count=%d, loop-count=", stage_count);
	      fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, loop_count);
	      fprintf (dump_file, ", trip-count=");
	      fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, trip_count);
	      fprintf (dump_file, ")\n");
	    }
	}
      else
	{
	  int orig_cycles = kernel_number_of_cycles (BB_HEAD (g->bb), BB_END (g->bb));
	  int new_cycles;
	  struct undo_replace_buff_elem *reg_move_replaces;

	  if (dump_file)
	    {
	      fprintf (dump_file,
		       "SMS succeeded %d %d (with ii, sc)\n", ps->ii,
		       stage_count);
	      print_partial_schedule (ps, dump_file);
	      fprintf (dump_file,
		       "SMS Branch (%d) will later be scheduled at cycle %d.\n",
		       g->closing_branch->cuid, PS_MIN_CYCLE (ps) - 1);
	    }

	  /* Set the stage boundaries.  If the DDG is built with closing_branch_deps,
	     the closing_branch was scheduled and should appear in the last (ii-1)
	     row.  Otherwise, we are free to schedule the branch, and we let nodes
	     that were scheduled at the first PS_MIN_CYCLE cycle appear in the first
	     row; this should reduce stage_count to minimum.  */
	  normalize_sched_times (ps);
	  rotate_partial_schedule (ps, PS_MIN_CYCLE (ps));
	  set_columns_for_ps (ps);

	  /* Generate the kernel just to be able to measure its cycles.  */
	  permute_partial_schedule (ps, g->closing_branch->first_note);
	  reg_move_replaces = generate_reg_moves (ps);

	  /* Get the number of cycles the new kernel expects to execute in.  */
	  new_cycles = kernel_number_of_cycles (BB_HEAD (g->bb), BB_END (g->bb));

	  /* Get back to the original loop so we can do loop versioning.  */
	  undo_permute_partial_schedule (ps, g->closing_branch->first_note);
	  if (reg_move_replaces)
	    undo_generate_reg_moves (ps, reg_move_replaces);

	  if (new_cycles >= orig_cycles)
	    {
	      /* SMS is not profitable so undo the permutation and reg move generation
		 and return the kernel to its original state.  */
	      if (dump_file)
		fprintf (dump_file, "Undoing SMS because it is not profitable.\n");
	    }
	  else
	    {
	      canon_loop (loop);

	      /* In case the BCT count is not known, do loop-versioning.  */
	      if (count_reg && ! count_init)
		{
		  rtx comp_rtx = gen_rtx_fmt_ee (GT, VOIDmode, count_reg,
						 GEN_INT (stage_count));

		  nloop = loop_version (loop, comp_rtx, &condition_bb, true);
		}

	      /* Set new iteration count of loop kernel.  */
	      if (count_reg && count_init)
		SET_SRC (single_set (count_init)) = GEN_INT (loop_count
							     - stage_count + 1);

	      /* Now apply the scheduled kernel to the RTL of the loop.  */
	      permute_partial_schedule (ps, g->closing_branch->first_note);

	      /* Mark this loop as software pipelined so the later
		 scheduling passes don't touch it.  */
	      if (! flag_resched_modulo_sched)
		g->bb->flags |= BB_DISABLE_SCHEDULE;
	      /* The life-info is not valid any more.  */
	      g->bb->flags |= BB_DIRTY;

	      reg_move_replaces = generate_reg_moves (ps);
	      if (dump_file)
		print_node_sched_params (dump_file, g->num_nodes);
	      /* Generate prolog and epilog.  */
	      if (count_reg && !count_init)
		generate_prolog_epilog (ps, loop, count_reg);
	      else
		generate_prolog_epilog (ps, loop, NULL_RTX);

	      free_undo_replace_buff (reg_move_replaces);
	    }

	  free_partial_schedule (ps);
	  free (node_sched_params);
	}

      free (node_order);
      free_ddg (g);
    }

  free (g_arr);

  /* Release scheduler data, needed until now because of DFA.  */
  sched_finish ();
  loop_optimizer_finalize ();
}
/* The SMS scheduling algorithm itself
   -----------------------------------
   Input: 'O' an ordered list of insns of a loop.
   Output: A scheduling of the loop - kernel, prolog, and epilogue.

   'Q' is the empty Set
   'PS' is the partial schedule; it holds the currently scheduled nodes with
	their cycle/slot.
   'PSP' previously scheduled predecessors.
   'PSS' previously scheduled successors.
   't(u)' the cycle where u is scheduled.
   'l(u)' is the latency of u.
   'd(v,u)' is the dependence distance from v to u.
   'ASAP(u)' the earliest time at which u could be scheduled as computed in
	     the node ordering phase.
   'check_hardware_resources_conflicts(u, PS, c)'
			     run a trace around cycle/slot through DFA model
			     to check resource conflicts involving instruction u
			     at cycle c given the partial schedule PS.
   'add_to_partial_schedule_at_time(u, PS, c)'
			     Add the node/instruction u to the partial schedule
			     PS at time c.
   'calculate_register_pressure(PS)'
			     Given a schedule of instructions, calculate the register
			     pressure it implies.  One implementation could be the
			     maximum number of overlapping live ranges.
   'maxRP' The maximum allowed register pressure, it is usually derived from the
	   number of registers available in the hardware.

   1. II = MII.
   2. PS = empty list
   3. for each node u in O in pre-computed order
   4.	if (PSP(u) != Q && PSS(u) == Q) then
   5.	  Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over all every v in PSP(u).
   6.	  start = Early_start; end = Early_start + II - 1; step = 1
   11.	else if (PSP(u) == Q && PSS(u) != Q) then
   12.	  Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over all every v in PSS(u).
   13.	  start = Late_start; end = Late_start - II + 1; step = -1
   14.	else if (PSP(u) != Q && PSS(u) != Q) then
   15.	  Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over all every v in PSP(u).
   16.	  Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over all every v in PSS(u).
   17.	  start = Early_start;
   18.	  end = min(Early_start + II - 1 , Late_start);
   19.	  step = 1
   20.	else "if (PSP(u) == Q && PSS(u) == Q)"
   21.	  start = ASAP(u); end = start + II - 1; step = 1
   22.	endif

   23.	success = false
   24.	for (c = start ; c != end ; c += step)
   25.	   if check_hardware_resources_conflicts(u, PS, c) then
   26.	     add_to_partial_schedule_at_time(u, PS, c)
   27.	     success = true
   28.	     break
   29.	   endif
   30.	endfor
   31.	if (success == false) then
   32.	  II = II + 1
   33.	  if (II > maxII) then
   34.	     finish - failed to schedule
   35.	  endif
   36.	  goto 2.
   37.	endif
   38. endfor
   39. if (calculate_register_pressure(PS) > maxRP) then
   40.	  goto 32.
   41. endif
   42. compute epilogue & prologue
   43. finish - succeeded to schedule
*/
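/* Illustrative window computation (not from the original sources): with
   II = 4, let u have a single previously scheduled predecessor v where
   t(v) = 5, l(v) = 2 and d(v,u) = 1.  By steps 4-6 above,
   Early_start(u) = 5 + 2 - 1*4 = 3, so cycles 3 .. 3 + II - 1 = 6 are
   tried in increasing order (step = 1).  */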
/* A limit on the number of cycles that resource conflicts can span.  ??? Should
   be provided by DFA, and be dependent on the type of insn scheduled.  Currently
   set to 0 to save compile time.  */
#define DFA_HISTORY SMS_DFA_HISTORY

/* Given the partial schedule PS, this function calculates and returns the
   cycles in which we can schedule the node with the given index I.
   NOTE: Here we do the backtracking in SMS, in some special cases.  We have
   noticed that there are several cases in which we fail to SMS the loop
   because the sched window of a node is empty due to tight data-deps.  In
   such cases we want to unschedule some of the predecessors/successors
   until we get a non-empty scheduling window.  It returns -1 if the
   scheduling window is empty and zero otherwise.  */
static int
get_sched_window (partial_schedule_ptr ps, int *nodes_order, int i,
		  sbitmap sched_nodes, int ii, int *start_p, int *step_p, int *end_p)
{
  int start, step, end;
  ddg_edge_ptr e;
  int u = nodes_order [i];
  ddg_node_ptr u_node = &ps->g->nodes[u];
  sbitmap psp = sbitmap_alloc (ps->g->num_nodes);
  sbitmap pss = sbitmap_alloc (ps->g->num_nodes);
  sbitmap u_node_preds = NODE_PREDECESSORS (u_node);
  sbitmap u_node_succs = NODE_SUCCESSORS (u_node);
  int psp_not_empty;
  int pss_not_empty;

  /* 1. compute sched window for u (start, end, step).  */
  sbitmap_zero (psp);
  sbitmap_zero (pss);
  psp_not_empty = sbitmap_a_and_b_cg (psp, u_node_preds, sched_nodes);
  pss_not_empty = sbitmap_a_and_b_cg (pss, u_node_succs, sched_nodes);

  if (psp_not_empty && !pss_not_empty)
    {
      int early_start = INT_MIN;

      end = INT_MAX;
      for (e = u_node->in; e != 0; e = e->next_in)
	{
	  ddg_node_ptr v_node = e->src;
	  if (TEST_BIT (sched_nodes, v_node->cuid))
	    {
	      int node_st = SCHED_TIME (v_node)
			    + e->latency - (e->distance * ii);

	      early_start = MAX (early_start, node_st);

	      if (e->data_type == MEM_DEP)
		end = MIN (end, SCHED_TIME (v_node) + ii - 1);
	    }
	}
      start = early_start;
      end = MIN (end, early_start + ii);
      step = 1;
    }

  else if (!psp_not_empty && pss_not_empty)
    {
      int late_start = INT_MAX;

      end = INT_MIN;
      for (e = u_node->out; e != 0; e = e->next_out)
	{
	  ddg_node_ptr v_node = e->dest;
	  if (TEST_BIT (sched_nodes, v_node->cuid))
	    {
	      late_start = MIN (late_start,
				SCHED_TIME (v_node) - e->latency
				+ (e->distance * ii));
	      if (e->data_type == MEM_DEP)
		end = MAX (end, SCHED_TIME (v_node) - ii + 1);
	    }
	}
      start = late_start;
      end = MAX (end, late_start - ii);
      step = -1;
    }

  else if (psp_not_empty && pss_not_empty)
    {
      int early_start = INT_MIN;
      int late_start = INT_MAX;

      start = INT_MIN;
      end = INT_MAX;
      for (e = u_node->in; e != 0; e = e->next_in)
	{
	  ddg_node_ptr v_node = e->src;

	  if (TEST_BIT (sched_nodes, v_node->cuid))
	    {
	      early_start = MAX (early_start,
				 SCHED_TIME (v_node) + e->latency
				 - (e->distance * ii));
	      if (e->data_type == MEM_DEP)
		end = MIN (end, SCHED_TIME (v_node) + ii - 1);
	    }
	}
      for (e = u_node->out; e != 0; e = e->next_out)
	{
	  ddg_node_ptr v_node = e->dest;

	  if (TEST_BIT (sched_nodes, v_node->cuid))
	    {
	      late_start = MIN (late_start,
				SCHED_TIME (v_node) - e->latency
				+ (e->distance * ii));
	      if (e->data_type == MEM_DEP)
		start = MAX (start, SCHED_TIME (v_node) - ii + 1);
	    }
	}
      start = MAX (start, early_start);
      end = MIN (end, MIN (early_start + ii, late_start + 1));
      step = 1;
    }
  else /* psp is empty && pss is empty.  */
    {
      start = SCHED_ASAP (u_node);
      end = start + ii;
      step = 1;
    }

  *start_p = start;
  *step_p = step;
  *end_p = end;
  sbitmap_free (psp);
  sbitmap_free (pss);

  if ((start >= end && step == 1) || (start <= end && step == -1))
    return -1;
  else
    return 0;
}
/* This function implements the scheduling algorithm for SMS according to the
   above algorithm.  */
static partial_schedule_ptr
sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order)
{
  int ii = mii;
  int i, c, success;
  int try_again_with_larger_ii = true;
  int num_nodes = g->num_nodes;
  ddg_edge_ptr e;
  int start, end, step; /* Place together into one struct?  */
  sbitmap sched_nodes = sbitmap_alloc (num_nodes);
  sbitmap must_precede = sbitmap_alloc (num_nodes);
  sbitmap must_follow = sbitmap_alloc (num_nodes);
  sbitmap tobe_scheduled = sbitmap_alloc (num_nodes);

  partial_schedule_ptr ps = create_partial_schedule (ii, g, DFA_HISTORY);

  sbitmap_ones (tobe_scheduled);
  sbitmap_zero (sched_nodes);

  while ((! sbitmap_equal (tobe_scheduled, sched_nodes)
	  || try_again_with_larger_ii) && ii < maxii)
    {
      int j;
      bool unscheduled_nodes = false;

      if (dump_file)
	fprintf(dump_file, "Starting with ii=%d\n", ii);
      if (try_again_with_larger_ii)
	{
	  try_again_with_larger_ii = false;
	  sbitmap_zero (sched_nodes);
	}

      for (i = 0; i < num_nodes; i++)
	{
	  int u = nodes_order[i];
	  ddg_node_ptr u_node = &ps->g->nodes[u];
	  rtx insn = u_node->insn;

	  if (!INSN_P (insn))
	    {
	      RESET_BIT (tobe_scheduled, u);
	      continue;
	    }

	  if (JUMP_P (insn)) /* Closing branch handled later.  */
	    {
	      RESET_BIT (tobe_scheduled, u);
	      continue;
	    }

	  if (TEST_BIT (sched_nodes, u))
	    continue;

	  /* Try to get non-empty scheduling window.  */
	  j = i;
	  while (get_sched_window (ps, nodes_order, i, sched_nodes, ii, &start, &step, &end) < 0
		 && j > 0)
	    {
	      unscheduled_nodes = true;
	      if (TEST_BIT (NODE_PREDECESSORS (u_node), nodes_order[j - 1])
		  || TEST_BIT (NODE_SUCCESSORS (u_node), nodes_order[j - 1]))
		{
		  ps_unschedule_node (ps, &ps->g->nodes[nodes_order[j - 1]]);
		  RESET_BIT (sched_nodes, nodes_order[j - 1]);
		}
	      j--;
	    }
	  if (j < 0)
	    {
	      /* ??? Try backtracking instead of immediately ii++?  */
	      ii++;
	      try_again_with_larger_ii = true;
	      reset_partial_schedule (ps, ii);
	      break;
	    }
	  /* 2. Try scheduling u in window.  */
	  if (dump_file)
	    fprintf(dump_file, "Trying to schedule node %d in (%d .. %d) step %d\n",
		    u, start, end, step);

	  /* use must_follow & must_precede bitmaps to determine order
	     of nodes within the cycle.  */
	  sbitmap_zero (must_precede);
	  sbitmap_zero (must_follow);
	  for (e = u_node->in; e != 0; e = e->next_in)
	    if (TEST_BIT (sched_nodes, e->src->cuid)
		&& e->latency == (ii * e->distance)
		&& start == SCHED_TIME (e->src))
	      SET_BIT (must_precede, e->src->cuid);

	  for (e = u_node->out; e != 0; e = e->next_out)
	    if (TEST_BIT (sched_nodes, e->dest->cuid)
		&& e->latency == (ii * e->distance)
		&& end == SCHED_TIME (e->dest))
	      SET_BIT (must_follow, e->dest->cuid);

	  success = 0;
	  if ((step > 0 && start < end) || (step < 0 && start > end))
	    for (c = start; c != end; c += step)
	      {
		ps_insn_ptr psi;

		psi = ps_add_node_check_conflicts (ps, u_node, c,
						   must_precede,
						   must_follow);

		if (psi)
		  {
		    SCHED_TIME (u_node) = c;
		    SET_BIT (sched_nodes, u);
		    success = 1;
		    if (dump_file)
		      fprintf(dump_file, "Schedule in %d\n", c);
		    break;
		  }
	      }
	  if (!success)
	    {
	      /* ??? Try backtracking instead of immediately ii++?  */
	      ii++;
	      try_again_with_larger_ii = true;
	      reset_partial_schedule (ps, ii);
	      break;
	    }
	  if (unscheduled_nodes)
	    break;

	  /* ??? If (success), check register pressure estimates.  */
	} /* Continue with next node.  */
    } /* While try_again_with_larger_ii.  */

  sbitmap_free (sched_nodes);
  sbitmap_free (must_precede);
  sbitmap_free (must_follow);
  sbitmap_free (tobe_scheduled);

  if (ii >= maxii)
    {
      free_partial_schedule (ps);
      ps = NULL;
    }
  return ps;
}
/* This page implements the algorithm for ordering the nodes of a DDG
   for modulo scheduling, activated through the
   "int sms_order_nodes (ddg_ptr, int mii, int * result)" API.  */

#define ORDER_PARAMS(x) ((struct node_order_params *) (x)->aux.info)
#define ASAP(x) (ORDER_PARAMS ((x))->asap)
#define ALAP(x) (ORDER_PARAMS ((x))->alap)
#define HEIGHT(x) (ORDER_PARAMS ((x))->height)
#define MOB(x) (ALAP ((x)) - ASAP ((x)))
#define DEPTH(x) (ASAP ((x)))
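/* Illustrative values: a node with ASAP = 2 and ALAP = 5 has mobility
   MOB = 5 - 2 = 3, i.e. it can slide across three cycles without
   stretching the schedule; nodes with MOB = 0 lie on a critical path.  */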
typedef struct node_order_params * nopa;

static void order_nodes_of_sccs (ddg_all_sccs_ptr, int * result);
static int order_nodes_in_scc (ddg_ptr, sbitmap, sbitmap, int*, int);
static nopa calculate_order_params (ddg_ptr, int mii);
static int find_max_asap (ddg_ptr, sbitmap);
static int find_max_hv_min_mob (ddg_ptr, sbitmap);
static int find_max_dv_min_mob (ddg_ptr, sbitmap);

enum sms_direction {BOTTOMUP, TOPDOWN};
struct node_order_params
{
  int asap;
  int alap;
  int height;
};

/* Check if NODE_ORDER contains a permutation of 0 .. NUM_NODES-1.  */
static void
check_nodes_order (int *node_order, int num_nodes)
{
  int i;
  sbitmap tmp = sbitmap_alloc (num_nodes);

  sbitmap_zero (tmp);

  for (i = 0; i < num_nodes; i++)
    {
      int u = node_order[i];

      gcc_assert (u < num_nodes && u >= 0 && !TEST_BIT (tmp, u));

      SET_BIT (tmp, u);
    }

  sbitmap_free (tmp);
}
/* Order the nodes of G for scheduling and pass the result in
   NODE_ORDER.  Also set aux.count of each node to ASAP.
   Return the recMII for the given DDG.  */
static int
sms_order_nodes (ddg_ptr g, int mii, int * node_order)
{
  int i;
  int rec_mii = 0;
  ddg_all_sccs_ptr sccs = create_ddg_all_sccs (g);

  nopa nops = calculate_order_params (g, mii);

  order_nodes_of_sccs (sccs, node_order);

  if (sccs->num_sccs > 0)
    /* First SCC has the largest recurrence_length.  */
    rec_mii = sccs->sccs[0]->recurrence_length;

  /* Save ASAP before destroying node_order_params.  */
  for (i = 0; i < g->num_nodes; i++)
    {
      ddg_node_ptr v = &g->nodes[i];
      v->aux.count = ASAP (v);
    }

  free (nops);
  free_ddg_all_sccs (sccs);
  check_nodes_order (node_order, g->num_nodes);

  return rec_mii;
}
static void
order_nodes_of_sccs (ddg_all_sccs_ptr all_sccs, int * node_order)
{
  int i, pos = 0;
  ddg_ptr g = all_sccs->ddg;
  int num_nodes = g->num_nodes;
  sbitmap prev_sccs = sbitmap_alloc (num_nodes);
  sbitmap on_path = sbitmap_alloc (num_nodes);
  sbitmap tmp = sbitmap_alloc (num_nodes);
  sbitmap ones = sbitmap_alloc (num_nodes);

  sbitmap_zero (prev_sccs);
  sbitmap_ones (ones);

  /* Perform the node ordering starting from the SCC with the highest recMII.
     For each SCC order the nodes according to their ASAP/ALAP/HEIGHT etc.  */
  for (i = 0; i < all_sccs->num_sccs; i++)
    {
      ddg_scc_ptr scc = all_sccs->sccs[i];

      /* Add nodes on paths from previous SCCs to the current SCC.  */
      find_nodes_on_paths (on_path, g, prev_sccs, scc->nodes);
      sbitmap_a_or_b (tmp, scc->nodes, on_path);

      /* Add nodes on paths from the current SCC to previous SCCs.  */
      find_nodes_on_paths (on_path, g, scc->nodes, prev_sccs);
      sbitmap_a_or_b (tmp, tmp, on_path);

      /* Remove nodes of previous SCCs from current extended SCC.  */
      sbitmap_difference (tmp, tmp, prev_sccs);

      pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
      /* Above call to order_nodes_in_scc updated prev_sccs |= tmp.  */
    }

  /* Handle the remaining nodes that do not belong to any scc.  Each call
     to order_nodes_in_scc handles a single connected component.  */
  while (pos < g->num_nodes)
    {
      sbitmap_difference (tmp, ones, prev_sccs);
      pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
    }
  sbitmap_free (prev_sccs);
  sbitmap_free (on_path);
  sbitmap_free (tmp);
  sbitmap_free (ones);
}
/* MII is needed if we consider backarcs (that do not close recursive cycles).  */
static struct node_order_params *
calculate_order_params (ddg_ptr g, int mii ATTRIBUTE_UNUSED)
{
  int u;
  int max_asap = 0;
  int num_nodes = g->num_nodes;
  ddg_edge_ptr e;

  /* Allocate a place to hold ordering params for each node in the DDG.  */
  nopa node_order_params_arr;

  /* Initialize ASAP/ALAP/HEIGHT to zero.  */
  node_order_params_arr = (nopa) xcalloc (num_nodes,
					  sizeof (struct node_order_params));

  /* Set the aux pointer of each node to point to its order_params structure.  */
  for (u = 0; u < num_nodes; u++)
    g->nodes[u].aux.info = &node_order_params_arr[u];

  /* Disregarding a backarc from each recursive cycle to obtain a DAG,
     calculate ASAP, ALAP, mobility, distance, and height for each node
     in the dependence (direct acyclic) graph.  */

  /* We assume that the nodes in the array are in topological order.  */

  for (u = 0; u < num_nodes; u++)
    {
      ddg_node_ptr u_node = &g->nodes[u];

      for (e = u_node->in; e; e = e->next_in)
	if (e->distance == 0)
	  ASAP (u_node) = MAX (ASAP (u_node),
			       ASAP (e->src) + e->latency);
      max_asap = MAX (max_asap, ASAP (u_node));
    }

  for (u = num_nodes - 1; u > -1; u--)
    {
      ddg_node_ptr u_node = &g->nodes[u];

      ALAP (u_node) = max_asap;
      HEIGHT (u_node) = 0;
      for (e = u_node->out; e; e = e->next_out)
	if (e->distance == 0)
	  {
	    ALAP (u_node) = MIN (ALAP (u_node),
				 ALAP (e->dest) - e->latency);
	    HEIGHT (u_node) = MAX (HEIGHT (u_node),
				   HEIGHT (e->dest) + e->latency);
	  }
    }

  return node_order_params_arr;
}
static int
find_max_asap (ddg_ptr g, sbitmap nodes)
{
  unsigned int u = 0;
  int max_asap = -1;
  int result = -1;
  sbitmap_iterator sbi;

  EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
    {
      ddg_node_ptr u_node = &g->nodes[u];

      if (max_asap < ASAP (u_node))
	{
	  max_asap = ASAP (u_node);
	  result = u;
	}
    }
  return result;
}
static int
find_max_hv_min_mob (ddg_ptr g, sbitmap nodes)
{
  unsigned int u = 0;
  int max_hv = -1;
  int min_mob = INT_MAX;
  int result = -1;
  sbitmap_iterator sbi;

  EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
    {
      ddg_node_ptr u_node = &g->nodes[u];

      if (max_hv < HEIGHT (u_node))
	{
	  max_hv = HEIGHT (u_node);
	  min_mob = MOB (u_node);
	  result = u;
	}
      else if ((max_hv == HEIGHT (u_node))
	       && (min_mob > MOB (u_node)))
	{
	  min_mob = MOB (u_node);
	  result = u;
	}
    }
  return result;
}
static int
find_max_dv_min_mob (ddg_ptr g, sbitmap nodes)
{
  unsigned int u = 0;
  int max_dv = -1;
  int min_mob = INT_MAX;
  int result = -1;
  sbitmap_iterator sbi;

  EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
    {
      ddg_node_ptr u_node = &g->nodes[u];

      if (max_dv < DEPTH (u_node))
	{
	  max_dv = DEPTH (u_node);
	  min_mob = MOB (u_node);
	  result = u;
	}
      else if ((max_dv == DEPTH (u_node))
	       && (min_mob > MOB (u_node)))
	{
	  min_mob = MOB (u_node);
	  result = u;
	}
    }
  return result;
}
/* Places the nodes of SCC into the NODE_ORDER array starting
   at position POS, according to the SMS ordering algorithm.
   NODES_ORDERED (in&out parameter) holds the bitset of all nodes in
   the NODE_ORDER array, starting from position zero.  */
static int
order_nodes_in_scc (ddg_ptr g, sbitmap nodes_ordered, sbitmap scc,
		    int * node_order, int pos)
{
  enum sms_direction dir;
  int num_nodes = g->num_nodes;
  sbitmap workset = sbitmap_alloc (num_nodes);
  sbitmap tmp = sbitmap_alloc (num_nodes);
  sbitmap zero_bitmap = sbitmap_alloc (num_nodes);
  sbitmap predecessors = sbitmap_alloc (num_nodes);
  sbitmap successors = sbitmap_alloc (num_nodes);

  sbitmap_zero (predecessors);
  find_predecessors (predecessors, g, nodes_ordered);

  sbitmap_zero (successors);
  find_successors (successors, g, nodes_ordered);

  sbitmap_zero (tmp);
  if (sbitmap_a_and_b_cg (tmp, predecessors, scc))
    {
      sbitmap_copy (workset, tmp);
      dir = BOTTOMUP;
    }
  else if (sbitmap_a_and_b_cg (tmp, successors, scc))
    {
      sbitmap_copy (workset, tmp);
      dir = TOPDOWN;
    }
  else
    {
      int u;

      sbitmap_zero (workset);
      if ((u = find_max_asap (g, scc)) >= 0)
	SET_BIT (workset, u);
      dir = BOTTOMUP;
    }

  sbitmap_zero (zero_bitmap);
  while (!sbitmap_equal (workset, zero_bitmap))
    {
      int v;
      ddg_node_ptr v_node;
      sbitmap v_node_preds;
      sbitmap v_node_succs;

      if (dir == TOPDOWN)
	{
	  while (!sbitmap_equal (workset, zero_bitmap))
	    {
	      v = find_max_hv_min_mob (g, workset);
	      v_node = &g->nodes[v];
	      node_order[pos++] = v;
	      v_node_succs = NODE_SUCCESSORS (v_node);
	      sbitmap_a_and_b (tmp, v_node_succs, scc);

	      /* Don't consider the already ordered successors again.  */
	      sbitmap_difference (tmp, tmp, nodes_ordered);
	      sbitmap_a_or_b (workset, workset, tmp);
	      RESET_BIT (workset, v);
	      SET_BIT (nodes_ordered, v);
	    }
	  dir = BOTTOMUP;
	  sbitmap_zero (predecessors);
	  find_predecessors (predecessors, g, nodes_ordered);
	  sbitmap_a_and_b (workset, predecessors, scc);
	}
      else
	{
	  while (!sbitmap_equal (workset, zero_bitmap))
	    {
	      v = find_max_dv_min_mob (g, workset);
	      v_node = &g->nodes[v];
	      node_order[pos++] = v;
	      v_node_preds = NODE_PREDECESSORS (v_node);
	      sbitmap_a_and_b (tmp, v_node_preds, scc);

	      /* Don't consider the already ordered predecessors again.  */
	      sbitmap_difference (tmp, tmp, nodes_ordered);
	      sbitmap_a_or_b (workset, workset, tmp);
	      RESET_BIT (workset, v);
	      SET_BIT (nodes_ordered, v);
	    }
	  dir = TOPDOWN;
	  sbitmap_zero (successors);
	  find_successors (successors, g, nodes_ordered);
	  sbitmap_a_and_b (workset, successors, scc);
	}
    }
  sbitmap_free (tmp);
  sbitmap_free (workset);
  sbitmap_free (zero_bitmap);
  sbitmap_free (predecessors);
  sbitmap_free (successors);
  return pos;
}
/* This page contains functions for manipulating partial-schedules during
   modulo scheduling.  */

/* Create a partial schedule and allocate memory to hold II rows.  */

static partial_schedule_ptr
create_partial_schedule (int ii, ddg_ptr g, int history)
{
  partial_schedule_ptr ps = XNEW (struct partial_schedule);
  ps->rows = (ps_insn_ptr *) xcalloc (ii, sizeof (ps_insn_ptr));
  ps->ii = ii;
  ps->history = history;
  ps->min_cycle = INT_MAX;
  ps->max_cycle = INT_MIN;
  ps->g = g;

  return ps;
}
/* Free the PS_INSNs in rows array of the given partial schedule.
   ??? Consider caching the PS_INSN's.  */
static void
free_ps_insns (partial_schedule_ptr ps)
{
  int i;

  for (i = 0; i < ps->ii; i++)
    {
      while (ps->rows[i])
	{
	  ps_insn_ptr ps_insn = ps->rows[i]->next_in_row;

	  free (ps->rows[i]);
	  ps->rows[i] = ps_insn;
	}
      ps->rows[i] = NULL;
    }
}
/* Free all the memory allocated to the partial schedule.  */
static void
free_partial_schedule (partial_schedule_ptr ps)
{
  if (!ps)
    return;
  free_ps_insns (ps);
  free (ps->rows);
  free (ps);
}
/* Clear the rows array with its PS_INSNs, and create a new one with
   NEW_II rows.  */
static void
reset_partial_schedule (partial_schedule_ptr ps, int new_ii)
{
  if (!ps)
    return;
  free_ps_insns (ps);
  if (new_ii == ps->ii)
    return;
  ps->rows = (ps_insn_ptr *) xrealloc (ps->rows, new_ii
						 * sizeof (ps_insn_ptr));
  memset (ps->rows, 0, new_ii * sizeof (ps_insn_ptr));
  ps->ii = new_ii;
  ps->min_cycle = INT_MAX;
  ps->max_cycle = INT_MIN;
}
/* Prints the partial schedule as an ii rows array; for each row
   print the ids of the insns in it.  */
void
print_partial_schedule (partial_schedule_ptr ps, FILE *dump)
{
  int i;

  for (i = 0; i < ps->ii; i++)
    {
      ps_insn_ptr ps_i = ps->rows[i];

      fprintf (dump, "\n[CYCLE %d ]: ", i);
      while (ps_i)
	{
	  fprintf (dump, "%d, ",
		   INSN_UID (ps_i->node->insn));
	  ps_i = ps_i->next_in_row;
	}
    }
}
/* Creates an object of PS_INSN and initializes it to the given parameters.  */
static ps_insn_ptr
create_ps_insn (ddg_node_ptr node, int rest_count, int cycle)
{
  ps_insn_ptr ps_i = XNEW (struct ps_insn);

  ps_i->node = node;
  ps_i->next_in_row = NULL;
  ps_i->prev_in_row = NULL;
  ps_i->row_rest_count = rest_count;
  ps_i->cycle = cycle;

  return ps_i;
}
/* Removes the given PS_INSN from the partial schedule.  Returns false if the
   node is not found in the partial schedule, else returns true.  */
static bool
remove_node_from_ps (partial_schedule_ptr ps, ps_insn_ptr ps_i)
{
  int row;

  if (!ps || !ps_i)
    return false;

  row = SMODULO (ps_i->cycle, ps->ii);
  if (! ps_i->prev_in_row)
    {
      if (ps_i != ps->rows[row])
	return false;

      ps->rows[row] = ps_i->next_in_row;
      if (ps->rows[row])
	ps->rows[row]->prev_in_row = NULL;
    }
  else
    {
      ps_i->prev_in_row->next_in_row = ps_i->next_in_row;
      if (ps_i->next_in_row)
	ps_i->next_in_row->prev_in_row = ps_i->prev_in_row;
    }
  free (ps_i);
  return true;
}
/* Unlike what literature describes for modulo scheduling (which focuses
   on VLIW machines) the order of the instructions inside a cycle is
   important.  Given the bitmaps MUST_FOLLOW and MUST_PRECEDE we know
   where the current instruction should go relative to the already
   scheduled instructions in the given cycle.  Go over these
   instructions and find the first possible column to put it in.  */
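/* Illustrative placement: if a row already holds A B C, A is in
   MUST_PRECEDE and C is in MUST_FOLLOW, the new instruction X is linked
   in right after the last must-precede insn, giving A X B C; had some
   must-follow insn appeared before A, no legal column would exist.  */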
static int
ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
		     sbitmap must_precede, sbitmap must_follow)
{
  ps_insn_ptr next_ps_i;
  ps_insn_ptr first_must_follow = NULL;
  ps_insn_ptr last_must_precede = NULL;
  int row;

  if (! ps_i)
    return false;

  row = SMODULO (ps_i->cycle, ps->ii);

  /* Find the first must follow and the last must precede
     and insert the node immediately after the must precede
     but make sure that there is no must follow after it.  */
  for (next_ps_i = ps->rows[row];
       next_ps_i;
       next_ps_i = next_ps_i->next_in_row)
    {
      if (TEST_BIT (must_follow, next_ps_i->node->cuid)
	  && ! first_must_follow)
	first_must_follow = next_ps_i;
      if (TEST_BIT (must_precede, next_ps_i->node->cuid))
	{
	  /* If we have already met a node that must follow, then
	     there is no possible column.  */
	  if (first_must_follow)
	    return false;
	  else
	    last_must_precede = next_ps_i;
	}
    }

  /* Now insert the node after INSERT_AFTER_PSI.  */

  if (! last_must_precede)
    {
      ps_i->next_in_row = ps->rows[row];
      ps_i->prev_in_row = NULL;
      if (ps_i->next_in_row)
	ps_i->next_in_row->prev_in_row = ps_i;
      ps->rows[row] = ps_i;
    }
  else
    {
      ps_i->next_in_row = last_must_precede->next_in_row;
      last_must_precede->next_in_row = ps_i;
      ps_i->prev_in_row = last_must_precede;
      if (ps_i->next_in_row)
	ps_i->next_in_row->prev_in_row = ps_i;
    }

  return true;
}
/* Advances the PS_INSN one column in its current row; returns false
   on failure and true on success.  Bit N is set in MUST_FOLLOW if
   the node with cuid N must come after the node pointed to by
   PS_I when scheduled in the same cycle.  */
static bool
ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
			sbitmap must_follow)
{
  ps_insn_ptr prev, next;
  int row;
  ddg_node_ptr next_node;

  if (!ps || !ps_i)
    return false;

  row = SMODULO (ps_i->cycle, ps->ii);

  if (! ps_i->next_in_row)
    return false;

  next_node = ps_i->next_in_row->node;

  /* Check if next_in_row is dependent on ps_i, both having same sched
     times (typically ANTI_DEP).  If so, ps_i cannot skip over it.  */
  if (TEST_BIT (must_follow, next_node->cuid))
    return false;

  /* Advance PS_I over its next_in_row in the doubly linked list.  */
  prev = ps_i->prev_in_row;
  next = ps_i->next_in_row;

  if (ps_i == ps->rows[row])
    ps->rows[row] = next;

  ps_i->next_in_row = next->next_in_row;

  if (next->next_in_row)
    next->next_in_row->prev_in_row = ps_i;

  next->next_in_row = ps_i;
  ps_i->prev_in_row = next;

  next->prev_in_row = prev;
  if (prev)
    prev->next_in_row = next;

  return true;
}
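/* Illustrative sketch (not from the original sources): advancing B one
   column in the row  A <-> B <-> C <-> D  swaps B with its successor,
   producing  A <-> C <-> B <-> D.  If B was the row head, ps->rows[row]
   is redirected to C first; if C must follow B in the same cycle, the
   swap is refused and the function returns false.  */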
/* Inserts a DDG_NODE to the given partial schedule at the given cycle.
   Returns NULL if this is not possible and a PS_INSN otherwise.  Bit N is
   set in MUST_PRECEDE/MUST_FOLLOW if the node with cuid N must come
   before/after (respectively) the node pointed to by PS_I when scheduled
   in the same cycle.  */
static ps_insn_ptr
add_node_to_ps (partial_schedule_ptr ps, ddg_node_ptr node, int cycle,
		sbitmap must_precede, sbitmap must_follow)
{
  ps_insn_ptr ps_i;
  int rest_count = 1;
  int row = SMODULO (cycle, ps->ii);

  if (ps->rows[row]
      && ps->rows[row]->row_rest_count >= issue_rate)
    return NULL;

  if (ps->rows[row])
    rest_count += ps->rows[row]->row_rest_count;

  ps_i = create_ps_insn (node, rest_count, cycle);

  /* Finds and inserts PS_I according to MUST_FOLLOW and
     MUST_PRECEDE.  */
  if (! ps_insn_find_column (ps, ps_i, must_precede, must_follow))
    {
      free (ps_i);
      return NULL;
    }

  return ps_i;
}
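/* Illustrative example (not from the original sources), assuming
   issue_rate == 4: inserting into an empty row gives the new ps_insn
   rest_count = 1; inserting into a row whose head has row_rest_count == 3
   gives rest_count = 4; once the head's row_rest_count reaches 4 the row
   is full and add_node_to_ps returns NULL without trying a column.  */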
/* Advance time one cycle.  Assumes DFA is being used.  */
static void
advance_one_cycle (void)
{
  if (targetm.sched.dfa_pre_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_pre_cycle_insn ());

  state_transition (curr_state, NULL);

  if (targetm.sched.dfa_post_cycle_insn)
    state_transition (curr_state,
		      targetm.sched.dfa_post_cycle_insn ());
}
/* Given the kernel of a loop (from FIRST_INSN to LAST_INSN), finds
   the number of cycles according to the DFA that the kernel fits in.
   We use this to check whether SMS did well after register moves are
   added: in some cases the register-move overhead makes the new kernel
   even worse than the original loop, and we only want to keep the SMS
   result when it takes fewer cycles.  */
static int
kernel_number_of_cycles (rtx first_insn, rtx last_insn)
{
  int cycles = 0;
  rtx insn;
  int can_issue_more = issue_rate;

  state_reset (curr_state);

  for (insn = first_insn;
       insn != NULL_RTX && insn != last_insn;
       insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn) || GET_CODE (PATTERN (insn)) == USE)
	continue;

      /* Check if there is room for the current insn.  */
      if (!can_issue_more || state_dead_lock_p (curr_state))
	{
	  cycles++;
	  advance_one_cycle ();
	  can_issue_more = issue_rate;
	}

      /* Update the DFA state and take a new cycle if the DFA found
	 resource conflicts.  */
      if (state_transition (curr_state, insn) >= 0)
	{
	  cycles++;
	  advance_one_cycle ();
	  can_issue_more = issue_rate;
	}

      if (targetm.sched.variable_issue)
	can_issue_more =
	  targetm.sched.variable_issue (sched_dump, sched_verbose,
					insn, can_issue_more);
      /* A naked CLOBBER or USE generates no instruction, so don't
	 let them consume issue slots.  */
      else if (GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	can_issue_more--;
    }

  return cycles;
}
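/* Usage sketch (illustrative, not from the original sources; undo_sms is
   a hypothetical helper): the SMS driver can compare the DFA cycle count
   of the transformed kernel against that of the original loop body, e.g.

     new_cycles = kernel_number_of_cycles (BB_HEAD (bb), BB_END (bb));
     if (new_cycles >= orig_cycles)
       undo_sms ();

   and keep the original loop when register moves made things worse.  */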
/* Checks if PS has resource conflicts according to the DFA, starting from
   cycle FROM through cycle TO; returns true if there are conflicts and
   false if there are none.  Assumes the DFA is being used.  */
static bool
ps_has_conflicts (partial_schedule_ptr ps, int from, int to)
{
  int cycle;

  state_reset (curr_state);

  for (cycle = from; cycle <= to; cycle++)
    {
      ps_insn_ptr crr_insn;
      /* Holds the remaining issue slots in the current row.  */
      int can_issue_more = issue_rate;

      /* Walk through the DFA for the current row.  */
      for (crr_insn = ps->rows[SMODULO (cycle, ps->ii)];
	   crr_insn;
	   crr_insn = crr_insn->next_in_row)
	{
	  rtx insn = crr_insn->node->insn;

	  if (!INSN_P (insn))
	    continue;

	  /* Check if there is room for the current insn.  */
	  if (!can_issue_more || state_dead_lock_p (curr_state))
	    return true;

	  /* Update the DFA state and return with failure if the DFA found
	     resource conflicts.  */
	  if (state_transition (curr_state, insn) >= 0)
	    return true;

	  if (targetm.sched.variable_issue)
	    can_issue_more =
	      targetm.sched.variable_issue (sched_dump, sched_verbose,
					    insn, can_issue_more);
	  /* A naked CLOBBER or USE generates no instruction, so don't
	     let them consume issue slots.  */
	  else if (GET_CODE (PATTERN (insn)) != USE
		   && GET_CODE (PATTERN (insn)) != CLOBBER)
	    can_issue_more--;
	}

      /* Advance the DFA to the next cycle.  */
      advance_one_cycle ();
    }

  return false;
}
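/* Illustrative note (not from the original sources): a call like
   ps_has_conflicts (ps, c, c) replays only the row of cycle c through
   the DFA, while ps_has_conflicts (ps, c - 2, c + 2) also feeds the
   surrounding rows, catching multi-cycle resource hazards that cross
   row boundaries.  */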
/* Checks if the given node causes resource conflicts when added to PS at
   cycle C.  If not, the node is added to PS and returned; otherwise NULL
   is returned.  Bit N is set in MUST_PRECEDE/MUST_FOLLOW if the node with
   cuid N must come before/after (respectively) the node pointed to by
   PS_I when scheduled in the same cycle.  */
ps_insn_ptr
ps_add_node_check_conflicts (partial_schedule_ptr ps, ddg_node_ptr n,
			     int c, sbitmap must_precede,
			     sbitmap must_follow)
{
  int has_conflicts = 0;
  ps_insn_ptr ps_i;

  /* First add the node to the PS; if this succeeds, check for
     conflicts, trying different issue slots in the same row.  */
  if (! (ps_i = add_node_to_ps (ps, n, c, must_precede, must_follow)))
    return NULL; /* Failed to insert the node at the given cycle.  */

  has_conflicts = ps_has_conflicts (ps, c, c)
		  || (ps->history > 0
		      && ps_has_conflicts (ps,
					   c - ps->history,
					   c + ps->history));

  /* Try different issue slots to find one that the given node can be
     scheduled in without conflicts.  */
  while (has_conflicts)
    {
      if (! ps_insn_advance_column (ps, ps_i, must_follow))
	break;
      has_conflicts = ps_has_conflicts (ps, c, c)
		      || (ps->history > 0
			  && ps_has_conflicts (ps,
					       c - ps->history,
					       c + ps->history));
    }

  if (has_conflicts)
    {
      remove_node_from_ps (ps, ps_i);
      return NULL;
    }

  ps->min_cycle = MIN (ps->min_cycle, c);
  ps->max_cycle = MAX (ps->max_cycle, c);
  return ps_i;
}
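/* Usage sketch (illustrative, not from the original sources; the window
   bounds and bitmaps are assumed to come from the already scheduled
   neighbors of U_NODE): the scheduling loop tries each cycle of a node's
   window until insertion succeeds, e.g.

     for (c = start; c != end; c += step)
       if (ps_add_node_check_conflicts (ps, u_node, c,
					must_precede, must_follow))
	 break;
*/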
/* Rotate the rows of PS such that insns scheduled at time
   START_CYCLE will appear in row 0.  Updates max/min_cycles.  */
void
rotate_partial_schedule (partial_schedule_ptr ps, int start_cycle)
{
  int i, row, backward_rotates;
  int last_row = ps->ii - 1;

  if (start_cycle == 0)
    return;

  backward_rotates = SMODULO (start_cycle, ps->ii);

  /* Revisit later and optimize this into a single loop.  */
  for (i = 0; i < backward_rotates; i++)
    {
      ps_insn_ptr first_row = ps->rows[0];

      for (row = 0; row < last_row; row++)
	ps->rows[row] = ps->rows[row + 1];

      ps->rows[last_row] = first_row;
    }

  ps->max_cycle -= start_cycle;
  ps->min_cycle -= start_cycle;
}
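/* Worked example (illustrative, not from the original sources): with
   ii = 4 and start_cycle = 6, backward_rotates = SMODULO (6, 4) = 2, so
   the rows [r0 r1 r2 r3] become [r2 r3 r0 r1] and min/max_cycle each
   drop by 6.  Insns originally scheduled at cycle 6 (row 2) now sit in
   row 0.  */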
/* Remove the node N from the partial schedule PS.  Because we restart
   the DFA each time we want to check for resource conflicts, this is
   equivalent to unscheduling the node N.  */
static bool
ps_unschedule_node (partial_schedule_ptr ps, ddg_node_ptr n)
{
  ps_insn_ptr ps_i;
  int row = SMODULO (SCHED_TIME (n), ps->ii);

  if (row < 0 || row >= ps->ii)
    return false;

  for (ps_i = ps->rows[row];
       ps_i && ps_i->node != n;
       ps_i = ps_i->next_in_row);
  if (!ps_i)
    return false;

  return remove_node_from_ps (ps, ps_i);
}
#endif /* INSN_SCHEDULING */

static bool
gate_handle_sms (void)
{
  return (optimize > 0 && flag_modulo_sched);
}
/* Run the instruction scheduler: perform SMS modulo scheduling.  */
static unsigned int
rest_of_handle_sms (void)
{
#ifdef INSN_SCHEDULING
  basic_block bb;

  /* We want to be able to create new pseudos.  */
  no_new_pseudos = 0;
  /* Collect loop information to be used in SMS.  */
  cfg_layout_initialize (CLEANUP_UPDATE_LIFE);
  sms_schedule ();

  /* Update the life information, because we add pseudos.  */
  max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, FALSE);
  update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
		    (PROP_DEATH_NOTES
		     | PROP_REG_INFO
		     | PROP_KILL_DEAD_CODE
		     | PROP_SCAN_DEAD_CODE));

  no_new_pseudos = 1;

  /* Finalize layout changes.  */
  FOR_EACH_BB (bb)
    if (bb->next_bb != EXIT_BLOCK_PTR)
      bb->aux = bb->next_bb;
  cfg_layout_finalize ();
  free_dominance_info (CDI_DOMINATORS);
#endif /* INSN_SCHEDULING */
  return 0;
}
struct tree_opt_pass pass_sms =
{
  "sms",				/* name */
  gate_handle_sms,			/* gate */
  rest_of_handle_sms,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_SMS,				/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_dump_func,			/* todo_flags_start */
  TODO_dump_func | TODO_ggc_collect,	/* todo_flags_finish */
  'm'					/* letter */
};