gcc/modulo-sched.c
1 /* Swing Modulo Scheduling implementation.
2 Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "diagnostic-core.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "hard-reg-set.h"
31 #include "regs.h"
32 #include "function.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "insn-attr.h"
36 #include "except.h"
37 #include "recog.h"
38 #include "sched-int.h"
39 #include "target.h"
40 #include "cfgloop.h"
41 #include "expr.h"
42 #include "params.h"
43 #include "gcov-io.h"
44 #include "ddg.h"
45 #include "timevar.h"
46 #include "tree-pass.h"
47 #include "dbgcnt.h"
48 #include "df.h"
50 #ifdef INSN_SCHEDULING
52 /* This file contains the implementation of the Swing Modulo Scheduler,
53 described in the following references:
54 [1] J. Llosa, A. Gonzalez, E. Ayguade, M. Valero., and J. Eckhardt.
55 Lifetime--sensitive modulo scheduling in a production environment.
56 IEEE Trans. on Comps., 50(3), March 2001
57 [2] J. Llosa, A. Gonzalez, E. Ayguade, and M. Valero.
58 Swing Modulo Scheduling: A Lifetime Sensitive Approach.
59 PACT '96 , pages 80-87, October 1996 (Boston - Massachusetts - USA).
61 The basic structure is:
62 1. Build a data-dependence graph (DDG) for each loop.
63 2. Use the DDG to order the insns of a loop (not in topological order
64 necessarily, but rather) trying to place each insn after all its
65 predecessors _or_ after all its successors.
66 3. Compute MII: a lower bound on the number of cycles to schedule the loop.
67 4. Use the ordering to perform list-scheduling of the loop:
68 1. Set II = MII. We will try to schedule the loop within II cycles.
69 2. Try to schedule the insns one by one according to the ordering.
70 For each insn compute an interval of cycles by considering already-
71 scheduled preds and succs (and associated latencies); try to place
72 the insn in the cycles of this window checking for potential
73 resource conflicts (using the DFA interface).
74 Note: this is different from the cycle-scheduling of schedule_insns;
75 here the insns are not scheduled monotonically top-down (nor bottom-
76 up).
77 3. If failed in scheduling all insns - bump II++ and try again, unless
78 II reaches an upper bound MaxII, in which case report failure.
79 5. If we succeeded in scheduling the loop within II cycles, we now
80 generate prolog and epilog, decrease the counter of the loop, and
81 perform modulo variable expansion for live ranges that span more than
82 II cycles (i.e. use register copies to prevent a def from overwriting
83 itself before reaching the use).
85 SMS works with countable loops (1) whose control part can be easily
86 decoupled from the rest of the loop and (2) whose loop count can
87 be easily adjusted. This is because we peel a constant number of
88 iterations into a prologue and epilogue for which we want to avoid
89 emitting the control part, and a kernel which is to iterate that
90 constant number of iterations less than the original loop. So the
91 control part should be a set of insns clearly identified and having
92 its own iv, not otherwise used in the loop (at least for now), which
93 initializes a register before the loop to the number of iterations.
94 Currently SMS relies on the do-loop pattern to recognize such loops,
95 where (1) the control part comprises all insns defining and/or
96 using a certain 'count' register and (2) the loop count can be
97 adjusted by modifying this register prior to the loop.
98 TODO: Rely on cfgloop analysis instead. */
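/* Illustrative only: a minimal, self-contained sketch of the II search in
   step 4 above.  Nothing below is GCC code; TRY_II stands in for a single
   attempt of the list scheduler at a given initiation interval.  */

static int
sms_find_ii_sketch (int mii, int maxii, int (*try_ii) (int ii))
{
  int ii;

  /* Start at the lower bound MII and bump II until the loop schedules
     or the upper bound MaxII is exceeded.  */
  for (ii = mii; ii <= maxii; ii++)
    if (try_ii (ii))
      return ii;

  /* Failure: leave the loop to the ordinary schedulers.  */
  return -1;
}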
100 /* This page defines partial-schedule structures and functions for
101 modulo scheduling. */
103 typedef struct partial_schedule *partial_schedule_ptr;
104 typedef struct ps_insn *ps_insn_ptr;
106 /* The minimum (absolute) cycle that a node of ps was scheduled in. */
107 #define PS_MIN_CYCLE(ps) (((partial_schedule_ptr)(ps))->min_cycle)
109 /* The maximum (absolute) cycle that a node of ps was scheduled in. */
110 #define PS_MAX_CYCLE(ps) (((partial_schedule_ptr)(ps))->max_cycle)
112 /* Perform signed modulo, always returning a non-negative value. */
113 #define SMODULO(x,y) ((x) % (y) < 0 ? ((x) % (y) + (y)) : (x) % (y))
115 /* The number of different iterations the nodes in ps span, assuming
116 the stage boundaries are placed efficiently. */
117 #define CALC_STAGE_COUNT(max_cycle,min_cycle,ii) ((max_cycle - min_cycle \
118 + 1 + ii - 1) / ii)
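/* Illustrative only: a hypothetical self-check of the two helpers above,
   assuming the definitions given.  SMODULO always yields a value in
   [0, y), and CALC_STAGE_COUNT is a ceiling division of the cycle span
   by II.  */

static void
sms_macro_examples (void)
{
  /* -3 modulo 4, forced non-negative, is 1.  */
  gcc_assert (SMODULO (-3, 4) == 1);
  /* Cycles -2 .. 7 span 10 cycles; with II = 5 that is 2 stages.  */
  gcc_assert (CALC_STAGE_COUNT (7, -2, 5) == 2);
}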
119 /* The stage count of ps. */
120 #define PS_STAGE_COUNT(ps) (((partial_schedule_ptr)(ps))->stage_count)
122 /* A single instruction in the partial schedule. */
123 struct ps_insn
125 /* Identifies the instruction to be scheduled. Values smaller than
126 the ddg's num_nodes refer directly to ddg nodes. A value X at or
127 above num_nodes refers to register move X - num_nodes. */
128 int id;
130 /* The (absolute) cycle in which the PS instruction is scheduled.
131 Same as SCHED_TIME (node). */
132 int cycle;
134 /* The next/prev PS_INSN in the same row. */
135 ps_insn_ptr next_in_row,
136 prev_in_row;
140 /* Information about a register move that has been added to a partial
141 schedule. */
142 struct ps_reg_move_info
144 /* The source of the move is defined by the ps_insn with id DEF.
145 The destination is used by the ps_insns with the ids in USES. */
146 int def;
147 sbitmap uses;
149 /* The original form of USES' instructions used OLD_REG, but they
150 should now use NEW_REG. */
151 rtx old_reg;
152 rtx new_reg;
154 /* The number of consecutive stages that the move occupies. */
155 int num_consecutive_stages;
157 /* An instruction that sets NEW_REG to the correct value. The first
158 move associated with DEF will have an rhs of OLD_REG; later moves
159 use the result of the previous move. */
160 rtx insn;
163 typedef struct ps_reg_move_info ps_reg_move_info;
164 DEF_VEC_O (ps_reg_move_info);
165 DEF_VEC_ALLOC_O (ps_reg_move_info, heap);
167 /* Holds the partial schedule as an array of II rows. Each entry of the
168 array points to a linked list of PS_INSNs, which represents the
169 instructions that are scheduled for that row. */
170 struct partial_schedule
172 int ii; /* Number of rows in the partial schedule. */
173 int history; /* Threshold for conflict checking using DFA. */
175 /* rows[i] points to linked list of insns scheduled in row i (0<=i<ii). */
176 ps_insn_ptr *rows;
178 /* All the moves added for this partial schedule. Index X has
179 a ps_insn id of X + g->num_nodes. */
180 VEC (ps_reg_move_info, heap) *reg_moves;
182 /* rows_length[i] holds the number of instructions in the row.
183 It is used only (as an optimization) to back off quickly from
184 trying to schedule a node in a full row; that is, to avoid running
185 through futile DFA state transitions. */
186 int *rows_length;
188 /* The earliest absolute cycle of an insn in the partial schedule. */
189 int min_cycle;
191 /* The latest absolute cycle of an insn in the partial schedule. */
192 int max_cycle;
194 ddg_ptr g; /* The DDG of the insns in the partial schedule. */
196 int stage_count; /* The stage count of the partial schedule. */
200 static partial_schedule_ptr create_partial_schedule (int ii, ddg_ptr, int history);
201 static void free_partial_schedule (partial_schedule_ptr);
202 static void reset_partial_schedule (partial_schedule_ptr, int new_ii);
203 void print_partial_schedule (partial_schedule_ptr, FILE *);
204 static void verify_partial_schedule (partial_schedule_ptr, sbitmap);
205 static ps_insn_ptr ps_add_node_check_conflicts (partial_schedule_ptr,
206 int, int, sbitmap, sbitmap);
207 static void rotate_partial_schedule (partial_schedule_ptr, int);
208 void set_row_column_for_ps (partial_schedule_ptr);
209 static void ps_insert_empty_row (partial_schedule_ptr, int, sbitmap);
210 static int compute_split_row (sbitmap, int, int, int, ddg_node_ptr);
213 /* This page defines constants and structures for the modulo scheduling
214 driver. */
216 static int sms_order_nodes (ddg_ptr, int, int *, int *);
217 static void set_node_sched_params (ddg_ptr);
218 static partial_schedule_ptr sms_schedule_by_order (ddg_ptr, int, int, int *);
219 static void permute_partial_schedule (partial_schedule_ptr, rtx);
220 static void generate_prolog_epilog (partial_schedule_ptr, struct loop *,
221 rtx, rtx);
222 static int calculate_stage_count (partial_schedule_ptr, int);
223 static void calculate_must_precede_follow (ddg_node_ptr, int, int,
224 int, int, sbitmap, sbitmap, sbitmap);
225 static int get_sched_window (partial_schedule_ptr, ddg_node_ptr,
226 sbitmap, int, int *, int *, int *);
227 static bool try_scheduling_node_in_cycle (partial_schedule_ptr, int, int,
228 sbitmap, int *, sbitmap, sbitmap);
229 static void remove_node_from_ps (partial_schedule_ptr, ps_insn_ptr);
231 #define NODE_ASAP(node) ((node)->aux.count)
233 #define SCHED_PARAMS(x) VEC_index (node_sched_params, node_sched_param_vec, x)
234 #define SCHED_TIME(x) (SCHED_PARAMS (x)->time)
235 #define SCHED_ROW(x) (SCHED_PARAMS (x)->row)
236 #define SCHED_STAGE(x) (SCHED_PARAMS (x)->stage)
237 #define SCHED_COLUMN(x) (SCHED_PARAMS (x)->column)
239 /* The scheduling parameters held for each node. */
240 typedef struct node_sched_params
242 int time; /* The absolute scheduling cycle. */
244 int row; /* Holds time % ii. */
245 int stage; /* Holds time / ii. */
247 /* The column of a node inside the ps. If nodes u, v are on the same row,
248 u will precede v if column (u) < column (v). */
249 int column;
250 } *node_sched_params_ptr;
252 typedef struct node_sched_params node_sched_params;
253 DEF_VEC_O (node_sched_params);
254 DEF_VEC_ALLOC_O (node_sched_params, heap);
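/* Illustrative only: with ii = 4, a node scheduled at absolute cycle 10
   gets row = 10 % 4 = 2 and stage = 10 / 4 = 2, assuming the schedule
   starts at cycle 0; update_node_sched_params below handles schedules
   that do not start at cycle 0.  */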
256 /* The following three functions are copied from the current scheduler
257 code in order to use sched_analyze() for computing the dependencies.
258 They are used when initializing the sched_info structure. */
259 static const char *
260 sms_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
262 static char tmp[80];
264 sprintf (tmp, "i%4d", INSN_UID (insn));
265 return tmp;
268 static void
269 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
270 regset used ATTRIBUTE_UNUSED)
274 static struct common_sched_info_def sms_common_sched_info;
276 static struct sched_deps_info_def sms_sched_deps_info =
278 compute_jump_reg_dependencies,
279 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
280 NULL,
281 0, 0, 0
284 static struct haifa_sched_info sms_sched_info =
286 NULL,
287 NULL,
288 NULL,
289 NULL,
290 NULL,
291 sms_print_insn,
292 NULL,
293 NULL, /* insn_finishes_block_p */
294 NULL, NULL,
295 NULL, NULL,
296 0, 0,
298 NULL, NULL, NULL, NULL,
299 NULL, NULL,
303 /* Partial schedule instruction ID in PS is a register move. Return
304 information about it. */
305 static struct ps_reg_move_info *
306 ps_reg_move (partial_schedule_ptr ps, int id)
308 gcc_checking_assert (id >= ps->g->num_nodes);
309 return VEC_index (ps_reg_move_info, ps->reg_moves, id - ps->g->num_nodes);
312 /* Return the rtl instruction that is being scheduled by partial schedule
313 instruction ID, which belongs to schedule PS. */
314 static rtx
315 ps_rtl_insn (partial_schedule_ptr ps, int id)
317 if (id < ps->g->num_nodes)
318 return ps->g->nodes[id].insn;
319 else
320 return ps_reg_move (ps, id)->insn;
323 /* Partial schedule instruction ID, which belongs to PS, occurred in
324 the original (unscheduled) loop. Return the first instruction
325 in the loop that was associated with ps_rtl_insn (PS, ID).
326 If the instruction had some notes before it, this is the first
327 of those notes. */
328 static rtx
329 ps_first_note (partial_schedule_ptr ps, int id)
331 gcc_assert (id < ps->g->num_nodes);
332 return ps->g->nodes[id].first_note;
335 /* Return the number of consecutive stages that are occupied by
336 partial schedule instruction ID in PS. */
337 static int
338 ps_num_consecutive_stages (partial_schedule_ptr ps, int id)
340 if (id < ps->g->num_nodes)
341 return 1;
342 else
343 return ps_reg_move (ps, id)->num_consecutive_stages;
346 /* Given HEAD and TAIL which are the first and last insns in a loop;
347 return the register which controls the loop. Return zero if it has
348 more than one occurrence in the loop besides the control part or if the
349 do-loop pattern is not of the form we expect. */
350 static rtx
351 doloop_register_get (rtx head ATTRIBUTE_UNUSED, rtx tail ATTRIBUTE_UNUSED)
353 #ifdef HAVE_doloop_end
354 rtx reg, condition, insn, first_insn_not_to_check;
356 if (!JUMP_P (tail))
357 return NULL_RTX;
359 /* TODO: Free SMS's dependence on doloop_condition_get. */
360 condition = doloop_condition_get (tail);
361 if (! condition)
362 return NULL_RTX;
364 if (REG_P (XEXP (condition, 0)))
365 reg = XEXP (condition, 0);
366 else if (GET_CODE (XEXP (condition, 0)) == PLUS
367 && REG_P (XEXP (XEXP (condition, 0), 0)))
368 reg = XEXP (XEXP (condition, 0), 0);
369 else
370 gcc_unreachable ();
372 /* Check that the COUNT_REG has no other occurrences in the loop
373 until the decrement. We assume the control part consists of
374 either a single (parallel) branch-on-count or a (non-parallel)
375 branch immediately preceded by a single (decrement) insn. */
376 first_insn_not_to_check = (GET_CODE (PATTERN (tail)) == PARALLEL ? tail
377 : prev_nondebug_insn (tail));
379 for (insn = head; insn != first_insn_not_to_check; insn = NEXT_INSN (insn))
380 if (!DEBUG_INSN_P (insn) && reg_mentioned_p (reg, insn))
382 if (dump_file)
384 fprintf (dump_file, "SMS count_reg found ");
385 print_rtl_single (dump_file, reg);
386 fprintf (dump_file, " outside control in insn:\n");
387 print_rtl_single (dump_file, insn);
390 return NULL_RTX;
393 return reg;
394 #else
395 return NULL_RTX;
396 #endif
399 /* Check if COUNT_REG is set to a constant in the PRE_HEADER block, so
400 that the number of iterations is a compile-time constant. If so,
401 return the rtx that sets COUNT_REG to a constant, and set COUNT to
402 this constant. Otherwise return 0. */
403 static rtx
404 const_iteration_count (rtx count_reg, basic_block pre_header,
405 HOST_WIDEST_INT * count)
407 rtx insn;
408 rtx head, tail;
410 if (! pre_header)
411 return NULL_RTX;
413 get_ebb_head_tail (pre_header, pre_header, &head, &tail);
415 for (insn = tail; insn != PREV_INSN (head); insn = PREV_INSN (insn))
416 if (NONDEBUG_INSN_P (insn) && single_set (insn) &&
417 rtx_equal_p (count_reg, SET_DEST (single_set (insn))))
419 rtx pat = single_set (insn);
421 if (CONST_INT_P (SET_SRC (pat)))
423 *count = INTVAL (SET_SRC (pat));
424 return insn;
427 return NULL_RTX;
430 return NULL_RTX;
433 /* A very simple resource-based lower bound on the initiation interval.
434 ??? Improve the accuracy of this bound by considering the
435 utilization of various units. */
436 static int
437 res_MII (ddg_ptr g)
439 if (targetm.sched.sms_res_mii)
440 return targetm.sched.sms_res_mii (g);
442 return ((g->num_nodes - g->num_debug) / issue_rate);
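/* Illustrative only: without a target hook, a DDG of 12 non-debug nodes
   on a 2-issue machine gives res_MII = 12 / 2 = 6, i.e. at least 6
   cycles per iteration are needed just to issue every instruction.  */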
446 /* A vector that contains the sched data for each ps_insn. */
447 static VEC (node_sched_params, heap) *node_sched_param_vec;
449 /* Allocate sched_params for each node and initialize it. */
450 static void
451 set_node_sched_params (ddg_ptr g)
453 VEC_truncate (node_sched_params, node_sched_param_vec, 0);
454 VEC_safe_grow_cleared (node_sched_params, heap,
455 node_sched_param_vec, g->num_nodes);
458 /* Make sure that node_sched_param_vec has an entry for every move in PS. */
459 static void
460 extend_node_sched_params (partial_schedule_ptr ps)
462 VEC_safe_grow_cleared (node_sched_params, heap, node_sched_param_vec,
463 ps->g->num_nodes + VEC_length (ps_reg_move_info,
464 ps->reg_moves));
467 /* Update the sched_params (time, row and stage) for node U using the II,
468 the CYCLE of U and MIN_CYCLE.
469 We're not simply taking the following
470 SCHED_STAGE (u) = CALC_STAGE_COUNT (SCHED_TIME (u), min_cycle, ii);
471 because the stages may not be aligned on cycle 0. */
472 static void
473 update_node_sched_params (int u, int ii, int cycle, int min_cycle)
475 int sc_until_cycle_zero;
476 int stage;
478 SCHED_TIME (u) = cycle;
479 SCHED_ROW (u) = SMODULO (cycle, ii);
481 /* The calculation of stage count is done by adding the number
482 of stages before cycle zero and after cycle zero. */
483 sc_until_cycle_zero = CALC_STAGE_COUNT (-1, min_cycle, ii);
485 if (SCHED_TIME (u) < 0)
487 stage = CALC_STAGE_COUNT (-1, SCHED_TIME (u), ii);
488 SCHED_STAGE (u) = sc_until_cycle_zero - stage;
490 else
492 stage = CALC_STAGE_COUNT (SCHED_TIME (u), 0, ii);
493 SCHED_STAGE (u) = sc_until_cycle_zero + stage - 1;
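/* Illustrative only, a worked instance of the computation above: with
   ii = 4 and min_cycle = -3, sc_until_cycle_zero is
   CALC_STAGE_COUNT (-1, -3, 4) = 1.  A node at cycle 5 then gets row 1
   and stage 1 + CALC_STAGE_COUNT (5, 0, 4) - 1 = 2, while a node at
   cycle -2 gets row 2 and stage 1 - CALC_STAGE_COUNT (-1, -2, 4) = 0.  */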
497 static void
498 print_node_sched_params (FILE *file, int num_nodes, partial_schedule_ptr ps)
500 int i;
502 if (! file)
503 return;
504 for (i = 0; i < num_nodes; i++)
506 node_sched_params_ptr nsp = SCHED_PARAMS (i);
508 fprintf (file, "Node = %d; INSN = %d\n", i,
509 INSN_UID (ps_rtl_insn (ps, i)));
510 fprintf (file, " asap = %d:\n", NODE_ASAP (&ps->g->nodes[i]));
511 fprintf (file, " time = %d:\n", nsp->time);
512 fprintf (file, " stage = %d:\n", nsp->stage);
516 /* Set SCHED_COLUMN for each instruction in row ROW of PS. */
517 static void
518 set_columns_for_row (partial_schedule_ptr ps, int row)
520 ps_insn_ptr cur_insn;
521 int column;
523 column = 0;
524 for (cur_insn = ps->rows[row]; cur_insn; cur_insn = cur_insn->next_in_row)
525 SCHED_COLUMN (cur_insn->id) = column++;
528 /* Set SCHED_COLUMN for each instruction in PS. */
529 static void
530 set_columns_for_ps (partial_schedule_ptr ps)
532 int row;
534 for (row = 0; row < ps->ii; row++)
535 set_columns_for_row (ps, row);
538 /* Try to schedule the move with ps_insn identifier I_REG_MOVE in PS.
539 Its single predecessor has already been scheduled, as have its
540 ddg node successors. (The move may also have another move as its
541 successor, in which case that successor will be scheduled later.)
543 The move is part of a chain that satisfies register dependencies
544 between a producing ddg node and various consuming ddg nodes.
545 If some of these dependencies have a distance of 1 (meaning that
546 the use is upward-exposed) then DISTANCE1_USES is nonnull and
547 contains the set of uses with distance-1 dependencies.
548 DISTANCE1_USES is null otherwise.
550 MUST_FOLLOW is a scratch bitmap that is big enough to hold
551 all current ps_insn ids.
553 Return true on success. */
554 static bool
555 schedule_reg_move (partial_schedule_ptr ps, int i_reg_move,
556 sbitmap distance1_uses, sbitmap must_follow)
558 unsigned int u;
559 int this_time, this_distance, this_start, this_end, this_latency;
560 int start, end, c, ii;
561 sbitmap_iterator sbi;
562 ps_reg_move_info *move;
563 rtx this_insn;
564 ps_insn_ptr psi;
566 move = ps_reg_move (ps, i_reg_move);
567 ii = ps->ii;
568 if (dump_file)
570 fprintf (dump_file, "Scheduling register move INSN %d; ii = %d"
571 ", min cycle = %d\n\n", INSN_UID (move->insn), ii,
572 PS_MIN_CYCLE (ps));
573 print_rtl_single (dump_file, move->insn);
574 fprintf (dump_file, "\n%11s %11s %5s\n", "start", "end", "time");
575 fprintf (dump_file, "=========== =========== =====\n");
578 start = INT_MIN;
579 end = INT_MAX;
581 /* For dependencies of distance 1 between a producer ddg node A
582 and consumer ddg node B, we have a chain of dependencies:
584 A --(T,L1,1)--> M1 --(T,L2,0)--> M2 ... --(T,Ln,0)--> B
586 where Mi is the ith move. For dependencies of distance 0 between
587 a producer ddg node A and consumer ddg node C, we have a chain of
588 dependencies:
590 A --(T,L1',0)--> M1' --(T,L2',0)--> M2' ... --(T,Ln',0)--> C
592 where Mi' occupies the same position as Mi but occurs a stage later.
593 We can only schedule each move once, so if we have both types of
594 chain, we model the second as:
596 A --(T,L1',1)--> M1 --(T,L2',0)--> M2 ... --(T,Ln',-1)--> C
598 First handle the dependencies between the previously-scheduled
599 predecessor and the move. */
600 this_insn = ps_rtl_insn (ps, move->def);
601 this_latency = insn_latency (this_insn, move->insn);
602 this_distance = distance1_uses && move->def < ps->g->num_nodes ? 1 : 0;
603 this_time = SCHED_TIME (move->def) - this_distance * ii;
604 this_start = this_time + this_latency;
605 this_end = this_time + ii;
606 if (dump_file)
607 fprintf (dump_file, "%11d %11d %5d %d --(T,%d,%d)--> %d\n",
608 this_start, this_end, SCHED_TIME (move->def),
609 INSN_UID (this_insn), this_latency, this_distance,
610 INSN_UID (move->insn));
612 if (start < this_start)
613 start = this_start;
614 if (end > this_end)
615 end = this_end;
617 /* Handle the dependencies between the move and previously-scheduled
618 successors. */
619 EXECUTE_IF_SET_IN_SBITMAP (move->uses, 0, u, sbi)
621 this_insn = ps_rtl_insn (ps, u);
622 this_latency = insn_latency (move->insn, this_insn);
623 if (distance1_uses && !TEST_BIT (distance1_uses, u))
624 this_distance = -1;
625 else
626 this_distance = 0;
627 this_time = SCHED_TIME (u) + this_distance * ii;
628 this_start = this_time - ii;
629 this_end = this_time - this_latency;
630 if (dump_file)
631 fprintf (dump_file, "%11d %11d %5d %d --(T,%d,%d)--> %d\n",
632 this_start, this_end, SCHED_TIME (u), INSN_UID (move->insn),
633 this_latency, this_distance, INSN_UID (this_insn));
635 if (start < this_start)
636 start = this_start;
637 if (end > this_end)
638 end = this_end;
641 if (dump_file)
643 fprintf (dump_file, "----------- ----------- -----\n");
644 fprintf (dump_file, "%11d %11d %5s %s\n", start, end, "", "(max, min)");
647 sbitmap_zero (must_follow);
648 SET_BIT (must_follow, move->def);
650 start = MAX (start, end - (ii - 1));
651 for (c = end; c >= start; c--)
653 psi = ps_add_node_check_conflicts (ps, i_reg_move, c,
654 move->uses, must_follow);
655 if (psi)
657 update_node_sched_params (i_reg_move, ii, c, PS_MIN_CYCLE (ps));
658 if (dump_file)
659 fprintf (dump_file, "\nScheduled register move INSN %d at"
660 " time %d, row %d\n\n", INSN_UID (move->insn), c,
661 SCHED_ROW (i_reg_move));
662 return true;
666 if (dump_file)
667 fprintf (dump_file, "\nNo available slot\n\n");
669 return false;
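/* Illustrative only, a worked instance of the window above: with ii = 4,
   a scheduled def at cycle 6 with latency 2 gives this_start = 8 and
   this_end = 10; a scheduled use at cycle 13 with latency 1 and
   distance 0 gives this_start = 13 - 4 = 9 and this_end = 13 - 1 = 12.
   The move is therefore tried at cycles 10 down to 9.  */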
673 Breaking intra-loop register anti-dependences:
674 Each intra-loop register anti-dependence implies a cross-iteration true
675 dependence of distance 1. Therefore, we can remove such false dependencies
676 and figure out if the partial schedule broke them by checking if (for a
677 true-dependence of distance 1): SCHED_TIME (def) < SCHED_TIME (use) and
678 if so generate a register move. The number of such moves is equal to:
679 nreg_moves = (SCHED_TIME (use) - SCHED_TIME (def)) / ii + 1
680 - (0 if the dependence is broken,
681 1 if not).
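/* Illustrative only: with ii = 4, a def scheduled at cycle 2 and a use
   scheduled at cycle 9 across a broken distance-1 dependence need
   (9 - 2) / 4 + 1 - 0 = 2 register moves.  */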
683 static bool
684 schedule_reg_moves (partial_schedule_ptr ps)
686 ddg_ptr g = ps->g;
687 int ii = ps->ii;
688 int i;
690 for (i = 0; i < g->num_nodes; i++)
692 ddg_node_ptr u = &g->nodes[i];
693 ddg_edge_ptr e;
694 int nreg_moves = 0, i_reg_move;
695 rtx prev_reg, old_reg;
696 int first_move;
697 int distances[2];
698 sbitmap must_follow;
699 sbitmap distance1_uses;
700 rtx set = single_set (u->insn);
702 /* Skip instructions that do not set a register. */
703 if ((set && !REG_P (SET_DEST (set))))
704 continue;
706 /* Compute the number of reg_moves needed for u, by looking at life
707 ranges started at u (excluding self-loops). */
708 distances[0] = distances[1] = false;
709 for (e = u->out; e; e = e->next_out)
710 if (e->type == TRUE_DEP && e->dest != e->src)
712 int nreg_moves4e = (SCHED_TIME (e->dest->cuid)
713 - SCHED_TIME (e->src->cuid)) / ii;
715 if (e->distance == 1)
716 nreg_moves4e = (SCHED_TIME (e->dest->cuid)
717 - SCHED_TIME (e->src->cuid) + ii) / ii;
719 /* If dest precedes src in the schedule of the kernel, then dest
720 will read before src writes and we can save one reg_copy. */
721 if (SCHED_ROW (e->dest->cuid) == SCHED_ROW (e->src->cuid)
722 && SCHED_COLUMN (e->dest->cuid) < SCHED_COLUMN (e->src->cuid))
723 nreg_moves4e--;
725 if (nreg_moves4e >= 1)
727 /* !single_set instructions are not supported yet and
728 thus we do not expect to encounter them in the loop
729 except in the doloop part. For the latter case
730 we assume no regmoves are generated as the doloop
731 instructions are tied to the branch with an edge. */
732 gcc_assert (set);
733 /* If the instruction contains an auto-inc register then
734 validate that the regmove is being generated for the
735 target register rather than the inc'ed register. */
736 gcc_assert (!autoinc_var_is_used_p (u->insn, e->dest->insn));
739 if (nreg_moves4e)
741 gcc_assert (e->distance < 2);
742 distances[e->distance] = true;
744 nreg_moves = MAX (nreg_moves, nreg_moves4e);
747 if (nreg_moves == 0)
748 continue;
750 /* Create NREG_MOVES register moves. */
751 first_move = VEC_length (ps_reg_move_info, ps->reg_moves);
752 VEC_safe_grow_cleared (ps_reg_move_info, heap, ps->reg_moves,
753 first_move + nreg_moves);
754 extend_node_sched_params (ps);
756 /* Record the moves associated with this node. */
757 first_move += ps->g->num_nodes;
759 /* Generate each move. */
760 old_reg = prev_reg = SET_DEST (single_set (u->insn));
761 for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++)
763 ps_reg_move_info *move = ps_reg_move (ps, first_move + i_reg_move);
765 move->def = i_reg_move > 0 ? first_move + i_reg_move - 1 : i;
766 move->uses = sbitmap_alloc (first_move + nreg_moves);
767 move->old_reg = old_reg;
768 move->new_reg = gen_reg_rtx (GET_MODE (prev_reg));
769 move->num_consecutive_stages = distances[0] && distances[1] ? 2 : 1;
770 move->insn = gen_move_insn (move->new_reg, copy_rtx (prev_reg));
771 sbitmap_zero (move->uses);
773 prev_reg = move->new_reg;
776 distance1_uses = distances[1] ? sbitmap_alloc (g->num_nodes) : NULL;
778 /* Every use of the register defined by node may require a different
779 copy of this register, depending on the time the use is scheduled.
780 Record which uses require which move results. */
781 for (e = u->out; e; e = e->next_out)
782 if (e->type == TRUE_DEP && e->dest != e->src)
784 int dest_copy = (SCHED_TIME (e->dest->cuid)
785 - SCHED_TIME (e->src->cuid)) / ii;
787 if (e->distance == 1)
788 dest_copy = (SCHED_TIME (e->dest->cuid)
789 - SCHED_TIME (e->src->cuid) + ii) / ii;
791 if (SCHED_ROW (e->dest->cuid) == SCHED_ROW (e->src->cuid)
792 && SCHED_COLUMN (e->dest->cuid) < SCHED_COLUMN (e->src->cuid))
793 dest_copy--;
795 if (dest_copy)
797 ps_reg_move_info *move;
799 move = ps_reg_move (ps, first_move + dest_copy - 1);
800 SET_BIT (move->uses, e->dest->cuid);
801 if (e->distance == 1)
802 SET_BIT (distance1_uses, e->dest->cuid);
806 must_follow = sbitmap_alloc (first_move + nreg_moves);
807 for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++)
808 if (!schedule_reg_move (ps, first_move + i_reg_move,
809 distance1_uses, must_follow))
810 break;
811 sbitmap_free (must_follow);
812 if (distance1_uses)
813 sbitmap_free (distance1_uses);
814 if (i_reg_move < nreg_moves)
815 return false;
817 return true;
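/* Illustrative only: if node u defines r10 and one of its uses is
   scheduled two stages later, two moves are generated, giving the copy
   chain r10 -> r20 -> r21; uses one stage after the def read r20 and
   uses two stages after read r21 (the register numbers are made up).  */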
820 /* Emit the moves associated with PS. Apply the substitutions
821 associated with them. */
822 static void
823 apply_reg_moves (partial_schedule_ptr ps)
825 ps_reg_move_info *move;
826 int i;
828 FOR_EACH_VEC_ELT (ps_reg_move_info, ps->reg_moves, i, move)
830 unsigned int i_use;
831 sbitmap_iterator sbi;
833 EXECUTE_IF_SET_IN_SBITMAP (move->uses, 0, i_use, sbi)
835 replace_rtx (ps->g->nodes[i_use].insn, move->old_reg, move->new_reg);
836 df_insn_rescan (ps->g->nodes[i_use].insn);
841 /* Bump the SCHED_TIMEs of all nodes by AMOUNT. Set the values of
842 SCHED_ROW and SCHED_STAGE. Instruction scheduled on cycle AMOUNT
843 will move to cycle zero. */
844 static void
845 reset_sched_times (partial_schedule_ptr ps, int amount)
847 int row;
848 int ii = ps->ii;
849 ps_insn_ptr crr_insn;
851 for (row = 0; row < ii; row++)
852 for (crr_insn = ps->rows[row]; crr_insn; crr_insn = crr_insn->next_in_row)
854 int u = crr_insn->id;
855 int normalized_time = SCHED_TIME (u) - amount;
856 int new_min_cycle = PS_MIN_CYCLE (ps) - amount;
858 if (dump_file)
860 /* Print the scheduling times after the rotation. */
861 rtx insn = ps_rtl_insn (ps, u);
863 fprintf (dump_file, "crr_insn->node=%d (insn id %d), "
864 "crr_insn->cycle=%d, min_cycle=%d", u,
865 INSN_UID (insn), normalized_time, new_min_cycle);
866 if (JUMP_P (insn))
867 fprintf (dump_file, " (branch)");
868 fprintf (dump_file, "\n");
871 gcc_assert (SCHED_TIME (u) >= ps->min_cycle);
872 gcc_assert (SCHED_TIME (u) <= ps->max_cycle);
874 crr_insn->cycle = normalized_time;
875 update_node_sched_params (u, ii, normalized_time, new_min_cycle);
879 /* Permute the insns according to their order in PS, from row 0 to
880 row ii-1, and position them right before LAST. This schedules
881 the insns of the loop kernel. */
882 static void
883 permute_partial_schedule (partial_schedule_ptr ps, rtx last)
885 int ii = ps->ii;
886 int row;
887 ps_insn_ptr ps_ij;
889 for (row = 0; row < ii ; row++)
890 for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row)
892 rtx insn = ps_rtl_insn (ps, ps_ij->id);
894 if (PREV_INSN (last) != insn)
896 if (ps_ij->id < ps->g->num_nodes)
897 reorder_insns_nobb (ps_first_note (ps, ps_ij->id), insn,
898 PREV_INSN (last));
899 else
900 add_insn_before (insn, last, NULL);
905 /* Set bitmaps TMP_FOLLOW and TMP_PRECEDE to MUST_FOLLOW and MUST_PRECEDE
906 respectively only if cycle C falls on the border of the scheduling
907 window boundaries marked by START and END cycles. STEP is the
908 direction of the window. */
909 static inline void
910 set_must_precede_follow (sbitmap *tmp_follow, sbitmap must_follow,
911 sbitmap *tmp_precede, sbitmap must_precede, int c,
912 int start, int end, int step)
914 *tmp_precede = NULL;
915 *tmp_follow = NULL;
917 if (c == start)
919 if (step == 1)
920 *tmp_precede = must_precede;
921 else /* step == -1. */
922 *tmp_follow = must_follow;
924 if (c == end - step)
926 if (step == 1)
927 *tmp_follow = must_follow;
928 else /* step == -1. */
929 *tmp_precede = must_precede;
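/* Illustrative only: for a forward window with start = 3, end = 11 and
   step = 1, only cycle 3 receives MUST_PRECEDE and only the last
   in-window cycle, end - step = 10, receives MUST_FOLLOW; the cycles in
   between are tried with neither restriction.  */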
934 /* Return True if the branch can be moved to row ii-1 while
935 normalizing the partial schedule PS to start from cycle zero and thus
936 optimize the SC. Otherwise return False. */
937 static bool
938 optimize_sc (partial_schedule_ptr ps, ddg_ptr g)
940 int amount = PS_MIN_CYCLE (ps);
941 sbitmap sched_nodes = sbitmap_alloc (g->num_nodes);
942 int start, end, step;
943 int ii = ps->ii;
944 bool ok = false;
945 int stage_count, stage_count_curr;
947 /* Compare the SC after normalization and SC after bringing the branch
948 to row ii-1. If they are equal just bail out. */
949 stage_count = calculate_stage_count (ps, amount);
950 stage_count_curr =
951 calculate_stage_count (ps, SCHED_TIME (g->closing_branch->cuid) - (ii - 1));
953 if (stage_count == stage_count_curr)
955 if (dump_file)
956 fprintf (dump_file, "SMS SC already optimized.\n");
958 ok = false;
959 goto clear;
962 if (dump_file)
964 fprintf (dump_file, "SMS Trying to optimize branch location\n");
965 fprintf (dump_file, "SMS partial schedule before trial:\n");
966 print_partial_schedule (ps, dump_file);
969 /* First, normalize the partial scheduling. */
970 reset_sched_times (ps, amount);
971 rotate_partial_schedule (ps, amount);
972 if (dump_file)
974 fprintf (dump_file,
975 "SMS partial schedule after normalization (ii, %d, SC %d):\n",
976 ii, stage_count);
977 print_partial_schedule (ps, dump_file);
980 if (SMODULO (SCHED_TIME (g->closing_branch->cuid), ii) == ii - 1)
982 ok = true;
983 goto clear;
986 sbitmap_ones (sched_nodes);
988 /* Calculate the new placement of the branch. It should be in row
989 ii-1 and fall into its scheduling window. */
990 if (get_sched_window (ps, g->closing_branch, sched_nodes, ii, &start,
991 &step, &end) == 0)
993 bool success;
994 ps_insn_ptr next_ps_i;
995 int branch_cycle = SCHED_TIME (g->closing_branch->cuid);
996 int row = SMODULO (branch_cycle, ps->ii);
997 int num_splits = 0;
998 sbitmap must_precede, must_follow, tmp_precede, tmp_follow;
999 int c;
1001 if (dump_file)
1002 fprintf (dump_file, "\nTrying to schedule node %d "
1003 "INSN = %d in (%d .. %d) step %d\n",
1004 g->closing_branch->cuid,
1005 (INSN_UID (g->closing_branch->insn)), start, end, step);
1007 gcc_assert ((step > 0 && start < end) || (step < 0 && start > end));
1008 if (step == 1)
1010 c = start + ii - SMODULO (start, ii) - 1;
1011 gcc_assert (c >= start);
1012 if (c >= end)
1014 ok = false;
1015 if (dump_file)
1016 fprintf (dump_file,
1017 "SMS failed to schedule branch at cycle: %d\n", c);
1018 goto clear;
1021 else
1023 c = start - SMODULO (start, ii) - 1;
1024 gcc_assert (c <= start);
1026 if (c <= end)
1028 if (dump_file)
1029 fprintf (dump_file,
1030 "SMS failed to schedule branch at cycle: %d\n", c);
1031 ok = false;
1032 goto clear;
1036 must_precede = sbitmap_alloc (g->num_nodes);
1037 must_follow = sbitmap_alloc (g->num_nodes);
1039 /* Try to schedule the branch in its new cycle. */
1040 calculate_must_precede_follow (g->closing_branch, start, end,
1041 step, ii, sched_nodes,
1042 must_precede, must_follow);
1044 set_must_precede_follow (&tmp_follow, must_follow, &tmp_precede,
1045 must_precede, c, start, end, step);
1047 /* Find the element in the partial schedule related to the closing
1048 branch so we can remove it from its current cycle. */
1049 for (next_ps_i = ps->rows[row];
1050 next_ps_i; next_ps_i = next_ps_i->next_in_row)
1051 if (next_ps_i->id == g->closing_branch->cuid)
1052 break;
1054 remove_node_from_ps (ps, next_ps_i);
1055 success =
1056 try_scheduling_node_in_cycle (ps, g->closing_branch->cuid, c,
1057 sched_nodes, &num_splits,
1058 tmp_precede, tmp_follow);
1059 gcc_assert (num_splits == 0);
1060 if (!success)
1062 if (dump_file)
1063 fprintf (dump_file,
1064 "SMS failed to schedule branch at cycle: %d, "
1065 "bringing it back to cycle %d\n", c, branch_cycle);
1067 /* The branch could not be placed in row ii - 1.
1068 Put it back in its original place in the partial
1069 schedule. */
1070 set_must_precede_follow (&tmp_follow, must_follow, &tmp_precede,
1071 must_precede, branch_cycle, start, end,
1072 step);
1073 success =
1074 try_scheduling_node_in_cycle (ps, g->closing_branch->cuid,
1075 branch_cycle, sched_nodes,
1076 &num_splits, tmp_precede,
1077 tmp_follow);
1078 gcc_assert (success && (num_splits == 0));
1079 ok = false;
1081 else
1083 /* The branch is placed in row ii - 1. */
1084 if (dump_file)
1085 fprintf (dump_file,
1086 "SMS success in moving branch to cycle %d\n", c);
1088 update_node_sched_params (g->closing_branch->cuid, ii, c,
1089 PS_MIN_CYCLE (ps));
1090 ok = true;
1093 free (must_precede);
1094 free (must_follow);
1097 clear:
1098 free (sched_nodes);
1099 return ok;
1102 static void
1103 duplicate_insns_of_cycles (partial_schedule_ptr ps, int from_stage,
1104 int to_stage, rtx count_reg)
1106 int row;
1107 ps_insn_ptr ps_ij;
1109 for (row = 0; row < ps->ii; row++)
1110 for (ps_ij = ps->rows[row]; ps_ij; ps_ij = ps_ij->next_in_row)
1112 int u = ps_ij->id;
1113 int first_u, last_u;
1114 rtx u_insn;
1116 /* Do not duplicate any insn which refers to count_reg as it
1117 belongs to the control part.
1118 The closing branch is scheduled as well and thus should
1119 be ignored.
1120 TODO: This should be done by analyzing the control part of
1121 the loop. */
1122 u_insn = ps_rtl_insn (ps, u);
1123 if (reg_mentioned_p (count_reg, u_insn)
1124 || JUMP_P (u_insn))
1125 continue;
1127 first_u = SCHED_STAGE (u);
1128 last_u = first_u + ps_num_consecutive_stages (ps, u) - 1;
1129 if (from_stage <= last_u && to_stage >= first_u)
1131 if (u < ps->g->num_nodes)
1132 duplicate_insn_chain (ps_first_note (ps, u), u_insn);
1133 else
1134 emit_insn (copy_rtx (PATTERN (u_insn)));
1140 /* Generate the instructions (including reg_moves) for prolog & epilog. */
1141 static void
1142 generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
1143 rtx count_reg, rtx count_init)
1145 int i;
1146 int last_stage = PS_STAGE_COUNT (ps) - 1;
1147 edge e;
1149 /* Generate the prolog, inserting its insns on the loop-entry edge. */
1150 start_sequence ();
1152 if (!count_init)
1154 /* Generate instructions at the beginning of the prolog to
1155 adjust the loop count by STAGE_COUNT. If loop count is constant
1156 (count_init), this constant is adjusted by STAGE_COUNT in
1157 generate_prolog_epilog function. */
1158 rtx sub_reg = NULL_RTX;
1160 sub_reg = expand_simple_binop (GET_MODE (count_reg), MINUS,
1161 count_reg, GEN_INT (last_stage),
1162 count_reg, 1, OPTAB_DIRECT);
1163 gcc_assert (REG_P (sub_reg));
1164 if (REGNO (sub_reg) != REGNO (count_reg))
1165 emit_move_insn (count_reg, sub_reg);
1168 for (i = 0; i < last_stage; i++)
1169 duplicate_insns_of_cycles (ps, 0, i, count_reg);
1171 /* Put the prolog on the entry edge. */
1172 e = loop_preheader_edge (loop);
1173 split_edge_and_insert (e, get_insns ());
1174 if (!flag_resched_modulo_sched)
1175 e->dest->flags |= BB_DISABLE_SCHEDULE;
1177 end_sequence ();
1179 /* Generate the epilog, inserting its insns on the loop-exit edge. */
1180 start_sequence ();
1182 for (i = 0; i < last_stage; i++)
1183 duplicate_insns_of_cycles (ps, i + 1, last_stage, count_reg);
1185 /* Put the epilogue on the exit edge. */
1186 gcc_assert (single_exit (loop));
1187 e = single_exit (loop);
1188 split_edge_and_insert (e, get_insns ());
1189 if (!flag_resched_modulo_sched)
1190 e->dest->flags |= BB_DISABLE_SCHEDULE;
1192 end_sequence ();
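/* Illustrative only: for a loop of 10 iterations pipelined with a stage
   count of 3, the prolog and epilog each materialize 2 partial
   iterations, and the kernel runs 8 times: the count register is either
   decremented by last_stage = 2 above, or set to 10 - 3 + 1 = 8 in
   sms_schedule when the iteration count is a compile-time constant.  */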
1195 /* Mark LOOP as software pipelined so the later
1196 scheduling passes don't touch it. */
1197 static void
1198 mark_loop_unsched (struct loop *loop)
1200 unsigned i;
1201 basic_block *bbs = get_loop_body (loop);
1203 for (i = 0; i < loop->num_nodes; i++)
1204 bbs[i]->flags |= BB_DISABLE_SCHEDULE;
1206 free (bbs);
1209 /* Return true if all the BBs of the loop are empty except the
1210 loop header. */
1211 static bool
1212 loop_single_full_bb_p (struct loop *loop)
1214 unsigned i;
1215 basic_block *bbs = get_loop_body (loop);
1217 for (i = 0; i < loop->num_nodes ; i++)
1219 rtx head, tail;
1220 bool empty_bb = true;
1222 if (bbs[i] == loop->header)
1223 continue;
1225 /* Make sure that basic blocks other than the header
1226 have only notes labels or jumps. */
1227 get_ebb_head_tail (bbs[i], bbs[i], &head, &tail);
1228 for (; head != NEXT_INSN (tail); head = NEXT_INSN (head))
1230 if (NOTE_P (head) || LABEL_P (head)
1231 || (INSN_P (head) && (DEBUG_INSN_P (head) || JUMP_P (head))))
1232 continue;
1233 empty_bb = false;
1234 break;
1237 if (! empty_bb)
1239 free (bbs);
1240 return false;
1243 free (bbs);
1244 return true;
1247 /* Dump file:line from INSN's location info to dump_file. */
1249 static void
1250 dump_insn_locator (rtx insn)
1252 if (dump_file && INSN_LOCATOR (insn))
1254 const char *file = insn_file (insn);
1255 if (file)
1256 fprintf (dump_file, " %s:%i", file, insn_line (insn));
1260 /* A simple loop from SMS point of view; it is a loop that is composed of
1261 either a single basic block or two BBs - a header and a latch. */
1262 #define SIMPLE_SMS_LOOP_P(loop) ((loop->num_nodes < 3 ) \
1263 && (EDGE_COUNT (loop->latch->preds) == 1) \
1264 && (EDGE_COUNT (loop->latch->succs) == 1))
1266 /* Return true if the loop is in its canonical form and false if not.
1267 i.e. it is SIMPLE_SMS_LOOP_P, has one preheader block, and a single exit. */
1268 static bool
1269 loop_canon_p (struct loop *loop)
1272 if (loop->inner || !loop_outer (loop))
1274 if (dump_file)
1275 fprintf (dump_file, "SMS loop inner or !loop_outer\n");
1276 return false;
1279 if (!single_exit (loop))
1281 if (dump_file)
1283 rtx insn = BB_END (loop->header);
1285 fprintf (dump_file, "SMS loop many exits");
1286 dump_insn_locator (insn);
1287 fprintf (dump_file, "\n");
1289 return false;
1292 if (! SIMPLE_SMS_LOOP_P (loop) && ! loop_single_full_bb_p (loop))
1294 if (dump_file)
1296 rtx insn = BB_END (loop->header);
1298 fprintf (dump_file, "SMS loop many BBs.");
1299 dump_insn_locator (insn);
1300 fprintf (dump_file, "\n");
1302 return false;
1305 return true;
1308 /* If there is more than one entry for the loop,
1309 make it one by splitting the first entry edge and
1310 redirecting the others to the new BB. */
1311 static void
1312 canon_loop (struct loop *loop)
1314 edge e;
1315 edge_iterator i;
1317 /* Avoid annoying special cases of edges going to exit
1318 block. */
1319 FOR_EACH_EDGE (e, i, EXIT_BLOCK_PTR->preds)
1320 if ((e->flags & EDGE_FALLTHRU) && (EDGE_COUNT (e->src->succs) > 1))
1321 split_edge (e);
1323 if (loop->latch == loop->header
1324 || EDGE_COUNT (loop->latch->succs) > 1)
1326 FOR_EACH_EDGE (e, i, loop->header->preds)
1327 if (e->src == loop->latch)
1328 break;
1329 split_edge (e);
1333 /* Setup infos. */
1334 static void
1335 setup_sched_infos (void)
1337 memcpy (&sms_common_sched_info, &haifa_common_sched_info,
1338 sizeof (sms_common_sched_info));
1339 sms_common_sched_info.sched_pass_id = SCHED_SMS_PASS;
1340 common_sched_info = &sms_common_sched_info;
1342 sched_deps_info = &sms_sched_deps_info;
1343 current_sched_info = &sms_sched_info;
1346 /* Probability in % that the sms-ed loop rolls enough so that optimized
1347 version may be entered. Just a guess. */
1348 #define PROB_SMS_ENOUGH_ITERATIONS 80
1350 /* Used to calculate the upper bound of ii. */
1351 #define MAXII_FACTOR 2
1353 /* Main entry point, perform SMS scheduling on the loops of the function
1354 that consist of single basic blocks. */
1355 static void
1356 sms_schedule (void)
1358 rtx insn;
1359 ddg_ptr *g_arr, g;
1360 int * node_order;
1361 int maxii, max_asap;
1362 loop_iterator li;
1363 partial_schedule_ptr ps;
1364 basic_block bb = NULL;
1365 struct loop *loop;
1366 basic_block condition_bb = NULL;
1367 edge latch_edge;
1368 gcov_type trip_count = 0;
1370 loop_optimizer_init (LOOPS_HAVE_PREHEADERS
1371 | LOOPS_HAVE_RECORDED_EXITS);
1372 if (number_of_loops () <= 1)
1374 loop_optimizer_finalize ();
1375 return; /* There are no loops to schedule. */
1378 /* Initialize issue_rate. */
1379 if (targetm.sched.issue_rate)
1381 int temp = reload_completed;
1383 reload_completed = 1;
1384 issue_rate = targetm.sched.issue_rate ();
1385 reload_completed = temp;
1387 else
1388 issue_rate = 1;
1390 /* Initialize the scheduler. */
1391 setup_sched_infos ();
1392 haifa_sched_init ();
1394 /* Allocate memory to hold the DDG array one entry for each loop.
1395 We use loop->num as index into this array. */
1396 g_arr = XCNEWVEC (ddg_ptr, number_of_loops ());
1398 if (dump_file)
1400 fprintf (dump_file, "\n\nSMS analysis phase\n");
1401 fprintf (dump_file, "===================\n\n");
1404 /* Build DDGs for all the relevant loops and hold them in G_ARR
1405 indexed by the loop index. */
1406 FOR_EACH_LOOP (li, loop, 0)
1408 rtx head, tail;
1409 rtx count_reg;
1411 /* For debugging. */
1412 if (dbg_cnt (sms_sched_loop) == false)
1414 if (dump_file)
1415 fprintf (dump_file, "SMS reached max limit... \n");
1417 break;
1420 if (dump_file)
1422 rtx insn = BB_END (loop->header);
1424 fprintf (dump_file, "SMS loop num: %d", loop->num);
1425 dump_insn_locator (insn);
1426 fprintf (dump_file, "\n");
1429 if (! loop_canon_p (loop))
1430 continue;
1432 if (! loop_single_full_bb_p (loop))
1434 if (dump_file)
1435 fprintf (dump_file, "SMS not loop_single_full_bb_p\n");
1436 continue;
1439 bb = loop->header;
1441 get_ebb_head_tail (bb, bb, &head, &tail);
1442 latch_edge = loop_latch_edge (loop);
1443 gcc_assert (single_exit (loop));
1444 if (single_exit (loop)->count)
1445 trip_count = latch_edge->count / single_exit (loop)->count;
1447 /* Perform SMS only on loops whose average count is above the threshold. */
1449 if ( latch_edge->count
1450 && (latch_edge->count < single_exit (loop)->count * SMS_LOOP_AVERAGE_COUNT_THRESHOLD))
1452 if (dump_file)
1454 dump_insn_locator (tail);
1455 fprintf (dump_file, "\nSMS single-bb-loop\n");
1456 if (profile_info && flag_branch_probabilities)
1458 fprintf (dump_file, "SMS loop-count ");
1459 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1460 (HOST_WIDEST_INT) bb->count);
1461 fprintf (dump_file, "\n");
1462 fprintf (dump_file, "SMS trip-count ");
1463 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1464 (HOST_WIDEST_INT) trip_count);
1465 fprintf (dump_file, "\n");
1466 fprintf (dump_file, "SMS profile-sum-max ");
1467 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1468 (HOST_WIDEST_INT) profile_info->sum_max);
1469 fprintf (dump_file, "\n");
1472 continue;
1475 /* Make sure this is a doloop. */
1476 if ( !(count_reg = doloop_register_get (head, tail)))
1478 if (dump_file)
1479 fprintf (dump_file, "SMS doloop_register_get failed\n");
1480 continue;
1483 /* Don't handle BBs with calls or barriers
1484 or !single_set with the exception of instructions that include
1485 count_reg---these instructions are part of the control part
1486 that do-loop recognizes.
1487 ??? Should handle insns defining subregs. */
1488 for (insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
1490 rtx set;
1492 if (CALL_P (insn)
1493 || BARRIER_P (insn)
1494 || (NONDEBUG_INSN_P (insn) && !JUMP_P (insn)
1495 && !single_set (insn) && GET_CODE (PATTERN (insn)) != USE
1496 && !reg_mentioned_p (count_reg, insn))
1497 || (INSN_P (insn) && (set = single_set (insn))
1498 && GET_CODE (SET_DEST (set)) == SUBREG))
1499 break;
1502 if (insn != NEXT_INSN (tail))
1504 if (dump_file)
1506 if (CALL_P (insn))
1507 fprintf (dump_file, "SMS loop-with-call\n");
1508 else if (BARRIER_P (insn))
1509 fprintf (dump_file, "SMS loop-with-barrier\n");
1510 else if ((NONDEBUG_INSN_P (insn) && !JUMP_P (insn)
1511 && !single_set (insn) && GET_CODE (PATTERN (insn)) != USE))
1512 fprintf (dump_file, "SMS loop-with-not-single-set\n");
1513 else
1514 fprintf (dump_file, "SMS loop with subreg in lhs\n");
1515 print_rtl_single (dump_file, insn);
1518 continue;
1521 /* Always schedule the closing branch with the rest of the
1522 instructions. The branch is rotated to be in row ii-1 at the
1523 end of the scheduling procedure to make sure it's the last
1524 instruction in the iteration. */
1525 if (! (g = create_ddg (bb, 1)))
1527 if (dump_file)
1528 fprintf (dump_file, "SMS create_ddg failed\n");
1529 continue;
1532 g_arr[loop->num] = g;
1533 if (dump_file)
1534 fprintf (dump_file, "...OK\n");
1537 if (dump_file)
1539 fprintf (dump_file, "\nSMS transformation phase\n");
1540 fprintf (dump_file, "=========================\n\n");
1543 /* We don't want to perform SMS on new loops - created by versioning. */
1544 FOR_EACH_LOOP (li, loop, 0)
1546 rtx head, tail;
1547 rtx count_reg, count_init;
1548 int mii, rec_mii, stage_count, min_cycle;
1549 HOST_WIDEST_INT loop_count = 0;
1550 bool opt_sc_p;
1552 if (! (g = g_arr[loop->num]))
1553 continue;
1555 if (dump_file)
1557 rtx insn = BB_END (loop->header);
1559 fprintf (dump_file, "SMS loop num: %d", loop->num);
1560 dump_insn_locator (insn);
1561 fprintf (dump_file, "\n");
1563 print_ddg (dump_file, g);
1566 get_ebb_head_tail (loop->header, loop->header, &head, &tail);
1568 latch_edge = loop_latch_edge (loop);
1569 gcc_assert (single_exit (loop));
1570 if (single_exit (loop)->count)
1571 trip_count = latch_edge->count / single_exit (loop)->count;
1573 if (dump_file)
1575 dump_insn_locator (tail);
1576 fprintf (dump_file, "\nSMS single-bb-loop\n");
1577 if (profile_info && flag_branch_probabilities)
1579 fprintf (dump_file, "SMS loop-count ");
1580 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1581 (HOST_WIDEST_INT) bb->count);
1582 fprintf (dump_file, "\n");
1583 fprintf (dump_file, "SMS profile-sum-max ");
1584 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1585 (HOST_WIDEST_INT) profile_info->sum_max);
1586 fprintf (dump_file, "\n");
1588 fprintf (dump_file, "SMS doloop\n");
1589 fprintf (dump_file, "SMS built-ddg %d\n", g->num_nodes);
1590 fprintf (dump_file, "SMS num-loads %d\n", g->num_loads);
1591 fprintf (dump_file, "SMS num-stores %d\n", g->num_stores);
1595 /* In case the loop has a doloop register it gets special
1596 handling. */
1597 count_init = NULL_RTX;
1598 if ((count_reg = doloop_register_get (head, tail)))
1600 basic_block pre_header;
1602 pre_header = loop_preheader_edge (loop)->src;
1603 count_init = const_iteration_count (count_reg, pre_header,
1604 &loop_count);
1606 gcc_assert (count_reg);
1608 if (dump_file && count_init)
1610 fprintf (dump_file, "SMS const-doloop ");
1611 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
1612 loop_count);
1613 fprintf (dump_file, "\n");
1616 node_order = XNEWVEC (int, g->num_nodes);
1618 mii = 1; /* Need to pass some estimate of mii. */
1619 rec_mii = sms_order_nodes (g, mii, node_order, &max_asap);
1620 mii = MAX (res_MII (g), rec_mii);
1621 maxii = MAX (max_asap, MAXII_FACTOR * mii);
1623 if (dump_file)
1624 fprintf (dump_file, "SMS iis %d %d %d (rec_mii, mii, maxii)\n",
1625 rec_mii, mii, maxii);
1627 for (;;)
1629 set_node_sched_params (g);
1631 stage_count = 0;
1632 opt_sc_p = false;
1633 ps = sms_schedule_by_order (g, mii, maxii, node_order);
1635 if (ps)
1637 /* Try to achieve optimized SC by normalizing the partial
1638 schedule (having the cycles start from cycle zero).
1639 The branch location must be placed in row ii-1 in the
1640 final scheduling. If failed, shift all instructions to
1641 position the branch in row ii-1. */
1642 opt_sc_p = optimize_sc (ps, g);
1643 if (opt_sc_p)
1644 stage_count = calculate_stage_count (ps, 0);
1645 else
1647 /* Bring the branch to cycle ii-1. */
1648 int amount = (SCHED_TIME (g->closing_branch->cuid)
1649 - (ps->ii - 1));
1651 if (dump_file)
1652 fprintf (dump_file, "SMS schedule branch at cycle ii-1\n");
1654 stage_count = calculate_stage_count (ps, amount);
1657 gcc_assert (stage_count >= 1);
1660 /* The default value of PARAM_SMS_MIN_SC is 2 as a stage count of
1661 1 means that there is no interleaving between iterations, thus
1662 we let the scheduling passes do the job in this case. */
1663 if (stage_count < PARAM_VALUE (PARAM_SMS_MIN_SC)
1664 || (count_init && (loop_count <= stage_count))
1665 || (flag_branch_probabilities && (trip_count <= stage_count)))
1667 if (dump_file)
1669 fprintf (dump_file, "SMS failed... \n");
1670 fprintf (dump_file, "SMS sched-failed (stage-count=%d,"
1671 " loop-count=", stage_count);
1672 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, loop_count);
1673 fprintf (dump_file, ", trip-count=");
1674 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, trip_count);
1675 fprintf (dump_file, ")\n");
1677 break;
1680 if (!opt_sc_p)
1682 /* Rotate the partial schedule to have the branch in row ii-1. */
1683 int amount = SCHED_TIME (g->closing_branch->cuid) - (ps->ii - 1);
1685 reset_sched_times (ps, amount);
1686 rotate_partial_schedule (ps, amount);
1689 set_columns_for_ps (ps);
1691 min_cycle = PS_MIN_CYCLE (ps) - SMODULO (PS_MIN_CYCLE (ps), ps->ii);
1692 if (!schedule_reg_moves (ps))
1694 mii = ps->ii + 1;
1695 free_partial_schedule (ps);
1696 continue;
1699 /* Moves that handle incoming values might have been added
1700 to a new first stage. Bump the stage count if so.
1702 ??? Perhaps we could consider rotating the schedule here
1703 instead? */
1704 if (PS_MIN_CYCLE (ps) < min_cycle)
1706 reset_sched_times (ps, 0);
1707 stage_count++;
1710 /* The stage count should now be correct without rotation. */
1711 gcc_checking_assert (stage_count == calculate_stage_count (ps, 0));
1712 PS_STAGE_COUNT (ps) = stage_count;
1714 canon_loop (loop);
1716 if (dump_file)
1718 dump_insn_locator (tail);
1719 fprintf (dump_file, " SMS succeeded %d %d (with ii, sc)\n",
1720 ps->ii, stage_count);
1721 print_partial_schedule (ps, dump_file);
1724 /* In case the BCT count is not known, do loop-versioning. */
1725 if (count_reg && ! count_init)
1727 rtx comp_rtx = gen_rtx_fmt_ee (GT, VOIDmode, count_reg,
1728 GEN_INT(stage_count));
1729 unsigned prob = (PROB_SMS_ENOUGH_ITERATIONS
1730 * REG_BR_PROB_BASE) / 100;
1732 loop_version (loop, comp_rtx, &condition_bb,
1733 prob, prob, REG_BR_PROB_BASE - prob,
1734 true);
1737 /* Set new iteration count of loop kernel. */
1738 if (count_reg && count_init)
1739 SET_SRC (single_set (count_init)) = GEN_INT (loop_count
1740 - stage_count + 1);
1742 /* Now apply the scheduled kernel to the RTL of the loop. */
1743 permute_partial_schedule (ps, g->closing_branch->first_note);
1745 /* Mark this loop as software pipelined so the later
1746 scheduling passes don't touch it. */
1747 if (! flag_resched_modulo_sched)
1748 mark_loop_unsched (loop);
1750 /* The life-info is not valid any more. */
1751 df_set_bb_dirty (g->bb);
1753 apply_reg_moves (ps);
1754 if (dump_file)
1755 print_node_sched_params (dump_file, g->num_nodes, ps);
1756 /* Generate prolog and epilog. */
1757 generate_prolog_epilog (ps, loop, count_reg, count_init);
1758 break;
1761 free_partial_schedule (ps);
1762 VEC_free (node_sched_params, heap, node_sched_param_vec);
1763 free (node_order);
1764 free_ddg (g);
1767 free (g_arr);
1769 /* Release scheduler data, needed until now because of DFA. */
1770 haifa_sched_finish ();
1771 loop_optimizer_finalize ();
1774 /* The SMS scheduling algorithm itself
1775 -----------------------------------
1776 Input: 'O' an ordered list of insns of a loop.
1777 Output: A scheduling of the loop - kernel, prolog, and epilogue.
1779 'Q' is the empty Set
1780 'PS' is the partial schedule; it holds the currently scheduled nodes with
1781 their cycle/slot.
1782 'PSP' previously scheduled predecessors.
1783 'PSS' previously scheduled successors.
1784 't(u)' the cycle where u is scheduled.
1785 'l(u)' is the latency of u.
1786 'd(v,u)' is the dependence distance from v to u.
1787 'ASAP(u)' the earliest time at which u could be scheduled as computed in
1788 the node ordering phase.
1789 'check_hardware_resources_conflicts(u, PS, c)'
1790 run a trace around cycle/slot through DFA model
1791 to check resource conflicts involving instruction u
1792 at cycle c given the partial schedule PS.
1793 'add_to_partial_schedule_at_time(u, PS, c)'
1794 Add the node/instruction u to the partial schedule
1795 PS at time c.
1796 'calculate_register_pressure(PS)'
1797 Given a schedule of instructions, calculate the register
1798 pressure it implies. One implementation could be the
1799 maximum number of overlapping live ranges.
1800 'maxRP' The maximum allowed register pressure, it is usually derived from the number of
1801 registers available in the hardware.
1803 1. II = MII.
1804 2. PS = empty list
1805 3. for each node u in O in pre-computed order
1806 4. if (PSP(u) != Q && PSS(u) == Q) then
1807 5. Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over every v in PSP(u).
1808 6. start = Early_start; end = Early_start + II - 1; step = 1
1809 11. else if (PSP(u) == Q && PSS(u) != Q) then
1810 12. Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over every v in PSS(u).
1811 13. start = Late_start; end = Late_start - II + 1; step = -1
1812 14. else if (PSP(u) != Q && PSS(u) != Q) then
1813 15. Early_start(u) = max ( t(v) + l(v) - d(v,u)*II ) over every v in PSP(u).
1814 16. Late_start(u) = min ( t(v) - l(v) + d(v,u)*II ) over every v in PSS(u).
1815 17. start = Early_start;
1816 18. end = min(Early_start + II - 1 , Late_start);
1817 19. step = 1
1818 20. else "if (PSP(u) == Q && PSS(u) == Q)"
1819 21. start = ASAP(u); end = start + II - 1; step = 1
1820 22. endif
1822 23. success = false
1823 24. for (c = start ; c != end ; c += step)
1824 25. if check_hardware_resources_conflicts(u, PS, c) then
1825 26. add_to_partial_schedule_at_time(u, PS, c)
1826 27. success = true
1827 28. break
1828 29. endif
1829 30. endfor
1830 31. if (success == false) then
1831 32. II = II + 1
1832 33. if (II > maxII) then
1833 34. finish - failed to schedule
1834 35. endif
1835 36. goto 2.
1836 37. endif
1837 38. endfor
1838 39. if (calculate_register_pressure(PS) > maxRP) then
1839 40. goto 32.
1840 41. endif
1841 42. compute epilogue & prologue
1842 43. finish - succeeded to schedule
1844 ??? The algorithm restricts the scheduling window to II cycles.
1845 In rare cases, it may be better to allow windows of II+1 cycles.
1846 The window would then start and end on the same row, but with
1847 different "must precede" and "must follow" requirements. */
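/* Illustrative only, a worked instance of lines 5-6 above: with II = 4
   and two previously scheduled predecessors v1 (t = 2, l = 2, d = 0)
   and v2 (t = 7, l = 1, d = 1),
   Early_start (u) = max (2 + 2 - 0*4, 7 + 1 - 1*4) = 4,
   so u is tried in the forward window of cycles 4 .. 4 + 4 - 1 = 7.  */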
1849 /* A limit on the number of cycles that resource conflicts can span. ??? Should
1850 be provided by DFA, and be dependent on the type of insn scheduled. Currently
1851 set to 0 to save compile time. */
1852 #define DFA_HISTORY SMS_DFA_HISTORY
1854 /* A threshold for the number of repeated unsuccessful attempts to insert
1855 an empty row, before we flush the partial schedule and start over. */
1856 #define MAX_SPLIT_NUM 10
1857 /* Given the partial schedule PS, this function calculates and returns the
1858 window of cycles in which we can schedule the node U_NODE (via START_P, END_P and STEP_P).
1859 NOTE: Here we do the backtracking in SMS, in some special cases. We have
1860 noticed that there are several cases in which we fail to SMS the loop
1861 because the sched window of a node is empty due to tight data-deps. In
1862 such cases we want to unschedule some of the predecessors/successors
1863 until we get a non-empty scheduling window. It returns -1 if the
1864 scheduling window is empty and zero otherwise. */
1866 static int
1867 get_sched_window (partial_schedule_ptr ps, ddg_node_ptr u_node,
1868 sbitmap sched_nodes, int ii, int *start_p, int *step_p,
1869 int *end_p)
1871 int start, step, end;
1872 int early_start, late_start;
1873 ddg_edge_ptr e;
1874 sbitmap psp = sbitmap_alloc (ps->g->num_nodes);
1875 sbitmap pss = sbitmap_alloc (ps->g->num_nodes);
1876 sbitmap u_node_preds = NODE_PREDECESSORS (u_node);
1877 sbitmap u_node_succs = NODE_SUCCESSORS (u_node);
1878 int psp_not_empty;
1879 int pss_not_empty;
1880 int count_preds;
1881 int count_succs;
1883 /* 1. compute sched window for u (start, end, step). */
1884 sbitmap_zero (psp);
1885 sbitmap_zero (pss);
1886 psp_not_empty = sbitmap_a_and_b_cg (psp, u_node_preds, sched_nodes);
1887 pss_not_empty = sbitmap_a_and_b_cg (pss, u_node_succs, sched_nodes);
1889 /* We first compute a forward range (start <= end), then decide whether
1890 to reverse it. */
1891 early_start = INT_MIN;
1892 late_start = INT_MAX;
1893 start = INT_MIN;
1894 end = INT_MAX;
1895 step = 1;
1897 count_preds = 0;
1898 count_succs = 0;
1900 if (dump_file && (psp_not_empty || pss_not_empty))
1902 fprintf (dump_file, "\nAnalyzing dependencies for node %d (INSN %d)"
1903 "; ii = %d\n\n", u_node->cuid, INSN_UID (u_node->insn), ii);
1904 fprintf (dump_file, "%11s %11s %11s %11s %5s\n",
1905 "start", "early start", "late start", "end", "time");
1906 fprintf (dump_file, "=========== =========== =========== ==========="
1907 " =====\n");
1909 /* Calculate early_start and limit end. Both bounds are inclusive. */
1910 if (psp_not_empty)
1911 for (e = u_node->in; e != 0; e = e->next_in)
1913 int v = e->src->cuid;
1915 if (TEST_BIT (sched_nodes, v))
1917 int p_st = SCHED_TIME (v);
1918 int earliest = p_st + e->latency - (e->distance * ii);
1919 int latest = (e->data_type == MEM_DEP ? p_st + ii - 1 : INT_MAX);
1921 if (dump_file)
1923 fprintf (dump_file, "%11s %11d %11s %11d %5d",
1924 "", earliest, "", latest, p_st);
1925 print_ddg_edge (dump_file, e);
1926 fprintf (dump_file, "\n");
1929 early_start = MAX (early_start, earliest);
1930 end = MIN (end, latest);
1932 if (e->type == TRUE_DEP && e->data_type == REG_DEP)
1933 count_preds++;
1937 /* Calculate late_start and limit start. Both bounds are inclusive. */
1938 if (pss_not_empty)
1939 for (e = u_node->out; e != 0; e = e->next_out)
1941 int v = e->dest->cuid;
1943 if (TEST_BIT (sched_nodes, v))
1945 int s_st = SCHED_TIME (v);
1946 int earliest = (e->data_type == MEM_DEP ? s_st - ii + 1 : INT_MIN);
1947 int latest = s_st - e->latency + (e->distance * ii);
1949 if (dump_file)
1951 fprintf (dump_file, "%11d %11s %11d %11s %5d",
1952 earliest, "", latest, "", s_st);
1953 print_ddg_edge (dump_file, e);
1954 fprintf (dump_file, "\n");
1957 start = MAX (start, earliest);
1958 late_start = MIN (late_start, latest);
1960 if (e->type == TRUE_DEP && e->data_type == REG_DEP)
1961 count_succs++;
1965 if (dump_file && (psp_not_empty || pss_not_empty))
1967 fprintf (dump_file, "----------- ----------- ----------- -----------"
1968 " -----\n");
1969 fprintf (dump_file, "%11d %11d %11d %11d %5s %s\n",
1970 start, early_start, late_start, end, "",
1971 "(max, max, min, min)");
1974 /* Get a target scheduling window no bigger than ii. */
1975 if (early_start == INT_MIN && late_start == INT_MAX)
1976 early_start = NODE_ASAP (u_node);
1977 else if (early_start == INT_MIN)
1978 early_start = late_start - (ii - 1);
1979 late_start = MIN (late_start, early_start + (ii - 1));
1981 /* Apply memory dependence limits. */
1982 start = MAX (start, early_start);
1983 end = MIN (end, late_start);
1985 if (dump_file && (psp_not_empty || pss_not_empty))
1986 fprintf (dump_file, "%11s %11d %11d %11s %5s final window\n",
1987 "", start, end, "", "");
1989 /* If there are at least as many successors as predecessors, schedule the
1990 node close to its successors. */
1991 if (pss_not_empty && count_succs >= count_preds)
1993 int tmp = end;
1994 end = start;
1995 start = tmp;
1996 step = -1;
1999 /* Now that we've finalized the window, make END an exclusive rather
2000 than an inclusive bound. */
2001 end += step;
2003 *start_p = start;
2004 *step_p = step;
2005 *end_p = end;
2006 sbitmap_free (psp);
2007 sbitmap_free (pss);
2009 if ((start >= end && step == 1) || (start <= end && step == -1))
2011 if (dump_file)
2012 fprintf (dump_file, "\nEmpty window: start=%d, end=%d, step=%d\n",
2013 start, end, step);
2014 return -1;
2017 return 0;
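/* For example (illustrative numbers only): with ii = 4 and a single
   scheduled predecessor at time 2, edge latency 1 and distance 0, we get
   early_start = 3, giving the inclusive window [3, 6]; the cycles
   3, 4, 5, 6 are tried in that order and *END_P is returned as the
   exclusive bound 7.  When a node has at least as many scheduled
   successors as predecessors, the bounds are swapped and the same window
   is traversed backwards with step -1.  */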
2020 /* Calculate the MUST_PRECEDE/MUST_FOLLOW bitmaps of U_NODE, which is the
2021 node currently being scheduled. At the end of the calculation
2022 MUST_PRECEDE/MUST_FOLLOW contains all predecessors/successors of
2023 U_NODE which are (1) already scheduled in the first/last row of
2024 U_NODE's scheduling window, (2) whose dependence inequality with U
2025 becomes an equality when U is scheduled in this same row, and (3)
2026 whose dependence latency is zero.
2028 The first and last rows are calculated using the following parameters:
2029 START/END - The cycles that begin/end the traversal of the window
2030 when searching for an empty cycle in which to schedule U_NODE.
2031 STEP - The direction in which we traverse the window.
2032 II - The initiation interval. */
2034 static void
2035 calculate_must_precede_follow (ddg_node_ptr u_node, int start, int end,
2036 int step, int ii, sbitmap sched_nodes,
2037 sbitmap must_precede, sbitmap must_follow)
2039 ddg_edge_ptr e;
2040 int first_cycle_in_window, last_cycle_in_window;
2042 gcc_assert (must_precede && must_follow);
2044 /* Consider the following scheduling window:
2045 {first_cycle_in_window, first_cycle_in_window+1, ...,
2046 last_cycle_in_window}. If step is 1 then the following will be
2047 the order we traverse the window: {start=first_cycle_in_window,
2048 first_cycle_in_window+1, ..., end=last_cycle_in_window+1},
2049 or {start=last_cycle_in_window, last_cycle_in_window-1, ...,
2050 end=first_cycle_in_window-1} if step is -1. */
2051 first_cycle_in_window = (step == 1) ? start : end - step;
2052 last_cycle_in_window = (step == 1) ? end - step : start;
2054 sbitmap_zero (must_precede);
2055 sbitmap_zero (must_follow);
2057 if (dump_file)
2058 fprintf (dump_file, "\nmust_precede: ");
2060 /* Instead of checking if:
2061 (SMODULO (SCHED_TIME (e->src), ii) == first_row_in_window)
2062 && ((SCHED_TIME (e->src) + e->latency - (e->distance * ii)) ==
2063 first_cycle_in_window)
2064 && e->latency == 0
2065 we use the fact that latency is non-negative:
2066 SCHED_TIME (e->src) - (e->distance * ii) <=
2067 SCHED_TIME (e->src) + e->latency - (e->distance * ii) <=
2068 first_cycle_in_window
2069 and check only if
2070 SCHED_TIME (e->src) - (e->distance * ii) == first_cycle_in_window */
2071 for (e = u_node->in; e != 0; e = e->next_in)
2072 if (TEST_BIT (sched_nodes, e->src->cuid)
2073 && ((SCHED_TIME (e->src->cuid) - (e->distance * ii)) ==
2074 first_cycle_in_window))
2076 if (dump_file)
2077 fprintf (dump_file, "%d ", e->src->cuid);
2079 SET_BIT (must_precede, e->src->cuid);
2082 if (dump_file)
2083 fprintf (dump_file, "\nmust_follow: ");
2085 /* Instead of checking if:
2086 (SMODULO (SCHED_TIME (e->dest), ii) == last_row_in_window)
2087 && ((SCHED_TIME (e->dest) - e->latency + (e->distance * ii)) ==
2088 last_cycle_in_window)
2089 && e->latency == 0
2090 we use the fact that latency is non-negative:
2091 SCHED_TIME (e->dest) + (e->distance * ii) >=
2092 SCHED_TIME (e->dest) - e->latency + (e->distance * ii) >=
2093 last_cycle_in_window
2094 and check only if
2095 SCHED_TIME (e->dest) + (e->distance * ii) == last_cycle_in_window */
2096 for (e = u_node->out; e != 0; e = e->next_out)
2097 if (TEST_BIT (sched_nodes, e->dest->cuid)
2098 && ((SCHED_TIME (e->dest->cuid) + (e->distance * ii)) ==
2099 last_cycle_in_window))
2101 if (dump_file)
2102 fprintf (dump_file, "%d ", e->dest->cuid);
2104 SET_BIT (must_follow, e->dest->cuid);
2107 if (dump_file)
2108 fprintf (dump_file, "\n");
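/* As a small worked example (illustrative numbers only): with ii = 4 and
   first_cycle_in_window = 5, a scheduled predecessor E->SRC with
   SCHED_TIME 5 and distance 0 satisfies 5 - 0*4 == 5 and is therefore put
   into MUST_PRECEDE: if U_NODE ends up in cycle 5 (the first row of the
   window), only a zero-latency dependence can be satisfied within that
   row, and then only by placing the predecessor in an earlier column of
   the row.  Predecessors whose SCHED_TIME - distance*ii falls before the
   first cycle are not added to MUST_PRECEDE.  */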
2111 /* Return 1 if U_NODE can be scheduled in CYCLE. Use the following
2112 parameters to decide if that's possible:
2113 PS - The partial schedule.
2114 U - The serial number of U_NODE.
2115 NUM_SPLITS - The number of row splits made so far.
2116 MUST_PRECEDE - The nodes that must precede U_NODE. (only valid at
2117 the first row of the scheduling window)
2118 MUST_FOLLOW - The nodes that must follow U_NODE. (only valid at the
2119 last row of the scheduling window) */
2121 static bool
2122 try_scheduling_node_in_cycle (partial_schedule_ptr ps,
2123 int u, int cycle, sbitmap sched_nodes,
2124 int *num_splits, sbitmap must_precede,
2125 sbitmap must_follow)
2127 ps_insn_ptr psi;
2128 bool success = false;
2130 verify_partial_schedule (ps, sched_nodes);
2131 psi = ps_add_node_check_conflicts (ps, u, cycle, must_precede, must_follow);
2132 if (psi)
2134 SCHED_TIME (u) = cycle;
2135 SET_BIT (sched_nodes, u);
2136 success = true;
2137 *num_splits = 0;
2138 if (dump_file)
2139 fprintf (dump_file, "Scheduled w/o split in %d\n", cycle);
2143 return success;
2146 /* This function implements the scheduling algorithm for SMS according to the
2147 above algorithm. */
2148 static partial_schedule_ptr
2149 sms_schedule_by_order (ddg_ptr g, int mii, int maxii, int *nodes_order)
2151 int ii = mii;
2152 int i, c, success, num_splits = 0;
2153 int flush_and_start_over = true;
2154 int num_nodes = g->num_nodes;
2155 int start, end, step; /* Place together into one struct? */
2156 sbitmap sched_nodes = sbitmap_alloc (num_nodes);
2157 sbitmap must_precede = sbitmap_alloc (num_nodes);
2158 sbitmap must_follow = sbitmap_alloc (num_nodes);
2159 sbitmap tobe_scheduled = sbitmap_alloc (num_nodes);
2161 partial_schedule_ptr ps = create_partial_schedule (ii, g, DFA_HISTORY);
2163 sbitmap_ones (tobe_scheduled);
2164 sbitmap_zero (sched_nodes);
2166 while (flush_and_start_over && (ii < maxii))
2169 if (dump_file)
2170 fprintf (dump_file, "Starting with ii=%d\n", ii);
2171 flush_and_start_over = false;
2172 sbitmap_zero (sched_nodes);
2174 for (i = 0; i < num_nodes; i++)
2176 int u = nodes_order[i];
2177 ddg_node_ptr u_node = &ps->g->nodes[u];
2178 rtx insn = u_node->insn;
2180 if (!NONDEBUG_INSN_P (insn))
2182 RESET_BIT (tobe_scheduled, u);
2183 continue;
2186 if (TEST_BIT (sched_nodes, u))
2187 continue;
2189 /* Try to get non-empty scheduling window. */
2190 success = 0;
2191 if (get_sched_window (ps, u_node, sched_nodes, ii, &start,
2192 &step, &end) == 0)
2194 if (dump_file)
2195 fprintf (dump_file, "\nTrying to schedule node %d "
2196 "INSN = %d in (%d .. %d) step %d\n", u, (INSN_UID
2197 (g->nodes[u].insn)), start, end, step);
2199 gcc_assert ((step > 0 && start < end)
2200 || (step < 0 && start > end));
2202 calculate_must_precede_follow (u_node, start, end, step, ii,
2203 sched_nodes, must_precede,
2204 must_follow);
2206 for (c = start; c != end; c += step)
2208 sbitmap tmp_precede, tmp_follow;
2210 set_must_precede_follow (&tmp_follow, must_follow,
2211 &tmp_precede, must_precede,
2212 c, start, end, step);
2213 success =
2214 try_scheduling_node_in_cycle (ps, u, c,
2215 sched_nodes,
2216 &num_splits, tmp_precede,
2217 tmp_follow);
2218 if (success)
2219 break;
2222 verify_partial_schedule (ps, sched_nodes);
2224 if (!success)
2226 int split_row;
2228 if (ii++ == maxii)
2229 break;
2231 if (num_splits >= MAX_SPLIT_NUM)
2233 num_splits = 0;
2234 flush_and_start_over = true;
2235 verify_partial_schedule (ps, sched_nodes);
2236 reset_partial_schedule (ps, ii);
2237 verify_partial_schedule (ps, sched_nodes);
2238 break;
2241 num_splits++;
2242 /* The scheduling window is exclusive of 'end'
2243 whereas compute_split_row () expects an inclusive,
2244 ordered range. */
2245 if (step == 1)
2246 split_row = compute_split_row (sched_nodes, start, end - 1,
2247 ps->ii, u_node);
2248 else
2249 split_row = compute_split_row (sched_nodes, end + 1, start,
2250 ps->ii, u_node);
2252 ps_insert_empty_row (ps, split_row, sched_nodes);
2253 i--; /* Go back and retry node i. */
2255 if (dump_file)
2256 fprintf (dump_file, "num_splits=%d\n", num_splits);
2259 /* ??? If (success), check register pressure estimates. */
2260 } /* Continue with next node. */
2261 } /* While flush_and_start_over. */
2262 if (ii >= maxii)
2264 free_partial_schedule (ps);
2265 ps = NULL;
2267 else
2268 gcc_assert (sbitmap_equal (tobe_scheduled, sched_nodes));
2270 sbitmap_free (sched_nodes);
2271 sbitmap_free (must_precede);
2272 sbitmap_free (must_follow);
2273 sbitmap_free (tobe_scheduled);
2275 return ps;
2278 /* This function inserts a new empty row into PS at the position
2279 given by SPLIT_ROW, keeping all already scheduled instructions
2280 intact and updating their SCHED_TIME and cycle accordingly. */
2281 static void
2282 ps_insert_empty_row (partial_schedule_ptr ps, int split_row,
2283 sbitmap sched_nodes)
2285 ps_insn_ptr crr_insn;
2286 ps_insn_ptr *rows_new;
2287 int ii = ps->ii;
2288 int new_ii = ii + 1;
2289 int row;
2290 int *rows_length_new;
2292 verify_partial_schedule (ps, sched_nodes);
2294 /* We normalize sched_time and rotate ps to have only non-negative sched
2295 times, for simplicity of updating cycles after inserting new row. */
2296 split_row -= ps->min_cycle;
2297 split_row = SMODULO (split_row, ii);
2298 if (dump_file)
2299 fprintf (dump_file, "split_row=%d\n", split_row);
2301 reset_sched_times (ps, PS_MIN_CYCLE (ps));
2302 rotate_partial_schedule (ps, PS_MIN_CYCLE (ps));
2304 rows_new = (ps_insn_ptr *) xcalloc (new_ii, sizeof (ps_insn_ptr));
2305 rows_length_new = (int *) xcalloc (new_ii, sizeof (int));
2306 for (row = 0; row < split_row; row++)
2308 rows_new[row] = ps->rows[row];
2309 rows_length_new[row] = ps->rows_length[row];
2310 ps->rows[row] = NULL;
2311 for (crr_insn = rows_new[row];
2312 crr_insn; crr_insn = crr_insn->next_in_row)
2314 int u = crr_insn->id;
2315 int new_time = SCHED_TIME (u) + (SCHED_TIME (u) / ii);
2317 SCHED_TIME (u) = new_time;
2318 crr_insn->cycle = new_time;
2319 SCHED_ROW (u) = new_time % new_ii;
2320 SCHED_STAGE (u) = new_time / new_ii;
2325 rows_new[split_row] = NULL;
2327 for (row = split_row; row < ii; row++)
2329 rows_new[row + 1] = ps->rows[row];
2330 rows_length_new[row + 1] = ps->rows_length[row];
2331 ps->rows[row] = NULL;
2332 for (crr_insn = rows_new[row + 1];
2333 crr_insn; crr_insn = crr_insn->next_in_row)
2335 int u = crr_insn->id;
2336 int new_time = SCHED_TIME (u) + (SCHED_TIME (u) / ii) + 1;
2338 SCHED_TIME (u) = new_time;
2339 crr_insn->cycle = new_time;
2340 SCHED_ROW (u) = new_time % new_ii;
2341 SCHED_STAGE (u) = new_time / new_ii;
2345 /* Updating ps. */
2346 ps->min_cycle = ps->min_cycle + ps->min_cycle / ii
2347 + (SMODULO (ps->min_cycle, ii) >= split_row ? 1 : 0);
2348 ps->max_cycle = ps->max_cycle + ps->max_cycle / ii
2349 + (SMODULO (ps->max_cycle, ii) >= split_row ? 1 : 0);
2350 free (ps->rows);
2351 ps->rows = rows_new;
2352 free (ps->rows_length);
2353 ps->rows_length = rows_length_new;
2354 ps->ii = new_ii;
2355 gcc_assert (ps->min_cycle >= 0);
2357 verify_partial_schedule (ps, sched_nodes);
2359 if (dump_file)
2360 fprintf (dump_file, "min_cycle=%d, max_cycle=%d\n", ps->min_cycle,
2361 ps->max_cycle);
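/* For instance (illustrative numbers only): with ii = 3 and split_row = 1,
   after normalization an insn at cycle 3 (row 0, stage 1) lies before the
   split row and moves to cycle 3 + 3/3 = 4, i.e. row 0, stage 1 of the new
   ii = 4 schedule, while an insn at cycle 4 (row 1, stage 1) lies at or
   after the split row and moves to cycle 4 + 4/3 + 1 = 6, i.e. row 2,
   stage 1.  */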
2364 /* Given U_NODE, the node that failed to be scheduled, and LOW and
2365 UP, the boundaries of its scheduling window, compute using
2366 SCHED_NODES and II a row in the partial schedule that can be split
2367 which will separate a critical predecessor from a critical successor
2368 thereby expanding the window, and return it. */
2369 static int
2370 compute_split_row (sbitmap sched_nodes, int low, int up, int ii,
2371 ddg_node_ptr u_node)
2373 ddg_edge_ptr e;
2374 int lower = INT_MIN, upper = INT_MAX;
2375 int crit_pred = -1;
2376 int crit_succ = -1;
2377 int crit_cycle;
2379 for (e = u_node->in; e != 0; e = e->next_in)
2381 int v = e->src->cuid;
2383 if (TEST_BIT (sched_nodes, v)
2384 && (low == SCHED_TIME (v) + e->latency - (e->distance * ii)))
2385 if (SCHED_TIME (v) > lower)
2387 crit_pred = v;
2388 lower = SCHED_TIME (v);
2392 if (crit_pred >= 0)
2394 crit_cycle = SCHED_TIME (crit_pred) + 1;
2395 return SMODULO (crit_cycle, ii);
2398 for (e = u_node->out; e != 0; e = e->next_out)
2400 int v = e->dest->cuid;
2402 if (TEST_BIT (sched_nodes, v)
2403 && (up == SCHED_TIME (v) - e->latency + (e->distance * ii)))
2404 if (SCHED_TIME (v) < upper)
2406 crit_succ = v;
2407 upper = SCHED_TIME (v);
2411 if (crit_succ >= 0)
2413 crit_cycle = SCHED_TIME (crit_succ);
2414 return SMODULO (crit_cycle, ii);
2417 if (dump_file)
2418 fprintf (dump_file, "Both crit_pred and crit_succ are NULL\n");
2420 return SMODULO ((low + up + 1) / 2, ii);
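/* For example (illustrative numbers only): with ii = 4, if the window was
   squeezed shut by a critical predecessor scheduled at cycle 7 (i.e. LOW
   equals that predecessor's time plus latency minus distance*ii), the
   function returns SMODULO (7 + 1, 4) = 0, so the new empty row is
   inserted, modulo ii, just after the row holding that predecessor.  */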
2423 static void
2424 verify_partial_schedule (partial_schedule_ptr ps, sbitmap sched_nodes)
2426 int row;
2427 ps_insn_ptr crr_insn;
2429 for (row = 0; row < ps->ii; row++)
2431 int length = 0;
2433 for (crr_insn = ps->rows[row]; crr_insn; crr_insn = crr_insn->next_in_row)
2435 int u = crr_insn->id;
2437 length++;
2438 gcc_assert (TEST_BIT (sched_nodes, u));
2439 /* ??? Test also that all nodes of sched_nodes are in ps, perhaps by
2440 popcount (sched_nodes) == number of insns in ps. */
2441 gcc_assert (SCHED_TIME (u) >= ps->min_cycle);
2442 gcc_assert (SCHED_TIME (u) <= ps->max_cycle);
2445 gcc_assert (ps->rows_length[row] == length);
2450 /* This page implements the algorithm for ordering the nodes of a DDG
2451 for modulo scheduling, activated through the
2452 "int sms_order_nodes (ddg_ptr, int mii, int * result)" API. */
2454 #define ORDER_PARAMS(x) ((struct node_order_params *) (x)->aux.info)
2455 #define ASAP(x) (ORDER_PARAMS ((x))->asap)
2456 #define ALAP(x) (ORDER_PARAMS ((x))->alap)
2457 #define HEIGHT(x) (ORDER_PARAMS ((x))->height)
2458 #define MOB(x) (ALAP ((x)) - ASAP ((x)))
2459 #define DEPTH(x) (ASAP ((x)))
2461 typedef struct node_order_params * nopa;
2463 static void order_nodes_of_sccs (ddg_all_sccs_ptr, int * result);
2464 static int order_nodes_in_scc (ddg_ptr, sbitmap, sbitmap, int*, int);
2465 static nopa calculate_order_params (ddg_ptr, int, int *);
2466 static int find_max_asap (ddg_ptr, sbitmap);
2467 static int find_max_hv_min_mob (ddg_ptr, sbitmap);
2468 static int find_max_dv_min_mob (ddg_ptr, sbitmap);
2470 enum sms_direction {BOTTOMUP, TOPDOWN};
2472 struct node_order_params
2474 int asap;
2475 int alap;
2476 int height;
2479 /* Check if NODE_ORDER contains a permutation of 0 .. NUM_NODES-1. */
2480 static void
2481 check_nodes_order (int *node_order, int num_nodes)
2483 int i;
2484 sbitmap tmp = sbitmap_alloc (num_nodes);
2486 sbitmap_zero (tmp);
2488 if (dump_file)
2489 fprintf (dump_file, "SMS final nodes order: \n");
2491 for (i = 0; i < num_nodes; i++)
2493 int u = node_order[i];
2495 if (dump_file)
2496 fprintf (dump_file, "%d ", u);
2497 gcc_assert (u < num_nodes && u >= 0 && !TEST_BIT (tmp, u));
2499 SET_BIT (tmp, u);
2502 if (dump_file)
2503 fprintf (dump_file, "\n");
2505 sbitmap_free (tmp);
2508 /* Order the nodes of G for scheduling and pass the result in
2509 NODE_ORDER. Also set aux.count of each node to ASAP.
2510 Put the maximal ASAP in PMAX_ASAP. Return the recMII for the given DDG. */
2511 static int
2512 sms_order_nodes (ddg_ptr g, int mii, int * node_order, int *pmax_asap)
2514 int i;
2515 int rec_mii = 0;
2516 ddg_all_sccs_ptr sccs = create_ddg_all_sccs (g);
2518 nopa nops = calculate_order_params (g, mii, pmax_asap);
2520 if (dump_file)
2521 print_sccs (dump_file, sccs, g);
2523 order_nodes_of_sccs (sccs, node_order);
2525 if (sccs->num_sccs > 0)
2526 /* First SCC has the largest recurrence_length. */
2527 rec_mii = sccs->sccs[0]->recurrence_length;
2529 /* Save ASAP before destroying node_order_params. */
2530 for (i = 0; i < g->num_nodes; i++)
2532 ddg_node_ptr v = &g->nodes[i];
2533 v->aux.count = ASAP (v);
2536 free (nops);
2537 free_ddg_all_sccs (sccs);
2538 check_nodes_order (node_order, g->num_nodes);
2540 return rec_mii;
2543 static void
2544 order_nodes_of_sccs (ddg_all_sccs_ptr all_sccs, int * node_order)
2546 int i, pos = 0;
2547 ddg_ptr g = all_sccs->ddg;
2548 int num_nodes = g->num_nodes;
2549 sbitmap prev_sccs = sbitmap_alloc (num_nodes);
2550 sbitmap on_path = sbitmap_alloc (num_nodes);
2551 sbitmap tmp = sbitmap_alloc (num_nodes);
2552 sbitmap ones = sbitmap_alloc (num_nodes);
2554 sbitmap_zero (prev_sccs);
2555 sbitmap_ones (ones);
2557 /* Perform the node ordering starting from the SCC with the highest recMII.
2558 For each SCC order the nodes according to their ASAP/ALAP/HEIGHT etc. */
2559 for (i = 0; i < all_sccs->num_sccs; i++)
2561 ddg_scc_ptr scc = all_sccs->sccs[i];
2563 /* Add nodes on paths from previous SCCs to the current SCC. */
2564 find_nodes_on_paths (on_path, g, prev_sccs, scc->nodes);
2565 sbitmap_a_or_b (tmp, scc->nodes, on_path);
2567 /* Add nodes on paths from the current SCC to previous SCCs. */
2568 find_nodes_on_paths (on_path, g, scc->nodes, prev_sccs);
2569 sbitmap_a_or_b (tmp, tmp, on_path);
2571 /* Remove nodes of previous SCCs from current extended SCC. */
2572 sbitmap_difference (tmp, tmp, prev_sccs);
2574 pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
2575 /* Above call to order_nodes_in_scc updated prev_sccs |= tmp. */
2578 /* Handle the remaining nodes that do not belong to any scc. Each call
2579 to order_nodes_in_scc handles a single connected component. */
2580 while (pos < g->num_nodes)
2582 sbitmap_difference (tmp, ones, prev_sccs);
2583 pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
2585 sbitmap_free (prev_sccs);
2586 sbitmap_free (on_path);
2587 sbitmap_free (tmp);
2588 sbitmap_free (ones);
2591 /* MII is needed if we consider backarcs (that do not close recursive cycles). */
2592 static struct node_order_params *
2593 calculate_order_params (ddg_ptr g, int mii ATTRIBUTE_UNUSED, int *pmax_asap)
2595 int u;
2596 int max_asap;
2597 int num_nodes = g->num_nodes;
2598 ddg_edge_ptr e;
2599 /* Allocate a place to hold ordering params for each node in the DDG. */
2600 nopa node_order_params_arr;
2602 /* Initialize of ASAP/ALAP/HEIGHT to zero. */
2603 node_order_params_arr = (nopa) xcalloc (num_nodes,
2604 sizeof (struct node_order_params));
2606 /* Set the aux pointer of each node to point to its order_params structure. */
2607 for (u = 0; u < num_nodes; u++)
2608 g->nodes[u].aux.info = &node_order_params_arr[u];
2610 /* Disregarding a backarc from each recursive cycle to obtain a DAG,
2611 calculate ASAP, ALAP, mobility, distance, and height for each node
2612 in the dependence (directed acyclic) graph.
2614 /* We assume that the nodes in the array are in topological order. */
2616 max_asap = 0;
2617 for (u = 0; u < num_nodes; u++)
2619 ddg_node_ptr u_node = &g->nodes[u];
2621 ASAP (u_node) = 0;
2622 for (e = u_node->in; e; e = e->next_in)
2623 if (e->distance == 0)
2624 ASAP (u_node) = MAX (ASAP (u_node),
2625 ASAP (e->src) + e->latency);
2626 max_asap = MAX (max_asap, ASAP (u_node));
2629 for (u = num_nodes - 1; u > -1; u--)
2631 ddg_node_ptr u_node = &g->nodes[u];
2633 ALAP (u_node) = max_asap;
2634 HEIGHT (u_node) = 0;
2635 for (e = u_node->out; e; e = e->next_out)
2636 if (e->distance == 0)
2638 ALAP (u_node) = MIN (ALAP (u_node),
2639 ALAP (e->dest) - e->latency);
2640 HEIGHT (u_node) = MAX (HEIGHT (u_node),
2641 HEIGHT (e->dest) + e->latency);
2644 if (dump_file)
2646 fprintf (dump_file, "\nOrder params\n");
2647 for (u = 0; u < num_nodes; u++)
2649 ddg_node_ptr u_node = &g->nodes[u];
2651 fprintf (dump_file, "node %d, ASAP: %d, ALAP: %d, HEIGHT: %d\n", u,
2652 ASAP (u_node), ALAP (u_node), HEIGHT (u_node));
2656 *pmax_asap = max_asap;
2657 return node_order_params_arr;
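/* As a small worked example (hypothetical graph): for a chain
   v0 -> v1 -> v2 of unit-latency, zero-distance edges plus an extra node
   v3 fed only by v0 -> v3 (latency 1, no outgoing edges), the loops above
   give ASAP = 0, 1, 2, 1, max_asap = 2, ALAP = 0, 1, 2, 2 and
   HEIGHT = 2, 1, 0, 0, hence MOB = ALAP - ASAP = 0, 0, 0, 1: only the
   off-critical-path node v3 has any scheduling freedom.  */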
2660 static int
2661 find_max_asap (ddg_ptr g, sbitmap nodes)
2663 unsigned int u = 0;
2664 int max_asap = -1;
2665 int result = -1;
2666 sbitmap_iterator sbi;
2668 EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
2670 ddg_node_ptr u_node = &g->nodes[u];
2672 if (max_asap < ASAP (u_node))
2674 max_asap = ASAP (u_node);
2675 result = u;
2678 return result;
2681 static int
2682 find_max_hv_min_mob (ddg_ptr g, sbitmap nodes)
2684 unsigned int u = 0;
2685 int max_hv = -1;
2686 int min_mob = INT_MAX;
2687 int result = -1;
2688 sbitmap_iterator sbi;
2690 EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
2692 ddg_node_ptr u_node = &g->nodes[u];
2694 if (max_hv < HEIGHT (u_node))
2696 max_hv = HEIGHT (u_node);
2697 min_mob = MOB (u_node);
2698 result = u;
2700 else if ((max_hv == HEIGHT (u_node))
2701 && (min_mob > MOB (u_node)))
2703 min_mob = MOB (u_node);
2704 result = u;
2707 return result;
2710 static int
2711 find_max_dv_min_mob (ddg_ptr g, sbitmap nodes)
2713 unsigned int u = 0;
2714 int max_dv = -1;
2715 int min_mob = INT_MAX;
2716 int result = -1;
2717 sbitmap_iterator sbi;
2719 EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, u, sbi)
2721 ddg_node_ptr u_node = &g->nodes[u];
2723 if (max_dv < DEPTH (u_node))
2725 max_dv = DEPTH (u_node);
2726 min_mob = MOB (u_node);
2727 result = u;
2729 else if ((max_dv == DEPTH (u_node))
2730 && (min_mob > MOB (u_node)))
2732 min_mob = MOB (u_node);
2733 result = u;
2736 return result;
2739 /* Places the nodes of SCC into the NODE_ORDER array starting
2740 at position POS, according to the SMS ordering algorithm.
2741 NODES_ORDERED (in&out parameter) holds the bitset of all nodes in
2742 the NODE_ORDER array, starting from position zero. */
2743 static int
2744 order_nodes_in_scc (ddg_ptr g, sbitmap nodes_ordered, sbitmap scc,
2745 int * node_order, int pos)
2747 enum sms_direction dir;
2748 int num_nodes = g->num_nodes;
2749 sbitmap workset = sbitmap_alloc (num_nodes);
2750 sbitmap tmp = sbitmap_alloc (num_nodes);
2751 sbitmap zero_bitmap = sbitmap_alloc (num_nodes);
2752 sbitmap predecessors = sbitmap_alloc (num_nodes);
2753 sbitmap successors = sbitmap_alloc (num_nodes);
2755 sbitmap_zero (predecessors);
2756 find_predecessors (predecessors, g, nodes_ordered);
2758 sbitmap_zero (successors);
2759 find_successors (successors, g, nodes_ordered);
2761 sbitmap_zero (tmp);
2762 if (sbitmap_a_and_b_cg (tmp, predecessors, scc))
2764 sbitmap_copy (workset, tmp);
2765 dir = BOTTOMUP;
2767 else if (sbitmap_a_and_b_cg (tmp, successors, scc))
2769 sbitmap_copy (workset, tmp);
2770 dir = TOPDOWN;
2772 else
2774 int u;
2776 sbitmap_zero (workset);
2777 if ((u = find_max_asap (g, scc)) >= 0)
2778 SET_BIT (workset, u);
2779 dir = BOTTOMUP;
2782 sbitmap_zero (zero_bitmap);
2783 while (!sbitmap_equal (workset, zero_bitmap))
2785 int v;
2786 ddg_node_ptr v_node;
2787 sbitmap v_node_preds;
2788 sbitmap v_node_succs;
2790 if (dir == TOPDOWN)
2792 while (!sbitmap_equal (workset, zero_bitmap))
2794 v = find_max_hv_min_mob (g, workset);
2795 v_node = &g->nodes[v];
2796 node_order[pos++] = v;
2797 v_node_succs = NODE_SUCCESSORS (v_node);
2798 sbitmap_a_and_b (tmp, v_node_succs, scc);
2800 /* Don't consider the already ordered successors again. */
2801 sbitmap_difference (tmp, tmp, nodes_ordered);
2802 sbitmap_a_or_b (workset, workset, tmp);
2803 RESET_BIT (workset, v);
2804 SET_BIT (nodes_ordered, v);
2806 dir = BOTTOMUP;
2807 sbitmap_zero (predecessors);
2808 find_predecessors (predecessors, g, nodes_ordered);
2809 sbitmap_a_and_b (workset, predecessors, scc);
2811 else
2813 while (!sbitmap_equal (workset, zero_bitmap))
2815 v = find_max_dv_min_mob (g, workset);
2816 v_node = &g->nodes[v];
2817 node_order[pos++] = v;
2818 v_node_preds = NODE_PREDECESSORS (v_node);
2819 sbitmap_a_and_b (tmp, v_node_preds, scc);
2821 /* Don't consider the already ordered predecessors again. */
2822 sbitmap_difference (tmp, tmp, nodes_ordered);
2823 sbitmap_a_or_b (workset, workset, tmp);
2824 RESET_BIT (workset, v);
2825 SET_BIT (nodes_ordered, v);
2827 dir = TOPDOWN;
2828 sbitmap_zero (successors);
2829 find_successors (successors, g, nodes_ordered);
2830 sbitmap_a_and_b (workset, successors, scc);
2833 sbitmap_free (tmp);
2834 sbitmap_free (workset);
2835 sbitmap_free (zero_bitmap);
2836 sbitmap_free (predecessors);
2837 sbitmap_free (successors);
2838 return pos;
2842 /* This page contains functions for manipulating partial-schedules during
2843 modulo scheduling. */
2845 /* Create a partial schedule and allocate memory to hold II rows. */
2847 static partial_schedule_ptr
2848 create_partial_schedule (int ii, ddg_ptr g, int history)
2850 partial_schedule_ptr ps = XNEW (struct partial_schedule);
2851 ps->rows = (ps_insn_ptr *) xcalloc (ii, sizeof (ps_insn_ptr));
2852 ps->rows_length = (int *) xcalloc (ii, sizeof (int));
2853 ps->reg_moves = NULL;
2854 ps->ii = ii;
2855 ps->history = history;
2856 ps->min_cycle = INT_MAX;
2857 ps->max_cycle = INT_MIN;
2858 ps->g = g;
2860 return ps;
2863 /* Free the PS_INSNs in the rows array of the given partial schedule.
2864 ??? Consider caching the PS_INSNs. */
2865 static void
2866 free_ps_insns (partial_schedule_ptr ps)
2868 int i;
2870 for (i = 0; i < ps->ii; i++)
2872 while (ps->rows[i])
2874 ps_insn_ptr ps_insn = ps->rows[i]->next_in_row;
2876 free (ps->rows[i]);
2877 ps->rows[i] = ps_insn;
2879 ps->rows[i] = NULL;
2883 /* Free all the memory allocated to the partial schedule. */
2885 static void
2886 free_partial_schedule (partial_schedule_ptr ps)
2888 ps_reg_move_info *move;
2889 unsigned int i;
2891 if (!ps)
2892 return;
2894 FOR_EACH_VEC_ELT (ps_reg_move_info, ps->reg_moves, i, move)
2895 sbitmap_free (move->uses);
2896 VEC_free (ps_reg_move_info, heap, ps->reg_moves);
2898 free_ps_insns (ps);
2899 free (ps->rows);
2900 free (ps->rows_length);
2901 free (ps);
2904 /* Clear the rows array with its PS_INSNs, and create a new one with
2905 NEW_II rows. */
2907 static void
2908 reset_partial_schedule (partial_schedule_ptr ps, int new_ii)
2910 if (!ps)
2911 return;
2912 free_ps_insns (ps);
2913 if (new_ii == ps->ii)
2914 return;
2915 ps->rows = (ps_insn_ptr *) xrealloc (ps->rows, new_ii
2916 * sizeof (ps_insn_ptr));
2917 memset (ps->rows, 0, new_ii * sizeof (ps_insn_ptr));
2918 ps->rows_length = (int *) xrealloc (ps->rows_length, new_ii * sizeof (int));
2919 memset (ps->rows_length, 0, new_ii * sizeof (int));
2920 ps->ii = new_ii;
2921 ps->min_cycle = INT_MAX;
2922 ps->max_cycle = INT_MIN;
2925 /* Print the partial schedule as an array of II rows; for each row,
2926 print the ids of the insns in it. */
2927 void
2928 print_partial_schedule (partial_schedule_ptr ps, FILE *dump)
2930 int i;
2932 for (i = 0; i < ps->ii; i++)
2934 ps_insn_ptr ps_i = ps->rows[i];
2936 fprintf (dump, "\n[ROW %d ]: ", i);
2937 while (ps_i)
2939 rtx insn = ps_rtl_insn (ps, ps_i->id);
2941 if (JUMP_P (insn))
2942 fprintf (dump, "%d (branch), ", INSN_UID (insn));
2943 else
2944 fprintf (dump, "%d, ", INSN_UID (insn));
2946 ps_i = ps_i->next_in_row;
2951 /* Create a PS_INSN object and initialize it with the given parameters. */
2952 static ps_insn_ptr
2953 create_ps_insn (int id, int cycle)
2955 ps_insn_ptr ps_i = XNEW (struct ps_insn);
2957 ps_i->id = id;
2958 ps_i->next_in_row = NULL;
2959 ps_i->prev_in_row = NULL;
2960 ps_i->cycle = cycle;
2962 return ps_i;
2966 /* Removes the given PS_INSN from the partial schedule. */
2967 static void
2968 remove_node_from_ps (partial_schedule_ptr ps, ps_insn_ptr ps_i)
2970 int row;
2972 gcc_assert (ps && ps_i);
2974 row = SMODULO (ps_i->cycle, ps->ii);
2975 if (! ps_i->prev_in_row)
2977 gcc_assert (ps_i == ps->rows[row]);
2978 ps->rows[row] = ps_i->next_in_row;
2979 if (ps->rows[row])
2980 ps->rows[row]->prev_in_row = NULL;
2982 else
2984 ps_i->prev_in_row->next_in_row = ps_i->next_in_row;
2985 if (ps_i->next_in_row)
2986 ps_i->next_in_row->prev_in_row = ps_i->prev_in_row;
2989 ps->rows_length[row] -= 1;
2990 free (ps_i);
2991 return;
2994 /* Unlike what literature describes for modulo scheduling (which focuses
2995 on VLIW machines), the order of the instructions inside a cycle is
2996 important. Given the bitmaps MUST_FOLLOW and MUST_PRECEDE we know
2997 where the current instruction should go relative to the already
2998 scheduled instructions in the given cycle. Go over these
2999 instructions and find the first possible column to put it in. */
3000 static bool
3001 ps_insn_find_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
3002 sbitmap must_precede, sbitmap must_follow)
3004 ps_insn_ptr next_ps_i;
3005 ps_insn_ptr first_must_follow = NULL;
3006 ps_insn_ptr last_must_precede = NULL;
3007 ps_insn_ptr last_in_row = NULL;
3008 int row;
3010 if (! ps_i)
3011 return false;
3013 row = SMODULO (ps_i->cycle, ps->ii);
3015 /* Find the first must follow and the last must precede
3016 and insert the node immediately after the must precede
3017 but make sure that there is no must-follow insn after it. */
3018 for (next_ps_i = ps->rows[row];
3019 next_ps_i;
3020 next_ps_i = next_ps_i->next_in_row)
3022 if (must_follow
3023 && TEST_BIT (must_follow, next_ps_i->id)
3024 && ! first_must_follow)
3025 first_must_follow = next_ps_i;
3026 if (must_precede && TEST_BIT (must_precede, next_ps_i->id))
3028 /* If we have already met a node that must follow, then
3029 there is no possible column. */
3030 if (first_must_follow)
3031 return false;
3032 else
3033 last_must_precede = next_ps_i;
3035 /* The closing branch must be the last in the row. */
3036 if (must_precede
3037 && TEST_BIT (must_precede, next_ps_i->id)
3038 && JUMP_P (ps_rtl_insn (ps, next_ps_i->id)))
3039 return false;
3041 last_in_row = next_ps_i;
3044 /* The closing branch is scheduled as well. Make sure there is no
3045 dependent instruction after it as the branch should be the last
3046 instruction in the row. */
3047 if (JUMP_P (ps_rtl_insn (ps, ps_i->id)))
3049 if (first_must_follow)
3050 return false;
3051 if (last_in_row)
3053 /* Make the branch the last in the row. New instructions
3054 will be inserted at the beginning of the row or after the
3055 last must_precede instruction thus the branch is guaranteed
3056 to remain the last instruction in the row. */
3057 last_in_row->next_in_row = ps_i;
3058 ps_i->prev_in_row = last_in_row;
3059 ps_i->next_in_row = NULL;
3061 else
3062 ps->rows[row] = ps_i;
3063 return true;
3066 /* Now insert the node after LAST_MUST_PRECEDE, or at the start of the row if there is none. */
3068 if (! last_must_precede)
3070 ps_i->next_in_row = ps->rows[row];
3071 ps_i->prev_in_row = NULL;
3072 if (ps_i->next_in_row)
3073 ps_i->next_in_row->prev_in_row = ps_i;
3074 ps->rows[row] = ps_i;
3076 else
3078 ps_i->next_in_row = last_must_precede->next_in_row;
3079 last_must_precede->next_in_row = ps_i;
3080 ps_i->prev_in_row = last_must_precede;
3081 if (ps_i->next_in_row)
3082 ps_i->next_in_row->prev_in_row = ps_i;
3085 return true;
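/* For example (hypothetical insns): if a row already holds A followed by
   B, and the node being placed has MUST_PRECEDE = {A} and
   MUST_FOLLOW = {B}, then LAST_MUST_PRECEDE is A and FIRST_MUST_FOLLOW is
   B, so the new insn is linked in between A and B.  If instead some
   MUST_PRECEDE insn were encountered after a MUST_FOLLOW insn in the row,
   no legal column exists and the function fails.  */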
3088 /* Advances the PS_INSN one column in its current row; returns false
3089 on failure and true on success. Bit N is set in MUST_FOLLOW if
3090 the node with cuid N must come after the node pointed to by
3091 PS_I when scheduled in the same cycle. */
3092 static int
3093 ps_insn_advance_column (partial_schedule_ptr ps, ps_insn_ptr ps_i,
3094 sbitmap must_follow)
3096 ps_insn_ptr prev, next;
3097 int row;
3099 if (!ps || !ps_i)
3100 return false;
3102 row = SMODULO (ps_i->cycle, ps->ii);
3104 if (! ps_i->next_in_row)
3105 return false;
3107 /* Check if next_in_row is dependent on ps_i, both having same sched
3108 times (typically ANTI_DEP). If so, ps_i cannot skip over it. */
3109 if (must_follow && TEST_BIT (must_follow, ps_i->next_in_row->id))
3110 return false;
3112 /* Advance PS_I over its next_in_row in the doubly linked list. */
3113 prev = ps_i->prev_in_row;
3114 next = ps_i->next_in_row;
3116 if (ps_i == ps->rows[row])
3117 ps->rows[row] = next;
3119 ps_i->next_in_row = next->next_in_row;
3121 if (next->next_in_row)
3122 next->next_in_row->prev_in_row = ps_i;
3124 next->next_in_row = ps_i;
3125 ps_i->prev_in_row = next;
3127 next->prev_in_row = prev;
3128 if (prev)
3129 prev->next_in_row = next;
3131 return true;
3134 /* Inserts a DDG_NODE to the given partial schedule at the given cycle.
3135 Returns 0 if this is not possible and a PS_INSN otherwise. Bit N is
3136 set in MUST_PRECEDE/MUST_FOLLOW if the node with cuid N must come
3137 before/after (respectively) the node pointed to by PS_I when scheduled
3138 in the same cycle. */
3139 static ps_insn_ptr
3140 add_node_to_ps (partial_schedule_ptr ps, int id, int cycle,
3141 sbitmap must_precede, sbitmap must_follow)
3143 ps_insn_ptr ps_i;
3144 int row = SMODULO (cycle, ps->ii);
3146 if (ps->rows_length[row] >= issue_rate)
3147 return NULL;
3149 ps_i = create_ps_insn (id, cycle);
3151 /* Finds and inserts PS_I according to MUST_FOLLOW and
3152 MUST_PRECEDE. */
3153 if (! ps_insn_find_column (ps, ps_i, must_precede, must_follow))
3155 free (ps_i);
3156 return NULL;
3159 ps->rows_length[row] += 1;
3160 return ps_i;
3163 /* Advance time one cycle. Assumes DFA is being used. */
3164 static void
3165 advance_one_cycle (void)
3167 if (targetm.sched.dfa_pre_cycle_insn)
3168 state_transition (curr_state,
3169 targetm.sched.dfa_pre_cycle_insn ());
3171 state_transition (curr_state, NULL);
3173 if (targetm.sched.dfa_post_cycle_insn)
3174 state_transition (curr_state,
3175 targetm.sched.dfa_post_cycle_insn ());
3180 /* Checks if PS has resource conflicts according to DFA, starting from
3181 FROM cycle to TO cycle; returns true if there are conflicts and false
3182 if there are no conflicts. Assumes DFA is being used. */
3183 static int
3184 ps_has_conflicts (partial_schedule_ptr ps, int from, int to)
3186 int cycle;
3188 state_reset (curr_state);
3190 for (cycle = from; cycle <= to; cycle++)
3192 ps_insn_ptr crr_insn;
3193 /* Holds the remaining issue slots in the current row. */
3194 int can_issue_more = issue_rate;
3196 /* Walk through the DFA for the current row. */
3197 for (crr_insn = ps->rows[SMODULO (cycle, ps->ii)];
3198 crr_insn;
3199 crr_insn = crr_insn->next_in_row)
3201 rtx insn = ps_rtl_insn (ps, crr_insn->id);
3203 if (!NONDEBUG_INSN_P (insn))
3204 continue;
3206 /* Check if there is room for the current insn. */
3207 if (!can_issue_more || state_dead_lock_p (curr_state))
3208 return true;
3210 /* Update the DFA state and return with failure if the DFA found
3211 resource conflicts. */
3212 if (state_transition (curr_state, insn) >= 0)
3213 return true;
3215 if (targetm.sched.variable_issue)
3216 can_issue_more =
3217 targetm.sched.variable_issue (sched_dump, sched_verbose,
3218 insn, can_issue_more);
3219 /* A naked CLOBBER or USE generates no instruction, so don't
3220 let them consume issue slots. */
3221 else if (GET_CODE (PATTERN (insn)) != USE
3222 && GET_CODE (PATTERN (insn)) != CLOBBER)
3223 can_issue_more--;
3226 /* Advance the DFA to the next cycle. */
3227 advance_one_cycle ();
3229 return false;
3232 /* Checks if the given node causes resource conflicts when added to PS at
3233 cycle C. If not, the node is added to PS and returned; otherwise NULL
3234 is returned. Bit N is set in MUST_PRECEDE/MUST_FOLLOW if the node with
3235 cuid N must come before/after (respectively) the node pointed to by
3236 PS_I when scheduled in the same cycle. */
3237 ps_insn_ptr
3238 ps_add_node_check_conflicts (partial_schedule_ptr ps, int n,
3239 int c, sbitmap must_precede,
3240 sbitmap must_follow)
3242 int has_conflicts = 0;
3243 ps_insn_ptr ps_i;
3245 /* First add the node to the PS, if this succeeds check for
3246 conflicts, trying different issue slots in the same row. */
3247 if (! (ps_i = add_node_to_ps (ps, n, c, must_precede, must_follow)))
3248 return NULL; /* Failed to insert the node at the given cycle. */
3250 has_conflicts = ps_has_conflicts (ps, c, c)
3251 || (ps->history > 0
3252 && ps_has_conflicts (ps,
3253 c - ps->history,
3254 c + ps->history));
3256 /* Try different issue slots to find one that the given node can be
3257 scheduled in without conflicts. */
3258 while (has_conflicts)
3260 if (! ps_insn_advance_column (ps, ps_i, must_follow))
3261 break;
3262 has_conflicts = ps_has_conflicts (ps, c, c)
3263 || (ps->history > 0
3264 && ps_has_conflicts (ps,
3265 c - ps->history,
3266 c + ps->history));
3269 if (has_conflicts)
3271 remove_node_from_ps (ps, ps_i);
3272 return NULL;
3275 ps->min_cycle = MIN (ps->min_cycle, c);
3276 ps->max_cycle = MAX (ps->max_cycle, c);
3277 return ps_i;
3280 /* Calculate the stage count of the partial schedule PS. The calculation
3281 takes into account the rotation amount passed in ROTATION_AMOUNT. */
3282 int
3283 calculate_stage_count (partial_schedule_ptr ps, int rotation_amount)
3285 int new_min_cycle = PS_MIN_CYCLE (ps) - rotation_amount;
3286 int new_max_cycle = PS_MAX_CYCLE (ps) - rotation_amount;
3287 int stage_count = CALC_STAGE_COUNT (-1, new_min_cycle, ps->ii);
3289 /* The calculation of the stage count is done by adding the number of stages
3290 before cycle zero and after cycle zero. */
3291 stage_count += CALC_STAGE_COUNT (new_max_cycle, 0, ps->ii);
3293 return stage_count;
3296 /* Rotate the rows of PS such that insns scheduled at time
3297 START_CYCLE will appear in row 0. Updates max/min_cycles. */
3298 void
3299 rotate_partial_schedule (partial_schedule_ptr ps, int start_cycle)
3301 int i, row, backward_rotates;
3302 int last_row = ps->ii - 1;
3304 if (start_cycle == 0)
3305 return;
3307 backward_rotates = SMODULO (start_cycle, ps->ii);
3309 /* Revisit later and optimize this into a single loop. */
3310 for (i = 0; i < backward_rotates; i++)
3312 ps_insn_ptr first_row = ps->rows[0];
3313 int first_row_length = ps->rows_length[0];
3315 for (row = 0; row < last_row; row++)
3317 ps->rows[row] = ps->rows[row + 1];
3318 ps->rows_length[row] = ps->rows_length[row + 1];
3321 ps->rows[last_row] = first_row;
3322 ps->rows_length[last_row] = first_row_length;
3325 ps->max_cycle -= start_cycle;
3326 ps->min_cycle -= start_cycle;
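/* For example (illustrative numbers only): with ii = 4 and
   START_CYCLE = 2, backward_rotates = 2, so rows {r0, r1, r2, r3} become
   {r2, r3, r0, r1} and both min_cycle and max_cycle drop by 2.  */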
3329 #endif /* INSN_SCHEDULING */
3331 static bool
3332 gate_handle_sms (void)
3334 return (optimize > 0 && flag_modulo_sched);
3338 /* Run instruction scheduler. */
3339 /* Perform SMS modulo scheduling. */
3340 static unsigned int
3341 rest_of_handle_sms (void)
3343 #ifdef INSN_SCHEDULING
3344 basic_block bb;
3346 /* Collect loop information to be used in SMS. */
3347 cfg_layout_initialize (0);
3348 sms_schedule ();
3350 /* Update the life information, because we add pseudos. */
3351 max_regno = max_reg_num ();
3353 /* Finalize layout changes. */
3354 FOR_EACH_BB (bb)
3355 if (bb->next_bb != EXIT_BLOCK_PTR)
3356 bb->aux = bb->next_bb;
3357 free_dominance_info (CDI_DOMINATORS);
3358 cfg_layout_finalize ();
3359 #endif /* INSN_SCHEDULING */
3360 return 0;
3363 struct rtl_opt_pass pass_sms =
3366 RTL_PASS,
3367 "sms", /* name */
3368 gate_handle_sms, /* gate */
3369 rest_of_handle_sms, /* execute */
3370 NULL, /* sub */
3371 NULL, /* next */
3372 0, /* static_pass_number */
3373 TV_SMS, /* tv_id */
3374 0, /* properties_required */
3375 0, /* properties_provided */
3376 0, /* properties_destroyed */
3377 0, /* todo_flags_start */
3378 TODO_df_finish
3379 | TODO_verify_flow
3380 | TODO_verify_rtl_sharing
3381 | TODO_ggc_collect /* todo_flags_finish */