/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgbuild.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "params.h"
#include "alloc-pool.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
    h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}
/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}
/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}
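/* Illustrative sketch (not part of the original sources): a typical way a
   dependence is built and recorded elsewhere in the scheduler is

       dep_def _dep, *dep = &_dep;

       init_dep (dep, producer_insn, consumer_insn, REG_DEP_TRUE);
       sd_add_dep (dep, false);

   where producer_insn and consumer_insn are placeholder names for two
   insns that have already been through sd_init_insn.  */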
/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      |DUMP_DEP_STATUS)
/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}
/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}
/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}
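/* Illustrative sketch: attaching a link at the head of a list boils down
   to passing the address of the list's first-link pointer, e.g.

       attach_dep_link (l, &DEPS_LIST_FIRST (list));

   which is exactly what add_to_deps_list below does; the PREV_NEXTP
   back-pointers are what make detach_dep_link an O(1) operation.  */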
/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
/* Pool to hold all dependency nodes (dep_node_t).  */
static pool_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static pool_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}
/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   insn in its constraint has one register class.  Even if there is
   currently no hard register in the insn, the particular hard
   register will be in the insn after reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap cache for true dependencies is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;
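/* Illustrative sketch: a cache query for "does CON depend on PRO?" is a
   bitmap test indexed by luids, e.g.

       bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		     INSN_LUID (pro));

   see sd_find_dep_between and ask_dependency_caches below for the
   actual uses.  */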
/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							   rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
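/* Illustrative example: conditions such as (eq (reg A) (const_int 0)) and
   (ne (reg A) (const_int 0)) guarding two insns are mutually exclusive,
   so no dependence needs to be recorded between the insns, provided
   neither insn modifies (reg A).  */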
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
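/* Illustrative sketch: sd_next_list is what lets an sd_iterator walk
   several lists in a single loop, e.g.

       sd_iterator_def sd_it;
       dep_t dep;

       FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
	 if (DEP_TYPE (dep) == REG_DEP_TRUE)
	   break;

   here SD_LIST_BACK covers both the hard and the speculative back
   lists of INSN.  */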
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
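/* Illustrative note: the expected lifecycle is sd_init_insn before any
   dependencies are added for an insn and sd_finish_insn once its lists
   are empty again, e.g.

       sd_init_insn (insn);
       ... analysis adds, resolves and finally deletes deps ...
       sd_finish_insn (insn);

   free_deps_list asserts that each list is empty at that point.  */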
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* Type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
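/* Illustrative sketch: callers that may see the same producer/consumer
   pair more than once go through this wrapper rather than sd_add_dep,
   e.g.

       dep_def _dep, *dep = &_dep;

       init_dep (dep, pro, con, REG_DEP_OUTPUT);
       if (sd_add_or_update_dep (dep, false) == DEP_CREATED)
	 ;  /* a new link was made, otherwise an existing one was reused  */

   cf. haifa_note_dep below, which follows the same pattern through
   maybe_add_or_update_dep_1.  */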
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
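/* Illustrative note: sd_debug_lists is intended to be called by hand from
   a debugger, e.g. "call sd_debug_lists (insn, SD_LIST_BACK)" to print an
   insn's backward dependencies to stderr.  */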
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
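/* Illustrative example: for the sequence

       (set (mem:SI (reg A)) (reg B))	; store 1
       (set (reg C) (mem:SI (reg A)))	; load
       (set (mem:SI (reg A)) (reg D))	; store 2

   the load has a true dependence on store 1, and store 2 has an anti
   dependence on the load and an output dependence on store 1.  A read
   following a read needs no link for correctness.  */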
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
/* Instruction whose dependencies we are analyzing.  */
static rtx_insn *cur_insn = NULL;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}
void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}
static void
haifa_note_dep (rtx_insn *elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}

static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx_insn *insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate reg_set_data structure for REGNO and INSN.  */
static void
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
}
/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
	{
	  use2 = create_insn_reg_use (i, list->insn ());
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
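/* Illustrative note: the "cycle list" built above is a circular
   singly-linked ring of reg_use_data structures, so starting from any use
   one can visit every other use of the same regno by following
   next_regno_use until arriving back at the starting node.  */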
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
2104 /* Update the register pressure info after the birth of pseudo or hard
2105 register REG in INSN.  Arguments CLOBBER_P and UNUSED_P indicate,
2106 respectively, that the register is only clobbered or is unused after the
2107 insn.  */
2108 static void
2109 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2111 int regno;
2113 if (GET_CODE (reg) == SUBREG)
2114 reg = SUBREG_REG (reg);
2116 if (! REG_P (reg))
2117 return;
2119 regno = REGNO (reg);
2120 if (regno < FIRST_PSEUDO_REGISTER)
2121 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2122 clobber_p, unused_p);
2123 else
2124 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2127 /* Update the register pressure info after death of pseudo register
2128 REGNO. */
2129 static void
2130 mark_pseudo_death (int regno)
2132 int incr;
2133 enum reg_class cl;
2135 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2136 cl = sched_regno_pressure_class[regno];
2137 if (cl != NO_REGS)
2139 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2140 reg_pressure_info[cl].change -= incr;
2144 /* Like mark_pseudo_death except that NREGS says how many hard
2145 registers are involved in the death.  */
2146 static void
2147 mark_hard_regno_death (int regno, int nregs)
2149 enum reg_class cl;
2150 int last = regno + nregs;
2152 while (regno < last)
2154 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2155 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2157 cl = sched_regno_pressure_class[regno];
2158 if (cl != NO_REGS)
2159 reg_pressure_info[cl].change -= 1;
2161 regno++;
2165 /* Update the register pressure info after death of pseudo or hard
2166 register REG. */
2167 static void
2168 mark_reg_death (rtx reg)
2170 int regno;
2172 if (GET_CODE (reg) == SUBREG)
2173 reg = SUBREG_REG (reg);
2175 if (! REG_P (reg))
2176 return;
2178 regno = REGNO (reg);
2179 if (regno < FIRST_PSEUDO_REGISTER)
2180 mark_hard_regno_death (regno, REG_NREGS (reg));
2181 else
2182 mark_pseudo_death (regno);
2185 /* Process SETTER of REG. DATA is an insn containing the setter. */
2186 static void
2187 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2189 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2190 return;
2191 mark_insn_reg_birth
2192 ((rtx) data, reg, false,
2193 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2196 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2197 static void
2198 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2200 if (GET_CODE (setter) == CLOBBER)
2201 mark_insn_reg_birth ((rtx) data, reg, true, false);
2204 /* Set up reg pressure info related to INSN. */
2205 void
2206 init_insn_reg_pressure_info (rtx_insn *insn)
2208 int i, len;
2209 enum reg_class cl;
2210 static struct reg_pressure_data *pressure_info;
2211 rtx link;
2213 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2215 if (! INSN_P (insn))
2216 return;
2218 for (i = 0; i < ira_pressure_classes_num; i++)
2220 cl = ira_pressure_classes[i];
2221 reg_pressure_info[cl].clobber_increase = 0;
2222 reg_pressure_info[cl].set_increase = 0;
2223 reg_pressure_info[cl].unused_set_increase = 0;
2224 reg_pressure_info[cl].change = 0;
2227 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2229 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2231 #ifdef AUTO_INC_DEC
2232 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2233 if (REG_NOTE_KIND (link) == REG_INC)
2234 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2235 #endif
2237 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2238 if (REG_NOTE_KIND (link) == REG_DEAD)
2239 mark_reg_death (XEXP (link, 0));
2241 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2242 pressure_info
2243 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2244 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2245 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2246 * sizeof (int), 1);
2247 for (i = 0; i < ira_pressure_classes_num; i++)
2249 cl = ira_pressure_classes[i];
2250 pressure_info[i].clobber_increase
2251 = reg_pressure_info[cl].clobber_increase;
2252 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2253 pressure_info[i].unused_set_increase
2254 = reg_pressure_info[cl].unused_set_increase;
2255 pressure_info[i].change = reg_pressure_info[cl].change;
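/* For illustration only: for an insn such as
   (set (reg:SI P1) (plus:SI (reg:SI P2) (reg:SI P3))) where P3 dies and
   SImode needs one register of each class involved, the code above records
   set_increase = 1 and change = +1 for P1's class and change = -1 (via the
   REG_DEAD note) for P3's class, then copies the per-class totals into
   INSN_REG_PRESSURE (insn).  If P1 and P3 share a class the net change is
   zero.  */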
2262 /* Internal variable for sched_analyze_[12] () functions.
2263 If it is nonzero, this means that sched_analyze_[12] looks
2264 at the most toplevel SET. */
2265 static bool can_start_lhs_rhs_p;
2267 /* Extend reg info for the deps context DEPS given that
2268 we have just generated a register numbered REGNO. */
2269 static void
2270 extend_deps_reg_info (struct deps_desc *deps, int regno)
2272 int max_regno = regno + 1;
2274 gcc_assert (!reload_completed);
2276 /* In a readonly context, it would not hurt to extend info,
2277 but it should not be needed. */
2278 if (reload_completed && deps->readonly)
2280 deps->max_reg = max_regno;
2281 return;
2284 if (max_regno > deps->max_reg)
2286 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2287 max_regno);
2288 memset (&deps->reg_last[deps->max_reg],
2289 0, (max_regno - deps->max_reg)
2290 * sizeof (struct deps_reg));
2291 deps->max_reg = max_regno;
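/* Sketch: if renaming in the selective scheduler creates a fresh pseudo,
   say regno 2000 while deps->max_reg is still 1998, the code above grows
   deps->reg_last to 2001 entries and zeroes the new slots, giving the new
   pseudo an empty dependence record.  (Regno 2000 is just an illustrative
   number.)  */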
2295 /* Extends REG_INFO_P if needed. */
2296 void
2297 maybe_extend_reg_info_p (void)
2299 /* Extend REG_INFO_P, if needed. */
2300 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2302 size_t new_reg_info_p_size = max_regno + 128;
2304 gcc_assert (!reload_completed && sel_sched_p ());
2306 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2307 new_reg_info_p_size,
2308 reg_info_p_size,
2309 sizeof (*reg_info_p));
2310 reg_info_p_size = new_reg_info_p_size;
2314 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2315 The type of the reference is specified by REF and can be SET,
2316 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2318 static void
2319 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2320 enum rtx_code ref, rtx_insn *insn)
2322 /* We could emit new pseudos in renaming. Extend the reg structures. */
2323 if (!reload_completed && sel_sched_p ()
2324 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2325 extend_deps_reg_info (deps, regno);
2327 maybe_extend_reg_info_p ();
2329 /* A hard reg in a wide mode may really be multiple registers.
2330 If so, mark all of them just like the first. */
2331 if (regno < FIRST_PSEUDO_REGISTER)
2333 int i = hard_regno_nregs[regno][mode];
2334 if (ref == SET)
2336 while (--i >= 0)
2337 note_reg_set (regno + i);
2339 else if (ref == USE)
2341 while (--i >= 0)
2342 note_reg_use (regno + i);
2344 else
2346 while (--i >= 0)
2347 note_reg_clobber (regno + i);
2351 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2352 it does not reload. Ignore these as they have served their
2353 purpose already. */
2354 else if (regno >= deps->max_reg)
2356 enum rtx_code code = GET_CODE (PATTERN (insn));
2357 gcc_assert (code == USE || code == CLOBBER);
2360 else
2362 if (ref == SET)
2363 note_reg_set (regno);
2364 else if (ref == USE)
2365 note_reg_use (regno);
2366 else
2367 note_reg_clobber (regno);
2369 /* Pseudos that are REG_EQUIV to something may be replaced
2370 by that during reloading. We need only add dependencies for
2371 the address in the REG_EQUIV note. */
2372 if (!reload_completed && get_reg_known_equiv_p (regno))
2374 rtx t = get_reg_known_value (regno);
2375 if (MEM_P (t))
2376 sched_analyze_2 (deps, XEXP (t, 0), insn);
2379 /* Don't let it cross a call after scheduling if it doesn't
2380 already cross one. */
2381 if (REG_N_CALLS_CROSSED (regno) == 0)
2383 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2384 deps->sched_before_next_call
2385 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2386 else
2387 add_dependence_list (insn, deps->last_function_call, 1,
2388 REG_DEP_ANTI, false);
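/* For example (register numbering here is purely illustrative): on a
   target where DImode spans two word-sized hard registers, analyzing
   (reg:DI 2) with REF == SET calls note_reg_set for both regno 2 and
   regno 3, so a later insn touching either half picks up the dependence.  */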
2393 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2394 rtx, X, creating all dependencies generated by the write to the
2395 destination of X, and reads of everything mentioned. */
2397 static void
2398 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2400 rtx dest = XEXP (x, 0);
2401 enum rtx_code code = GET_CODE (x);
2402 bool cslr_p = can_start_lhs_rhs_p;
2404 can_start_lhs_rhs_p = false;
2406 gcc_assert (dest);
2407 if (dest == 0)
2408 return;
2410 if (cslr_p && sched_deps_info->start_lhs)
2411 sched_deps_info->start_lhs (dest);
2413 if (GET_CODE (dest) == PARALLEL)
2415 int i;
2417 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2418 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2419 sched_analyze_1 (deps,
2420 gen_rtx_CLOBBER (VOIDmode,
2421 XEXP (XVECEXP (dest, 0, i), 0)),
2422 insn);
2424 if (cslr_p && sched_deps_info->finish_lhs)
2425 sched_deps_info->finish_lhs ();
2427 if (code == SET)
2429 can_start_lhs_rhs_p = cslr_p;
2431 sched_analyze_2 (deps, SET_SRC (x), insn);
2433 can_start_lhs_rhs_p = false;
2436 return;
2439 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2440 || GET_CODE (dest) == ZERO_EXTRACT)
2442 if (GET_CODE (dest) == STRICT_LOW_PART
2443 || GET_CODE (dest) == ZERO_EXTRACT
2444 || df_read_modify_subreg_p (dest))
2446 /* These both read and modify the result. We must handle
2447 them as writes to get proper dependencies for following
2448 instructions. We must handle them as reads to get proper
2449 dependencies from this to previous instructions.
2450 Thus we need to call sched_analyze_2. */
2452 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2454 if (GET_CODE (dest) == ZERO_EXTRACT)
2456 /* The second and third arguments are values read by this insn. */
2457 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2458 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2460 dest = XEXP (dest, 0);
2463 if (REG_P (dest))
2465 int regno = REGNO (dest);
2466 machine_mode mode = GET_MODE (dest);
2468 sched_analyze_reg (deps, regno, mode, code, insn);
2470 #ifdef STACK_REGS
2471 /* Treat all writes to a stack register as modifying the TOS. */
2472 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2474 /* Avoid analyzing the same register twice. */
2475 if (regno != FIRST_STACK_REG)
2476 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2478 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2479 FIRST_STACK_REG);
2481 #endif
2483 else if (MEM_P (dest))
2485 /* Writing memory. */
2486 rtx t = dest;
2488 if (sched_deps_info->use_cselib)
2490 machine_mode address_mode = get_address_mode (dest);
2492 t = shallow_copy_rtx (dest);
2493 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2494 GET_MODE (t), insn);
2495 XEXP (t, 0)
2496 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2497 insn);
2499 t = canon_rtx (t);
2501 /* Pending lists can't get larger with a readonly context. */
2502 if (!deps->readonly
2503 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2504 >= MAX_PENDING_LIST_LENGTH))
2506 /* Flush all pending reads and writes to prevent the pending lists
2507 from getting any larger. Insn scheduling runs too slowly when
2508 these lists get long. When compiling GCC with itself,
2509 this flush occurs 8 times for sparc, and 10 times for m88k using
2510 the default value of 32. */
2511 flush_pending_lists (deps, insn, false, true);
2513 else
2515 rtx_insn_list *pending;
2516 rtx_expr_list *pending_mem;
2518 pending = deps->pending_read_insns;
2519 pending_mem = deps->pending_read_mems;
2520 while (pending)
2522 if (anti_dependence (pending_mem->element (), t)
2523 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2524 note_mem_dep (t, pending_mem->element (), pending->insn (),
2525 DEP_ANTI);
2527 pending = pending->next ();
2528 pending_mem = pending_mem->next ();
2531 pending = deps->pending_write_insns;
2532 pending_mem = deps->pending_write_mems;
2533 while (pending)
2535 if (output_dependence (pending_mem->element (), t)
2536 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2537 note_mem_dep (t, pending_mem->element (),
2538 pending->insn (),
2539 DEP_OUTPUT);
2541 pending = pending->next ();
2542 pending_mem = pending_mem-> next ();
2545 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2546 REG_DEP_ANTI, true);
2547 add_dependence_list (insn, deps->pending_jump_insns, 1,
2548 REG_DEP_CONTROL, true);
2550 if (!deps->readonly)
2551 add_insn_mem_dependence (deps, false, insn, dest);
2553 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2556 if (cslr_p && sched_deps_info->finish_lhs)
2557 sched_deps_info->finish_lhs ();
2559 /* Analyze reads. */
2560 if (GET_CODE (x) == SET)
2562 can_start_lhs_rhs_p = cslr_p;
2564 sched_analyze_2 (deps, SET_SRC (x), insn);
2566 can_start_lhs_rhs_p = false;
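/* For illustration: when the destination handled above is a MEM, e.g.
   (set (mem:SI (reg:SI A)) (reg:SI B)), the store is tested against every
   pending read for an anti dependence and every pending write for an
   output dependence, and is then added to the pending write list so later
   memory references can depend on it; once the pending lists reach
   MAX_PENDING_LIST_LENGTH everything is flushed instead, trading
   dependence precision for compile time.  */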
2570 /* Analyze the uses of memory and registers in rtx X in INSN. */
2571 static void
2572 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2574 int i;
2575 int j;
2576 enum rtx_code code;
2577 const char *fmt;
2578 bool cslr_p = can_start_lhs_rhs_p;
2580 can_start_lhs_rhs_p = false;
2582 gcc_assert (x);
2583 if (x == 0)
2584 return;
2586 if (cslr_p && sched_deps_info->start_rhs)
2587 sched_deps_info->start_rhs (x);
2589 code = GET_CODE (x);
2591 switch (code)
2593 CASE_CONST_ANY:
2594 case SYMBOL_REF:
2595 case CONST:
2596 case LABEL_REF:
2597 /* Ignore constants. */
2598 if (cslr_p && sched_deps_info->finish_rhs)
2599 sched_deps_info->finish_rhs ();
2601 return;
2603 case CC0:
2604 if (!HAVE_cc0)
2605 gcc_unreachable ();
2607 /* User of CC0 depends on immediately preceding insn. */
2608 SCHED_GROUP_P (insn) = 1;
2609 /* Don't move CC0 setter to another block (it can set up the
2610 same flag for previous CC0 users which is safe). */
2611 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2613 if (cslr_p && sched_deps_info->finish_rhs)
2614 sched_deps_info->finish_rhs ();
2616 return;
2618 case REG:
2620 int regno = REGNO (x);
2621 machine_mode mode = GET_MODE (x);
2623 sched_analyze_reg (deps, regno, mode, USE, insn);
2625 #ifdef STACK_REGS
2626 /* Treat all reads of a stack register as modifying the TOS. */
2627 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2629 /* Avoid analyzing the same register twice. */
2630 if (regno != FIRST_STACK_REG)
2631 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2632 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2634 #endif
2636 if (cslr_p && sched_deps_info->finish_rhs)
2637 sched_deps_info->finish_rhs ();
2639 return;
2642 case MEM:
2644 /* Reading memory. */
2645 rtx_insn_list *u;
2646 rtx_insn_list *pending;
2647 rtx_expr_list *pending_mem;
2648 rtx t = x;
2650 if (sched_deps_info->use_cselib)
2652 machine_mode address_mode = get_address_mode (t);
2654 t = shallow_copy_rtx (t);
2655 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2656 GET_MODE (t), insn);
2657 XEXP (t, 0)
2658 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2659 insn);
2662 if (!DEBUG_INSN_P (insn))
2664 t = canon_rtx (t);
2665 pending = deps->pending_read_insns;
2666 pending_mem = deps->pending_read_mems;
2667 while (pending)
2669 if (read_dependence (pending_mem->element (), t)
2670 && ! sched_insns_conditions_mutex_p (insn,
2671 pending->insn ()))
2672 note_mem_dep (t, pending_mem->element (),
2673 pending->insn (),
2674 DEP_ANTI);
2676 pending = pending->next ();
2677 pending_mem = pending_mem->next ();
2680 pending = deps->pending_write_insns;
2681 pending_mem = deps->pending_write_mems;
2682 while (pending)
2684 if (true_dependence (pending_mem->element (), VOIDmode, t)
2685 && ! sched_insns_conditions_mutex_p (insn,
2686 pending->insn ()))
2687 note_mem_dep (t, pending_mem->element (),
2688 pending->insn (),
2689 sched_deps_info->generate_spec_deps
2690 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2692 pending = pending->next ();
2693 pending_mem = pending_mem->next ();
2696 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2697 add_dependence (insn, u->insn (), REG_DEP_ANTI);
2699 for (u = deps->pending_jump_insns; u; u = u->next ())
2700 if (deps_may_trap_p (x))
2702 if ((sched_deps_info->generate_spec_deps)
2703 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2705 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2706 MAX_DEP_WEAK);
2708 note_dep (u->insn (), ds);
2710 else
2711 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2715 /* Always add these dependencies to pending_reads, since
2716 this insn may be followed by a write. */
2717 if (!deps->readonly)
2719 if ((deps->pending_read_list_length
2720 + deps->pending_write_list_length)
2721 >= MAX_PENDING_LIST_LENGTH
2722 && !DEBUG_INSN_P (insn))
2723 flush_pending_lists (deps, insn, true, true);
2724 add_insn_mem_dependence (deps, true, insn, x);
2727 sched_analyze_2 (deps, XEXP (x, 0), insn);
2729 if (cslr_p && sched_deps_info->finish_rhs)
2730 sched_deps_info->finish_rhs ();
2732 return;
2735 /* Force pending stores to memory in case a trap handler needs them. */
2736 case TRAP_IF:
2737 flush_pending_lists (deps, insn, true, false);
2738 break;
2740 case PREFETCH:
2741 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2742 reg_pending_barrier = TRUE_BARRIER;
2743 /* A prefetch insn contains addresses only.  So if the prefetch
2744 address has no registers, there will be no dependencies on
2745 the prefetch insn.  This is wrong from a correctness point of
2746 view, since such a prefetch can be moved below a jump insn,
2747 which usually generates a MOVE_BARRIER that prevents insns
2748 containing registers or memories from being moved through the
2749 barrier.  It is also wrong from a performance point of view,
2750 since a prefetch without dependencies has a tendency to be
2751 issued later instead of earlier.  It is hard to generate
2752 accurate dependencies for prefetch insns, as a prefetch
2753 carries only the start address, but it is better to have
2754 something than nothing.  */
2755 if (!deps->readonly)
2757 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2758 if (sched_deps_info->use_cselib)
2759 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2760 add_insn_mem_dependence (deps, true, insn, x);
2762 break;
2764 case UNSPEC_VOLATILE:
2765 flush_pending_lists (deps, insn, true, true);
2766 /* FALLTHRU */
2768 case ASM_OPERANDS:
2769 case ASM_INPUT:
2771 /* Traditional and volatile asm instructions must be considered to use
2772 and clobber all hard registers, all pseudo-registers and all of
2773 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2775 Consider for instance a volatile asm that changes the fpu rounding
2776 mode. An insn should not be moved across this even if it only uses
2777 pseudo-regs because it might give an incorrectly rounded result. */
2778 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2779 && !DEBUG_INSN_P (insn))
2780 reg_pending_barrier = TRUE_BARRIER;
2782 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2783 We cannot just fall through here since then we would be confused
2784 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2785 a traditional asm, unlike its normal usage.  */
2787 if (code == ASM_OPERANDS)
2789 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2790 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2792 if (cslr_p && sched_deps_info->finish_rhs)
2793 sched_deps_info->finish_rhs ();
2795 return;
2797 break;
2800 case PRE_DEC:
2801 case POST_DEC:
2802 case PRE_INC:
2803 case POST_INC:
2804 /* These both read and modify the result. We must handle them as writes
2805 to get proper dependencies for following instructions. We must handle
2806 them as reads to get proper dependencies from this to previous
2807 instructions. Thus we need to pass them to both sched_analyze_1
2808 and sched_analyze_2. We must call sched_analyze_2 first in order
2809 to get the proper antecedent for the read. */
2810 sched_analyze_2 (deps, XEXP (x, 0), insn);
2811 sched_analyze_1 (deps, x, insn);
2813 if (cslr_p && sched_deps_info->finish_rhs)
2814 sched_deps_info->finish_rhs ();
2816 return;
2818 case POST_MODIFY:
2819 case PRE_MODIFY:
2820 /* op0 = op0 + op1 */
2821 sched_analyze_2 (deps, XEXP (x, 0), insn);
2822 sched_analyze_2 (deps, XEXP (x, 1), insn);
2823 sched_analyze_1 (deps, x, insn);
2825 if (cslr_p && sched_deps_info->finish_rhs)
2826 sched_deps_info->finish_rhs ();
2828 return;
2830 default:
2831 break;
2834 /* Other cases: walk the insn. */
2835 fmt = GET_RTX_FORMAT (code);
2836 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2838 if (fmt[i] == 'e')
2839 sched_analyze_2 (deps, XEXP (x, i), insn);
2840 else if (fmt[i] == 'E')
2841 for (j = 0; j < XVECLEN (x, i); j++)
2842 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2845 if (cslr_p && sched_deps_info->finish_rhs)
2846 sched_deps_info->finish_rhs ();
2849 /* Try to group two fusible insns together to prevent the scheduler
2850 from scheduling them apart.  */
2852 static void
2853 sched_macro_fuse_insns (rtx_insn *insn)
2855 rtx_insn *prev;
2857 if (any_condjump_p (insn))
2859 unsigned int condreg1, condreg2;
2860 rtx cc_reg_1;
2861 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2862 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2863 prev = prev_nonnote_nondebug_insn (insn);
2864 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2865 || !prev
2866 || !modified_in_p (cc_reg_1, prev))
2867 return;
2869 else
2871 rtx insn_set = single_set (insn);
2873 prev = prev_nonnote_nondebug_insn (insn);
2874 if (!prev
2875 || !insn_set
2876 || !single_set (prev))
2877 return;
2881 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2882 SCHED_GROUP_P (insn) = 1;
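/* For example (a sketch; the fusible pairs are entirely target-defined
   through targetm.sched.macro_fusion_pair_p): on x86-style targets a
   compare that sets the flags register immediately followed by a
   conditional jump reading those flags is a classic fusion pair; setting
   SCHED_GROUP_P on the jump keeps the two insns adjacent so the hardware
   can fuse them into a single macro-op.  */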
2886 /* Analyze an INSN with pattern X to find all dependencies. */
2887 static void
2888 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2890 RTX_CODE code = GET_CODE (x);
2891 rtx link;
2892 unsigned i;
2893 reg_set_iterator rsi;
2895 if (! reload_completed)
2897 HARD_REG_SET temp;
2899 extract_insn (insn);
2900 preprocess_constraints (insn);
2901 ira_implicitly_set_insn_hard_regs (&temp);
2902 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2903 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2906 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2907 && code == SET);
2909 /* Group compare and branch insns for macro-fusion. */
2910 if (targetm.sched.macro_fusion_p
2911 && targetm.sched.macro_fusion_p ())
2912 sched_macro_fuse_insns (insn);
2914 if (may_trap_p (x))
2915 /* Avoid moving trapping instructions across function calls that might
2916 not always return. */
2917 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2918 1, REG_DEP_ANTI, true);
2920 /* We must avoid creating a situation in which two successors of the
2921 current block have different unwind info after scheduling. If at any
2922 point the two paths re-join this leads to incorrect unwind info. */
2923 /* ??? There are certain situations involving a forced frame pointer in
2924 which, with extra effort, we could fix up the unwind info at a later
2925 CFG join. However, it seems better to notice these cases earlier
2926 during prologue generation and avoid marking the frame pointer setup
2927 as frame-related at all. */
2928 if (RTX_FRAME_RELATED_P (insn))
2930 /* Make sure prologue insn is scheduled before next jump. */
2931 deps->sched_before_next_jump
2932 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2934 /* Make sure epilogue insn is scheduled after preceding jumps. */
2935 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2936 true);
2939 if (code == COND_EXEC)
2941 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2943 /* ??? Should be recording conditions so we reduce the number of
2944 false dependencies. */
2945 x = COND_EXEC_CODE (x);
2946 code = GET_CODE (x);
2948 if (code == SET || code == CLOBBER)
2950 sched_analyze_1 (deps, x, insn);
2952 /* Bare clobber insns are used for letting life analysis, reg-stack
2953 and others know that a value is dead. Depend on the last call
2954 instruction so that reg-stack won't get confused. */
2955 if (code == CLOBBER)
2956 add_dependence_list (insn, deps->last_function_call, 1,
2957 REG_DEP_OUTPUT, true);
2959 else if (code == PARALLEL)
2961 for (i = XVECLEN (x, 0); i--;)
2963 rtx sub = XVECEXP (x, 0, i);
2964 code = GET_CODE (sub);
2966 if (code == COND_EXEC)
2968 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2969 sub = COND_EXEC_CODE (sub);
2970 code = GET_CODE (sub);
2972 if (code == SET || code == CLOBBER)
2973 sched_analyze_1 (deps, sub, insn);
2974 else
2975 sched_analyze_2 (deps, sub, insn);
2978 else
2979 sched_analyze_2 (deps, x, insn);
2981 /* Mark registers CLOBBERED or used by called function. */
2982 if (CALL_P (insn))
2984 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2986 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2987 sched_analyze_1 (deps, XEXP (link, 0), insn);
2988 else if (GET_CODE (XEXP (link, 0)) != SET)
2989 sched_analyze_2 (deps, XEXP (link, 0), insn);
2991 /* Don't schedule anything after a tail call; a tail call needs
2992 to use at least all call-saved registers.  */
2993 if (SIBLING_CALL_P (insn))
2994 reg_pending_barrier = TRUE_BARRIER;
2995 else if (find_reg_note (insn, REG_SETJMP, NULL))
2996 reg_pending_barrier = MOVE_BARRIER;
2999 if (JUMP_P (insn))
3001 rtx_insn *next = next_nonnote_nondebug_insn (insn);
3002 if (next && BARRIER_P (next))
3003 reg_pending_barrier = MOVE_BARRIER;
3004 else
3006 rtx_insn_list *pending;
3007 rtx_expr_list *pending_mem;
3009 if (sched_deps_info->compute_jump_reg_dependencies)
3011 (*sched_deps_info->compute_jump_reg_dependencies)
3012 (insn, reg_pending_control_uses);
3014 /* Make latency of jump equal to 0 by using anti-dependence. */
3015 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3017 struct deps_reg *reg_last = &deps->reg_last[i];
3018 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3019 false);
3020 add_dependence_list (insn, reg_last->implicit_sets,
3021 0, REG_DEP_ANTI, false);
3022 add_dependence_list (insn, reg_last->clobbers, 0,
3023 REG_DEP_ANTI, false);
3027 /* All memory writes and volatile reads must happen before the
3028 jump. Non-volatile reads must happen before the jump iff
3029 the result is needed by the above register used mask. */
3031 pending = deps->pending_write_insns;
3032 pending_mem = deps->pending_write_mems;
3033 while (pending)
3035 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3036 add_dependence (insn, pending->insn (),
3037 REG_DEP_OUTPUT);
3038 pending = pending->next ();
3039 pending_mem = pending_mem->next ();
3042 pending = deps->pending_read_insns;
3043 pending_mem = deps->pending_read_mems;
3044 while (pending)
3046 if (MEM_VOLATILE_P (pending_mem->element ())
3047 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3048 add_dependence (insn, pending->insn (),
3049 REG_DEP_OUTPUT);
3050 pending = pending->next ();
3051 pending_mem = pending_mem->next ();
3054 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3055 REG_DEP_ANTI, true);
3056 add_dependence_list (insn, deps->pending_jump_insns, 1,
3057 REG_DEP_ANTI, true);
3061 /* If this instruction can throw an exception, then moving it changes
3062 where block boundaries fall. This is mighty confusing elsewhere.
3063 Therefore, prevent such an instruction from being moved. Same for
3064 non-jump instructions that define block boundaries.
3065 ??? Unclear whether this is still necessary in EBB mode. If not,
3066 add_branch_dependences should be adjusted for RGN mode instead. */
3067 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3068 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3069 reg_pending_barrier = MOVE_BARRIER;
3071 if (sched_pressure != SCHED_PRESSURE_NONE)
3073 setup_insn_reg_uses (deps, insn);
3074 init_insn_reg_pressure_info (insn);
3077 /* Add register dependencies for insn. */
3078 if (DEBUG_INSN_P (insn))
3080 rtx_insn *prev = deps->last_debug_insn;
3081 rtx_insn_list *u;
3083 if (!deps->readonly)
3084 deps->last_debug_insn = insn;
3086 if (prev)
3087 add_dependence (insn, prev, REG_DEP_ANTI);
3089 add_dependence_list (insn, deps->last_function_call, 1,
3090 REG_DEP_ANTI, false);
3092 if (!sel_sched_p ())
3093 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3094 add_dependence (insn, u->insn (), REG_DEP_ANTI);
3096 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3098 struct deps_reg *reg_last = &deps->reg_last[i];
3099 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3100 /* There's no point in making REG_DEP_CONTROL dependencies for
3101 debug insns. */
3102 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3103 false);
3105 if (!deps->readonly)
3106 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3108 CLEAR_REG_SET (reg_pending_uses);
3110 /* Quite often, a debug insn will refer to stuff in the
3111 previous instruction, but the reason we want this
3112 dependency here is to make sure the scheduler doesn't
3113 gratuitously move a debug insn ahead. This could dirty
3114 DF flags and cause additional analysis that wouldn't have
3115 occurred in compilation without debug insns, and such
3116 additional analysis can modify the generated code. */
3117 prev = PREV_INSN (insn);
3119 if (prev && NONDEBUG_INSN_P (prev))
3120 add_dependence (insn, prev, REG_DEP_ANTI);
3122 else
3124 regset_head set_or_clobbered;
3126 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3128 struct deps_reg *reg_last = &deps->reg_last[i];
3129 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3130 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3131 false);
3132 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3133 false);
3135 if (!deps->readonly)
3137 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3138 reg_last->uses_length++;
3142 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3143 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3145 struct deps_reg *reg_last = &deps->reg_last[i];
3146 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3147 add_dependence_list (insn, reg_last->implicit_sets, 0,
3148 REG_DEP_ANTI, false);
3149 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3150 false);
3152 if (!deps->readonly)
3154 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3155 reg_last->uses_length++;
3159 if (targetm.sched.exposed_pipeline)
3161 INIT_REG_SET (&set_or_clobbered);
3162 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3163 reg_pending_sets);
3164 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3166 struct deps_reg *reg_last = &deps->reg_last[i];
3167 rtx list;
3168 for (list = reg_last->uses; list; list = XEXP (list, 1))
3170 rtx other = XEXP (list, 0);
3171 if (INSN_CACHED_COND (other) != const_true_rtx
3172 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3173 INSN_CACHED_COND (other) = const_true_rtx;
3178 /* If the current insn is conditional, we can't free any
3179 of the lists. */
3180 if (sched_has_condition_p (insn))
3182 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3184 struct deps_reg *reg_last = &deps->reg_last[i];
3185 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3186 false);
3187 add_dependence_list (insn, reg_last->implicit_sets, 0,
3188 REG_DEP_ANTI, false);
3189 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3190 false);
3191 add_dependence_list (insn, reg_last->control_uses, 0,
3192 REG_DEP_CONTROL, false);
3194 if (!deps->readonly)
3196 reg_last->clobbers
3197 = alloc_INSN_LIST (insn, reg_last->clobbers);
3198 reg_last->clobbers_length++;
3201 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3203 struct deps_reg *reg_last = &deps->reg_last[i];
3204 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3205 false);
3206 add_dependence_list (insn, reg_last->implicit_sets, 0,
3207 REG_DEP_ANTI, false);
3208 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3209 false);
3210 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3211 false);
3212 add_dependence_list (insn, reg_last->control_uses, 0,
3213 REG_DEP_CONTROL, false);
3215 if (!deps->readonly)
3216 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3219 else
3221 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3223 struct deps_reg *reg_last = &deps->reg_last[i];
3224 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3225 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3227 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3228 REG_DEP_OUTPUT, false);
3229 add_dependence_list_and_free (deps, insn,
3230 &reg_last->implicit_sets, 0,
3231 REG_DEP_ANTI, false);
3232 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3233 REG_DEP_ANTI, false);
3234 add_dependence_list_and_free (deps, insn,
3235 &reg_last->control_uses, 0,
3236 REG_DEP_ANTI, false);
3237 add_dependence_list_and_free (deps, insn,
3238 &reg_last->clobbers, 0,
3239 REG_DEP_OUTPUT, false);
3241 if (!deps->readonly)
3243 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3244 reg_last->clobbers_length = 0;
3245 reg_last->uses_length = 0;
3248 else
3250 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3251 false);
3252 add_dependence_list (insn, reg_last->implicit_sets, 0,
3253 REG_DEP_ANTI, false);
3254 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3255 false);
3256 add_dependence_list (insn, reg_last->control_uses, 0,
3257 REG_DEP_CONTROL, false);
3260 if (!deps->readonly)
3262 reg_last->clobbers_length++;
3263 reg_last->clobbers
3264 = alloc_INSN_LIST (insn, reg_last->clobbers);
3267 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3269 struct deps_reg *reg_last = &deps->reg_last[i];
3271 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3272 REG_DEP_OUTPUT, false);
3273 add_dependence_list_and_free (deps, insn,
3274 &reg_last->implicit_sets,
3275 0, REG_DEP_ANTI, false);
3276 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3277 REG_DEP_OUTPUT, false);
3278 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3279 REG_DEP_ANTI, false);
3280 add_dependence_list (insn, reg_last->control_uses, 0,
3281 REG_DEP_CONTROL, false);
3283 if (!deps->readonly)
3285 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3286 reg_last->uses_length = 0;
3287 reg_last->clobbers_length = 0;
3291 if (!deps->readonly)
3293 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3295 struct deps_reg *reg_last = &deps->reg_last[i];
3296 reg_last->control_uses
3297 = alloc_INSN_LIST (insn, reg_last->control_uses);
3302 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3303 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3305 struct deps_reg *reg_last = &deps->reg_last[i];
3306 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3307 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3308 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3309 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3310 false);
3312 if (!deps->readonly)
3313 reg_last->implicit_sets
3314 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3317 if (!deps->readonly)
3319 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3320 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3321 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3322 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3323 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3324 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3325 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3327 /* Set up the pending barrier found. */
3328 deps->last_reg_pending_barrier = reg_pending_barrier;
3331 CLEAR_REG_SET (reg_pending_uses);
3332 CLEAR_REG_SET (reg_pending_clobbers);
3333 CLEAR_REG_SET (reg_pending_sets);
3334 CLEAR_REG_SET (reg_pending_control_uses);
3335 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3336 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3338 /* Add dependencies if a scheduling barrier was found. */
3339 if (reg_pending_barrier)
3341 /* In the case of a barrier, most of the added dependencies are not
3342 real, so we use anti-dependence here.  */
3343 if (sched_has_condition_p (insn))
3345 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3347 struct deps_reg *reg_last = &deps->reg_last[i];
3348 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3349 true);
3350 add_dependence_list (insn, reg_last->sets, 0,
3351 reg_pending_barrier == TRUE_BARRIER
3352 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3353 add_dependence_list (insn, reg_last->implicit_sets, 0,
3354 REG_DEP_ANTI, true);
3355 add_dependence_list (insn, reg_last->clobbers, 0,
3356 reg_pending_barrier == TRUE_BARRIER
3357 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3360 else
3362 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3364 struct deps_reg *reg_last = &deps->reg_last[i];
3365 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3366 REG_DEP_ANTI, true);
3367 add_dependence_list_and_free (deps, insn,
3368 &reg_last->control_uses, 0,
3369 REG_DEP_CONTROL, true);
3370 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3371 reg_pending_barrier == TRUE_BARRIER
3372 ? REG_DEP_TRUE : REG_DEP_ANTI,
3373 true);
3374 add_dependence_list_and_free (deps, insn,
3375 &reg_last->implicit_sets, 0,
3376 REG_DEP_ANTI, true);
3377 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3378 reg_pending_barrier == TRUE_BARRIER
3379 ? REG_DEP_TRUE : REG_DEP_ANTI,
3380 true);
3382 if (!deps->readonly)
3384 reg_last->uses_length = 0;
3385 reg_last->clobbers_length = 0;
3390 if (!deps->readonly)
3391 for (i = 0; i < (unsigned)deps->max_reg; i++)
3393 struct deps_reg *reg_last = &deps->reg_last[i];
3394 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3395 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3398 /* Don't flush pending lists on speculative checks for
3399 selective scheduling. */
3400 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3401 flush_pending_lists (deps, insn, true, true);
3403 reg_pending_barrier = NOT_A_BARRIER;
3406 /* If a post-call group is still open, see if it should remain so.
3407 This insn must be a simple move of a hard reg to a pseudo or
3408 vice-versa.
3410 We must avoid moving these insns for correctness on targets
3411 with small register classes, and for special registers like
3412 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3413 hard regs for all targets. */
3415 if (deps->in_post_call_group_p)
3417 rtx tmp, set = single_set (insn);
3418 int src_regno, dest_regno;
3420 if (set == NULL)
3422 if (DEBUG_INSN_P (insn))
3423 /* We don't want to mark debug insns as part of the same
3424 sched group. We know they really aren't, but if we use
3425 debug insns to tell that a call group is over, we'll
3426 get different code if debug insns are not there and
3427 instructions that follow seem like they should be part
3428 of the call group.
3430 Also, if we did, chain_to_prev_insn would move the
3431 deps of the debug insn to the call insn, modifying
3432 non-debug post-dependency counts of the debug insn
3433 dependencies and otherwise messing with the scheduling
3434 order.
3436 Instead, let such debug insns be scheduled freely, but
3437 keep the call group open in case there are insns that
3438 should be part of it afterwards. Since we grant debug
3439 insns higher priority than even sched group insns, it
3440 will all turn out all right. */
3441 goto debug_dont_end_call_group;
3442 else
3443 goto end_call_group;
3446 tmp = SET_DEST (set);
3447 if (GET_CODE (tmp) == SUBREG)
3448 tmp = SUBREG_REG (tmp);
3449 if (REG_P (tmp))
3450 dest_regno = REGNO (tmp);
3451 else
3452 goto end_call_group;
3454 tmp = SET_SRC (set);
3455 if (GET_CODE (tmp) == SUBREG)
3456 tmp = SUBREG_REG (tmp);
3457 if ((GET_CODE (tmp) == PLUS
3458 || GET_CODE (tmp) == MINUS)
3459 && REG_P (XEXP (tmp, 0))
3460 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3461 && dest_regno == STACK_POINTER_REGNUM)
3462 src_regno = STACK_POINTER_REGNUM;
3463 else if (REG_P (tmp))
3464 src_regno = REGNO (tmp);
3465 else
3466 goto end_call_group;
3468 if (src_regno < FIRST_PSEUDO_REGISTER
3469 || dest_regno < FIRST_PSEUDO_REGISTER)
3471 if (!deps->readonly
3472 && deps->in_post_call_group_p == post_call_initial)
3473 deps->in_post_call_group_p = post_call;
3475 if (!sel_sched_p () || sched_emulate_haifa_p)
3477 SCHED_GROUP_P (insn) = 1;
3478 CANT_MOVE (insn) = 1;
3481 else
3483 end_call_group:
3484 if (!deps->readonly)
3485 deps->in_post_call_group_p = not_post_call;
3489 debug_dont_end_call_group:
3490 if ((current_sched_info->flags & DO_SPECULATION)
3491 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3492 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3493 be speculated. */
3495 if (sel_sched_p ())
3496 sel_mark_hard_insn (insn);
3497 else
3499 sd_iterator_def sd_it;
3500 dep_t dep;
3502 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3503 sd_iterator_cond (&sd_it, &dep);)
3504 change_spec_dep_to_hard (sd_it);
3508 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3509 honor their original ordering. */
3510 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3512 if (deps->last_args_size)
3513 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3514 deps->last_args_size = insn;
3518 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3519 longjmp, loop forever, ...). */
3520 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3521 test for ECF_NORETURN? */
3522 static bool
3523 call_may_noreturn_p (rtx_insn *insn)
3525 rtx call;
3527 /* const or pure calls that aren't looping will always return. */
3528 if (RTL_CONST_OR_PURE_CALL_P (insn)
3529 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3530 return false;
3532 call = get_call_rtx_from (insn);
3533 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3535 rtx symbol = XEXP (XEXP (call, 0), 0);
3536 if (SYMBOL_REF_DECL (symbol)
3537 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3539 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3540 == BUILT_IN_NORMAL)
3541 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3543 case BUILT_IN_BCMP:
3544 case BUILT_IN_BCOPY:
3545 case BUILT_IN_BZERO:
3546 case BUILT_IN_INDEX:
3547 case BUILT_IN_MEMCHR:
3548 case BUILT_IN_MEMCMP:
3549 case BUILT_IN_MEMCPY:
3550 case BUILT_IN_MEMMOVE:
3551 case BUILT_IN_MEMPCPY:
3552 case BUILT_IN_MEMSET:
3553 case BUILT_IN_RINDEX:
3554 case BUILT_IN_STPCPY:
3555 case BUILT_IN_STPNCPY:
3556 case BUILT_IN_STRCAT:
3557 case BUILT_IN_STRCHR:
3558 case BUILT_IN_STRCMP:
3559 case BUILT_IN_STRCPY:
3560 case BUILT_IN_STRCSPN:
3561 case BUILT_IN_STRLEN:
3562 case BUILT_IN_STRNCAT:
3563 case BUILT_IN_STRNCMP:
3564 case BUILT_IN_STRNCPY:
3565 case BUILT_IN_STRPBRK:
3566 case BUILT_IN_STRRCHR:
3567 case BUILT_IN_STRSPN:
3568 case BUILT_IN_STRSTR:
3569 /* Assume certain string/memory builtins always return. */
3570 return false;
3571 default:
3572 break;
3577 /* For all other calls assume that they might not always return. */
3578 return true;
3581 /* Return true if INSN should be made dependent on the previous instruction
3582 group, and if all INSN's dependencies should be moved to the first
3583 instruction of that group. */
3585 static bool
3586 chain_to_prev_insn_p (rtx_insn *insn)
3588 /* INSN forms a group with the previous instruction. */
3589 if (SCHED_GROUP_P (insn))
3590 return true;
3592 /* If the previous instruction clobbers a register R and this one sets
3593 part of R, the clobber was added specifically to help us track the
3594 liveness of R. There's no point scheduling the clobber and leaving
3595 INSN behind, especially if we move the clobber to another block. */
3596 rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3597 if (prev
3598 && INSN_P (prev)
3599 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3600 && GET_CODE (PATTERN (prev)) == CLOBBER)
3602 rtx x = XEXP (PATTERN (prev), 0);
3603 if (set_of (x, insn))
3604 return true;
3607 return false;
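/* For example: after lower-subreg or reload one may see a sequence like

       (clobber (reg:DI R))
       (set (subreg:SI (reg:DI R) 0) ...)
       (set (subreg:SI (reg:DI R) 4) ...)

   where the clobber exists only so liveness of the whole of R is tracked
   correctly; chaining the partial set to the preceding clobber keeps the
   group together.  (Illustrative only; R and the subreg byte offsets are
   made up.)  */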
3610 /* Analyze INSN with DEPS as a context. */
3611 void
3612 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3614 if (sched_deps_info->start_insn)
3615 sched_deps_info->start_insn (insn);
3617 /* Record the condition for this insn. */
3618 if (NONDEBUG_INSN_P (insn))
3620 rtx t;
3621 sched_get_condition_with_rev (insn, NULL);
3622 t = INSN_CACHED_COND (insn);
3623 INSN_COND_DEPS (insn) = NULL;
3624 if (reload_completed
3625 && (current_sched_info->flags & DO_PREDICATION)
3626 && COMPARISON_P (t)
3627 && REG_P (XEXP (t, 0))
3628 && CONSTANT_P (XEXP (t, 1)))
3630 unsigned int regno;
3631 int nregs;
3632 rtx_insn_list *cond_deps = NULL;
3633 t = XEXP (t, 0);
3634 regno = REGNO (t);
3635 nregs = REG_NREGS (t);
3636 while (nregs-- > 0)
3638 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3639 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3640 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3641 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3643 INSN_COND_DEPS (insn) = cond_deps;
3647 if (JUMP_P (insn))
3649 /* Make each JUMP_INSN (but not a speculative check)
3650 a scheduling barrier for memory references. */
3651 if (!deps->readonly
3652 && !(sel_sched_p ()
3653 && sel_insn_is_speculation_check (insn)))
3655 /* Keep the list a reasonable size. */
3656 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3657 flush_pending_lists (deps, insn, true, true);
3658 else
3659 deps->pending_jump_insns
3660 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3663 /* For each insn which shouldn't cross a jump, add a dependence. */
3664 add_dependence_list_and_free (deps, insn,
3665 &deps->sched_before_next_jump, 1,
3666 REG_DEP_ANTI, true);
3668 sched_analyze_insn (deps, PATTERN (insn), insn);
3670 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3672 sched_analyze_insn (deps, PATTERN (insn), insn);
3674 else if (CALL_P (insn))
3676 int i;
3678 CANT_MOVE (insn) = 1;
3680 if (find_reg_note (insn, REG_SETJMP, NULL))
3682 /* This is setjmp. Assume that all registers, not just
3683 hard registers, may be clobbered by this call. */
3684 reg_pending_barrier = MOVE_BARRIER;
3686 else
3688 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3689 /* A call may read and modify global register variables. */
3690 if (global_regs[i])
3692 SET_REGNO_REG_SET (reg_pending_sets, i);
3693 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3695 /* Other call-clobbered hard regs may be clobbered.
3696 Since we only have a choice between 'might be clobbered'
3697 and 'definitely not clobbered', we must include all
3698 partly call-clobbered registers here. */
3699 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3700 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3701 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3702 /* We don't know what set of fixed registers might be used
3703 by the function, but the stack pointer is certainly among
3704 them, so be conservative.  */
3705 else if (fixed_regs[i])
3706 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3707 /* The frame pointer is normally not used by the function
3708 itself, but by the debugger. */
3709 /* ??? MIPS o32 is an exception. It uses the frame pointer
3710 in the macro expansion of jal but does not represent this
3711 fact in the call_insn rtl. */
3712 else if (i == FRAME_POINTER_REGNUM
3713 || (i == HARD_FRAME_POINTER_REGNUM
3714 && (! reload_completed || frame_pointer_needed)))
3715 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3718 /* For each insn which shouldn't cross a call, add a dependence
3719 between that insn and this call insn. */
3720 add_dependence_list_and_free (deps, insn,
3721 &deps->sched_before_next_call, 1,
3722 REG_DEP_ANTI, true);
3724 sched_analyze_insn (deps, PATTERN (insn), insn);
3726 /* If CALL would be in a sched group, then this will violate the
3727 convention that sched group insns have dependencies only on the
3728 previous instruction.
3730 Of course one can say: "Hey! What about head of the sched group?"
3731 And I will answer: "Basic principles (one dep per insn) are always
3732 the same." */
3733 gcc_assert (!SCHED_GROUP_P (insn));
3735 /* In the absence of interprocedural alias analysis, we must flush
3736 all pending reads and writes, and start new dependencies starting
3737 from here. But only flush writes for constant calls (which may
3738 be passed a pointer to something we haven't written yet). */
3739 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3741 if (!deps->readonly)
3743 /* Remember the last function call for limiting lifetimes. */
3744 free_INSN_LIST_list (&deps->last_function_call);
3745 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3747 if (call_may_noreturn_p (insn))
3749 /* Remember the last function call that might not always return
3750 normally for limiting moves of trapping insns. */
3751 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3752 deps->last_function_call_may_noreturn
3753 = alloc_INSN_LIST (insn, NULL_RTX);
3756 /* Before reload, begin a post-call group, so as to keep the
3757 lifetimes of hard registers correct. */
3758 if (! reload_completed)
3759 deps->in_post_call_group_p = post_call;
3763 if (sched_deps_info->use_cselib)
3764 cselib_process_insn (insn);
3766 if (sched_deps_info->finish_insn)
3767 sched_deps_info->finish_insn ();
3769 /* Fixup the dependencies in the sched group. */
3770 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3771 && chain_to_prev_insn_p (insn)
3772 && !sel_sched_p ())
3773 chain_to_prev_insn (insn);
3776 /* Initialize DEPS for the new block beginning with HEAD. */
3777 void
3778 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3780 gcc_assert (!deps->readonly);
3782 /* Before reload, if the previous block ended in a call, show that
3783 we are inside a post-call group, so as to keep the lifetimes of
3784 hard registers correct. */
3785 if (! reload_completed && !LABEL_P (head))
3787 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3789 if (insn && CALL_P (insn))
3790 deps->in_post_call_group_p = post_call_initial;
3794 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3795 dependencies for each insn. */
3796 void
3797 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3799 rtx_insn *insn;
3801 if (sched_deps_info->use_cselib)
3802 cselib_init (CSELIB_RECORD_MEMORY);
3804 deps_start_bb (deps, head);
3806 for (insn = head;; insn = NEXT_INSN (insn))
3809 if (INSN_P (insn))
3811 /* And initialize deps_lists. */
3812 sd_init_insn (insn);
3813 /* Clean up SCHED_GROUP_P which may be set by last
3814 scheduler pass. */
3815 if (SCHED_GROUP_P (insn))
3816 SCHED_GROUP_P (insn) = 0;
3819 deps_analyze_insn (deps, insn);
3821 if (insn == tail)
3823 if (sched_deps_info->use_cselib)
3824 cselib_finish ();
3825 return;
3828 gcc_unreachable ();
3831 /* Helper for sched_free_deps ().
3832 Delete INSN's (RESOLVED_P) backward dependencies. */
3833 static void
3834 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3836 sd_iterator_def sd_it;
3837 dep_t dep;
3838 sd_list_types_def types;
3840 if (resolved_p)
3841 types = SD_LIST_RES_BACK;
3842 else
3843 types = SD_LIST_BACK;
3845 for (sd_it = sd_iterator_start (insn, types);
3846 sd_iterator_cond (&sd_it, &dep);)
3848 dep_link_t link = *sd_it.linkp;
3849 dep_node_t node = DEP_LINK_NODE (link);
3850 deps_list_t back_list;
3851 deps_list_t forw_list;
3853 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3854 remove_from_deps_list (link, back_list);
3855 delete_dep_node (node);
3859 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3860 deps_lists. */
3861 void
3862 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3864 rtx_insn *insn;
3865 rtx_insn *next_tail = NEXT_INSN (tail);
3867 /* We make two passes since some insns may be scheduled before their
3868 dependencies are resolved. */
3869 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3870 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3872 /* Clear forward deps and leave the dep_nodes to the
3873 corresponding back_deps list. */
3874 if (resolved_p)
3875 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3876 else
3877 clear_deps_list (INSN_FORW_DEPS (insn));
3879 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3880 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3882 /* Clear resolved back deps together with its dep_nodes. */
3883 delete_dep_nodes_in_back_deps (insn, resolved_p);
3885 sd_finish_insn (insn);
3889 /* Initialize variables for region data dependence analysis.
3890 When LAZY_REG_LAST is true, do not allocate reg_last array
3891 of struct deps_desc immediately. */
3893 void
3894 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3896 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3898 deps->max_reg = max_reg;
3899 if (lazy_reg_last)
3900 deps->reg_last = NULL;
3901 else
3902 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3903 INIT_REG_SET (&deps->reg_last_in_use);
3905 deps->pending_read_insns = 0;
3906 deps->pending_read_mems = 0;
3907 deps->pending_write_insns = 0;
3908 deps->pending_write_mems = 0;
3909 deps->pending_jump_insns = 0;
3910 deps->pending_read_list_length = 0;
3911 deps->pending_write_list_length = 0;
3912 deps->pending_flush_length = 0;
3913 deps->last_pending_memory_flush = 0;
3914 deps->last_function_call = 0;
3915 deps->last_function_call_may_noreturn = 0;
3916 deps->sched_before_next_call = 0;
3917 deps->sched_before_next_jump = 0;
3918 deps->in_post_call_group_p = not_post_call;
3919 deps->last_debug_insn = 0;
3920 deps->last_args_size = 0;
3921 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3922 deps->readonly = 0;
3925 /* Initialize only the reg_last field of DEPS, which was not allocated
3926 before because DEPS was initialized lazily.  */
3927 void
3928 init_deps_reg_last (struct deps_desc *deps)
3930 gcc_assert (deps && deps->max_reg > 0);
3931 gcc_assert (deps->reg_last == NULL);
3933 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3937 /* Free insn lists found in DEPS. */
3939 void
3940 free_deps (struct deps_desc *deps)
3942 unsigned i;
3943 reg_set_iterator rsi;
3945 /* We set max_reg to 0 when this context was already freed. */
3946 if (deps->max_reg == 0)
3948 gcc_assert (deps->reg_last == NULL);
3949 return;
3951 deps->max_reg = 0;
3953 free_INSN_LIST_list (&deps->pending_read_insns);
3954 free_EXPR_LIST_list (&deps->pending_read_mems);
3955 free_INSN_LIST_list (&deps->pending_write_insns);
3956 free_EXPR_LIST_list (&deps->pending_write_mems);
3957 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3959 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3960 times. For a testcase with 42000 regs and 8000 small basic blocks,
3961 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3962 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3964 struct deps_reg *reg_last = &deps->reg_last[i];
3965 if (reg_last->uses)
3966 free_INSN_LIST_list (&reg_last->uses);
3967 if (reg_last->sets)
3968 free_INSN_LIST_list (&reg_last->sets);
3969 if (reg_last->implicit_sets)
3970 free_INSN_LIST_list (&reg_last->implicit_sets);
3971 if (reg_last->control_uses)
3972 free_INSN_LIST_list (&reg_last->control_uses);
3973 if (reg_last->clobbers)
3974 free_INSN_LIST_list (&reg_last->clobbers);
3976 CLEAR_REG_SET (&deps->reg_last_in_use);
3978 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3979 it at all. */
3980 free (deps->reg_last);
3981 deps->reg_last = NULL;
3983 deps = NULL;
3986 /* Remove INSN from dependence contexts DEPS. */
3987 void
3988 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
3990 int removed;
3991 unsigned i;
3992 reg_set_iterator rsi;
3994 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3995 &deps->pending_read_mems);
3996 if (!DEBUG_INSN_P (insn))
3997 deps->pending_read_list_length -= removed;
3998 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3999 &deps->pending_write_mems);
4000 deps->pending_write_list_length -= removed;
4002 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4003 deps->pending_flush_length -= removed;
4004 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4005 deps->pending_flush_length -= removed;
4007 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4009 struct deps_reg *reg_last = &deps->reg_last[i];
4010 if (reg_last->uses)
4011 remove_from_dependence_list (insn, &reg_last->uses);
4012 if (reg_last->sets)
4013 remove_from_dependence_list (insn, &reg_last->sets);
4014 if (reg_last->implicit_sets)
4015 remove_from_dependence_list (insn, &reg_last->implicit_sets);
4016 if (reg_last->clobbers)
4017 remove_from_dependence_list (insn, &reg_last->clobbers);
4018 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4019 && !reg_last->clobbers)
4020 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4023 if (CALL_P (insn))
4025 remove_from_dependence_list (insn, &deps->last_function_call);
4026 remove_from_dependence_list (insn,
4027 &deps->last_function_call_may_noreturn);
4029 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4032 /* Init deps data vector. */
4033 static void
4034 init_deps_data_vector (void)
4036 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4037 if (reserve > 0 && ! h_d_i_d.space (reserve))
4038 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
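/* h_d_i_d is indexed by insn luid, so it has to cover luids up to
   sched_max_luid; when there is not enough room for RESERVE new entries the
   vector is grown to 3 * sched_max_luid / 2 cleared slots in one step, so
   that a run of small luid extensions does not reallocate every time.  */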
4041 /* If it is profitable to use them, initialize or extend (depending on
4042 GLOBAL_P) dependency data. */
4043 void
4044 sched_deps_init (bool global_p)
4046 /* Average number of insns per basic block.
4047 '+ 1' is used to make it nonzero. */
4048 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4050 init_deps_data_vector ();
4052 /* We use another caching mechanism for selective scheduling, so
4053 we don't use this one. */
4054 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4056 /* ?!? We could save some memory by computing a per-region luid mapping
4057 which could reduce both the number of vectors in the cache and the
4058 size of each vector. Instead we just avoid the cache entirely unless
4059 the average number of instructions in a basic block is very high. See
4060 the comment before the declaration of true_dependency_cache for
4061 what we consider "very high". */
4062 cache_size = 0;
4063 extend_dependency_caches (sched_max_luid, true);
4066 if (global_p)
4068 dl_pool = new pool_allocator<_deps_list> ("deps_list",
4069 /* Allocate lists for one block at a time. */
4070 insns_in_block);
4071 dn_pool = new pool_allocator<_dep_node> ("dep_node",
4072 /* Allocate nodes for one block at a time.
4073 We assume that average insn has
4074 5 producers. */
4075 5 * insns_in_block);
4080 /* Create or extend (depending on CREATE_P) dependency caches to
4081 size N. */
4082 void
4083 extend_dependency_caches (int n, bool create_p)
4085 if (create_p || true_dependency_cache)
4087 int i, luid = cache_size + n;
4089 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4090 luid);
4091 output_dependency_cache = XRESIZEVEC (bitmap_head,
4092 output_dependency_cache, luid);
4093 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4094 luid);
4095 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4096 luid);
4098 if (current_sched_info->flags & DO_SPECULATION)
4099 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4100 luid);
4102 for (i = cache_size; i < luid; i++)
4104 bitmap_initialize (&true_dependency_cache[i], 0);
4105 bitmap_initialize (&output_dependency_cache[i], 0);
4106 bitmap_initialize (&anti_dependency_cache[i], 0);
4107 bitmap_initialize (&control_dependency_cache[i], 0);
4109 if (current_sched_info->flags & DO_SPECULATION)
4110 bitmap_initialize (&spec_dependency_cache[i], 0);
4112 cache_size = luid;
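/* Illustrative sketch of how these per-luid bitmap caches are consulted (the
   exact indexing convention belongs to their users earlier in this file; the
   orientation shown here - consumer luid selects the bitmap, producer luid
   selects the bit - is an assumption made for the example):

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (consumer)],
                       INSN_LUID (producer)))
       ... a true dependence between the two insns was cached earlier ...  */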
4116 /* Finalize dependency information for the whole function. */
4117 void
4118 sched_deps_finish (void)
4120 gcc_assert (deps_pools_are_empty_p ());
4121 dn_pool->release_if_empty ();
4122 dn_pool = NULL;
4123 dl_pool->release_if_empty ();
4124 dl_pool = NULL;
4126 h_d_i_d.release ();
4127 cache_size = 0;
4129 if (true_dependency_cache)
4131 int i;
4133 for (i = 0; i < cache_size; i++)
4135 bitmap_clear (&true_dependency_cache[i]);
4136 bitmap_clear (&output_dependency_cache[i]);
4137 bitmap_clear (&anti_dependency_cache[i]);
4138 bitmap_clear (&control_dependency_cache[i]);
4140 if (sched_deps_info->generate_spec_deps)
4141 bitmap_clear (&spec_dependency_cache[i]);
4143 free (true_dependency_cache);
4144 true_dependency_cache = NULL;
4145 free (output_dependency_cache);
4146 output_dependency_cache = NULL;
4147 free (anti_dependency_cache);
4148 anti_dependency_cache = NULL;
4149 free (control_dependency_cache);
4150 control_dependency_cache = NULL;
4152 if (sched_deps_info->generate_spec_deps)
4154 free (spec_dependency_cache);
4155 spec_dependency_cache = NULL;
4161 /* Initialize some global variables needed by the dependency analysis
4162 code. */
4164 void
4165 init_deps_global (void)
4167 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4168 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4169 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4170 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4171 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4172 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4173 reg_pending_barrier = NOT_A_BARRIER;
4175 if (!sel_sched_p () || sched_emulate_haifa_p)
4177 sched_deps_info->start_insn = haifa_start_insn;
4178 sched_deps_info->finish_insn = haifa_finish_insn;
4180 sched_deps_info->note_reg_set = haifa_note_reg_set;
4181 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4182 sched_deps_info->note_reg_use = haifa_note_reg_use;
4184 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4185 sched_deps_info->note_dep = haifa_note_dep;
4189 /* Free everything used by the dependency analysis code. */
4191 void
4192 finish_deps_global (void)
4194 FREE_REG_SET (reg_pending_sets);
4195 FREE_REG_SET (reg_pending_clobbers);
4196 FREE_REG_SET (reg_pending_uses);
4197 FREE_REG_SET (reg_pending_control_uses);
4200 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4201 dw_t
4202 estimate_dep_weak (rtx mem1, rtx mem2)
4204 rtx r1, r2;
4206 if (mem1 == mem2)
4207 /* MEMs are the same - don't speculate. */
4208 return MIN_DEP_WEAK;
4210 r1 = XEXP (mem1, 0);
4211 r2 = XEXP (mem2, 0);
4213 if (r1 == r2
4214 || (REG_P (r1) && REG_P (r2)
4215 && REGNO (r1) == REGNO (r2)))
4216 /* Again, MEMs are the same. */
4217 return MIN_DEP_WEAK;
4218 else if ((REG_P (r1) && !REG_P (r2))
4219 || (!REG_P (r1) && REG_P (r2)))
4220 /* Different addressing modes - reason to be more speculative
4221 than usual. */
4222 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4223 else
4224 /* We can't say anything about the dependence. */
4225 return UNCERTAIN_DEP_WEAK;
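/* A worked reading of the heuristic above: identical MEMs (or identical
   address registers) yield MIN_DEP_WEAK, i.e. "do not speculate"; address
   pairs we know nothing about yield UNCERTAIN_DEP_WEAK; and a REG versus
   non-REG address pair returns the midpoint between UNCERTAIN_DEP_WEAK and
   NO_DEP_WEAK, because NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2
   halves the remaining distance towards "no dependence at all".  */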
4228 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4229 This function can handle same INSN and ELEM (INSN == ELEM).
4230 It is a convenience wrapper. */
4231 static void
4232 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4234 ds_t ds;
4235 bool internal;
4237 if (dep_type == REG_DEP_TRUE)
4238 ds = DEP_TRUE;
4239 else if (dep_type == REG_DEP_OUTPUT)
4240 ds = DEP_OUTPUT;
4241 else if (dep_type == REG_DEP_CONTROL)
4242 ds = DEP_CONTROL;
4243 else
4245 gcc_assert (dep_type == REG_DEP_ANTI);
4246 ds = DEP_ANTI;
4249 /* When add_dependence is called from inside sched-deps.c, we expect
4250 cur_insn to be non-null. */
4251 internal = cur_insn != NULL;
4252 if (internal)
4253 gcc_assert (insn == cur_insn);
4254 else
4255 cur_insn = insn;
4257 note_dep (elem, ds);
4258 if (!internal)
4259 cur_insn = NULL;
4262 /* Return weakness of speculative type TYPE in the dep_status DS,
4263 without range checking, so that malformed input does not cause an ICE. */
4264 static dw_t
4265 get_dep_weak_1 (ds_t ds, ds_t type)
4267 ds = ds & type;
4269 switch (type)
4271 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4272 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4273 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4274 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4275 default: gcc_unreachable ();
4278 return (dw_t) ds;
4281 /* Return weakness of speculative type TYPE in the dep_status DS. */
4282 dw_t
4283 get_dep_weak (ds_t ds, ds_t type)
4285 dw_t dw = get_dep_weak_1 (ds, type);
4287 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4288 return dw;
4291 /* Return the dep_status, which has the same parameters as DS, except for
4292 speculative type TYPE, that will have weakness DW. */
4293 ds_t
4294 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4296 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4298 ds &= ~type;
4299 switch (type)
4301 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4302 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4303 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4304 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4305 default: gcc_unreachable ();
4307 return ds;
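/* Illustrative round trip through the weakness encoding (a hypothetical
   snippet, not used elsewhere in this file):

     ds_t ds = set_dep_weak (0, BEGIN_DATA, MIN_DEP_WEAK);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == MIN_DEP_WEAK);

   Each speculative type occupies its own bit-field within ds_t, starting at
   the corresponding *_BITS_OFFSET, so weaknesses of different types never
   overlap.  */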
4310 /* Return the join of two dep_statuses DS1 and DS2.
4311 If MAX_P is true then choose the greater probability,
4312 otherwise multiply probabilities.
4313 This function assumes that both DS1 and DS2 contain speculative bits. */
4314 static ds_t
4315 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4317 ds_t ds, t;
4319 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4321 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4323 t = FIRST_SPEC_TYPE;
4326 if ((ds1 & t) && !(ds2 & t))
4327 ds |= ds1 & t;
4328 else if (!(ds1 & t) && (ds2 & t))
4329 ds |= ds2 & t;
4330 else if ((ds1 & t) && (ds2 & t))
4332 dw_t dw1 = get_dep_weak (ds1, t);
4333 dw_t dw2 = get_dep_weak (ds2, t);
4334 ds_t dw;
4336 if (!max_p)
4338 dw = ((ds_t) dw1) * ((ds_t) dw2);
4339 dw /= MAX_DEP_WEAK;
4340 if (dw < MIN_DEP_WEAK)
4341 dw = MIN_DEP_WEAK;
4343 else
4345 if (dw1 >= dw2)
4346 dw = dw1;
4347 else
4348 dw = dw2;
4351 ds = set_dep_weak (ds, t, (dw_t) dw);
4354 if (t == LAST_SPEC_TYPE)
4355 break;
4356 t <<= SPEC_TYPE_SHIFT;
4358 while (1);
4360 return ds;
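/* Numerical reading of the !MAX_P case above: weaknesses behave like
   fixed-point probabilities scaled by MAX_DEP_WEAK, so merging two
   independent speculations multiplies them, e.g. roughly

     dw1 = MAX_DEP_WEAK / 2, dw2 = MAX_DEP_WEAK / 2
       ==>  dw = dw1 * dw2 / MAX_DEP_WEAK  ~=  MAX_DEP_WEAK / 4,

   clipped from below to MIN_DEP_WEAK.  With MAX_P the merged weakness is
   simply the larger of the two.  */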
4363 /* Return the join of two dep_statuses DS1 and DS2.
4364 This function assumes that both DS1 and DS2 contain speculative bits. */
4365 ds_t
4366 ds_merge (ds_t ds1, ds_t ds2)
4368 return ds_merge_1 (ds1, ds2, false);
4371 /* Return the join of dep_statuses DS and DS2, using MEM1 and MEM2 (when given) to estimate the weakness of the resulting data speculation. */
4372 ds_t
4373 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4375 ds_t new_status = ds | ds2;
4377 if (new_status & SPECULATIVE)
4379 if ((ds && !(ds & SPECULATIVE))
4380 || (ds2 && !(ds2 & SPECULATIVE)))
4381 /* Then this dep can't be speculative. */
4382 new_status &= ~SPECULATIVE;
4383 else
4385 /* Both are speculative.  Merge the probabilities. */
4386 if (mem1)
4388 dw_t dw;
4390 dw = estimate_dep_weak (mem1, mem2);
4391 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4394 if (!ds)
4395 new_status = ds2;
4396 else if (!ds2)
4397 new_status = ds;
4398 else
4399 new_status = ds_merge (ds2, ds);
4403 return new_status;
4406 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4407 probabilities. */
4408 ds_t
4409 ds_max_merge (ds_t ds1, ds_t ds2)
4411 if (ds1 == 0 && ds2 == 0)
4412 return 0;
4414 if (ds1 == 0 && ds2 != 0)
4415 return ds2;
4417 if (ds1 != 0 && ds2 == 0)
4418 return ds1;
4420 return ds_merge_1 (ds1, ds2, true);
4423 /* Return the probability of speculation success for the speculation
4424 status DS. */
4425 dw_t
4426 ds_weak (ds_t ds)
4428 ds_t res = 1, dt;
4429 int n = 0;
4431 dt = FIRST_SPEC_TYPE;
4434 if (ds & dt)
4436 res *= (ds_t) get_dep_weak (ds, dt);
4437 n++;
4440 if (dt == LAST_SPEC_TYPE)
4441 break;
4442 dt <<= SPEC_TYPE_SHIFT;
4444 while (1);
4446 gcc_assert (n);
4447 while (--n)
4448 res /= MAX_DEP_WEAK;
4450 if (res < MIN_DEP_WEAK)
4451 res = MIN_DEP_WEAK;
4453 gcc_assert (res <= MAX_DEP_WEAK);
4455 return (dw_t) res;
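/* The same fixed-point convention as in ds_merge_1 applies here: ds_weak
   multiplies the weaknesses of all speculative types present in DS and
   rescales by MAX_DEP_WEAK once per extra factor, so for two types

     res = w1 * w2 / MAX_DEP_WEAK,

   which is the combined success probability if the individual speculations
   are treated as independent events.  */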
4458 /* Return a dep status that contains all speculation types of DS. */
4459 ds_t
4460 ds_get_speculation_types (ds_t ds)
4462 if (ds & BEGIN_DATA)
4463 ds |= BEGIN_DATA;
4464 if (ds & BE_IN_DATA)
4465 ds |= BE_IN_DATA;
4466 if (ds & BEGIN_CONTROL)
4467 ds |= BEGIN_CONTROL;
4468 if (ds & BE_IN_CONTROL)
4469 ds |= BE_IN_CONTROL;
4471 return ds & SPECULATIVE;
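/* Note on the seemingly redundant "ds |= ..." statements above: BEGIN_DATA,
   BE_IN_DATA, BEGIN_CONTROL and BE_IN_CONTROL are multi-bit weakness masks,
   so "ds & BEGIN_DATA" asks whether any weakness bit of that type is set,
   while "ds |= BEGIN_DATA" fills in the whole field; the final
   "& SPECULATIVE" then returns exactly the speculation types present in DS,
   each with all of its weakness bits set.  */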
4474 /* Return a dep status that contains maximal weakness for each speculation
4475 type present in DS. */
4476 ds_t
4477 ds_get_max_dep_weak (ds_t ds)
4479 if (ds & BEGIN_DATA)
4480 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4481 if (ds & BE_IN_DATA)
4482 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4483 if (ds & BEGIN_CONTROL)
4484 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4485 if (ds & BE_IN_CONTROL)
4486 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4488 return ds;
4491 /* Dump information about the dependence status S. */
4492 static void
4493 dump_ds (FILE *f, ds_t s)
4495 fprintf (f, "{");
4497 if (s & BEGIN_DATA)
4498 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4499 if (s & BE_IN_DATA)
4500 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4501 if (s & BEGIN_CONTROL)
4502 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4503 if (s & BE_IN_CONTROL)
4504 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4506 if (s & HARD_DEP)
4507 fprintf (f, "HARD_DEP; ");
4509 if (s & DEP_TRUE)
4510 fprintf (f, "DEP_TRUE; ");
4511 if (s & DEP_OUTPUT)
4512 fprintf (f, "DEP_OUTPUT; ");
4513 if (s & DEP_ANTI)
4514 fprintf (f, "DEP_ANTI; ");
4515 if (s & DEP_CONTROL)
4516 fprintf (f, "DEP_CONTROL; ");
4518 fprintf (f, "}");
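/* Sample of the output produced by dump_ds for a status carrying a true
   dependence with data speculation of weakness W (the format follows the
   fprintf calls above):

     {BEGIN_DATA: W; DEP_TRUE; }  */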
4521 DEBUG_FUNCTION void
4522 debug_ds (ds_t s)
4524 dump_ds (stderr, s);
4525 fprintf (stderr, "\n");
4528 #ifdef ENABLE_CHECKING
4529 /* Verify that dependence type and status are consistent.
4530 If RELAXED_P is true, then skip dep_weakness checks. */
4531 static void
4532 check_dep (dep_t dep, bool relaxed_p)
4534 enum reg_note dt = DEP_TYPE (dep);
4535 ds_t ds = DEP_STATUS (dep);
4537 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4539 if (!(current_sched_info->flags & USE_DEPS_LIST))
4541 gcc_assert (ds == 0);
4542 return;
4545 /* Check that dependence type contains the same bits as the status. */
4546 if (dt == REG_DEP_TRUE)
4547 gcc_assert (ds & DEP_TRUE);
4548 else if (dt == REG_DEP_OUTPUT)
4549 gcc_assert ((ds & DEP_OUTPUT)
4550 && !(ds & DEP_TRUE));
4551 else if (dt == REG_DEP_ANTI)
4552 gcc_assert ((ds & DEP_ANTI)
4553 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4554 else
4555 gcc_assert (dt == REG_DEP_CONTROL
4556 && (ds & DEP_CONTROL)
4557 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4559 /* HARD_DEP cannot appear in the dep_status of a link. */
4560 gcc_assert (!(ds & HARD_DEP));
4562 /* Check that dependence status is set correctly when speculation is not
4563 supported. */
4564 if (!sched_deps_info->generate_spec_deps)
4565 gcc_assert (!(ds & SPECULATIVE));
4566 else if (ds & SPECULATIVE)
4568 if (!relaxed_p)
4570 ds_t type = FIRST_SPEC_TYPE;
4572 /* Check that dependence weakness is in proper range. */
4575 if (ds & type)
4576 get_dep_weak (ds, type);
4578 if (type == LAST_SPEC_TYPE)
4579 break;
4580 type <<= SPEC_TYPE_SHIFT;
4582 while (1);
4585 if (ds & BEGIN_SPEC)
4587 /* Only true dependence can be data speculative. */
4588 if (ds & BEGIN_DATA)
4589 gcc_assert (ds & DEP_TRUE);
4591 /* Control dependencies in the insn scheduler are represented by
4592 anti-dependencies, therefore only anti dependence can be
4593 control speculative. */
4594 if (ds & BEGIN_CONTROL)
4595 gcc_assert (ds & DEP_ANTI);
4597 else
4599 /* Subsequent speculations should resolve true dependencies. */
4600 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4603 /* Check that true and anti dependencies can't have other speculative
4604 statuses. */
4605 if (ds & DEP_TRUE)
4606 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4607 /* An output dependence can't be speculative at all. */
4608 gcc_assert (!(ds & DEP_OUTPUT));
4609 if (ds & DEP_ANTI)
4610 gcc_assert (ds & BEGIN_CONTROL);
4613 #endif /* ENABLE_CHECKING */
4615 /* The following code discovers opportunities to switch a memory reference
4616 and an increment by modifying the address. We ensure that this is done
4617 only for dependencies that represent just a single register
4618 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4619 instruction involved is subject to only one dep that can cause a pattern
4620 change.
4622 When we discover a suitable dependency, we fill in the dep_replacement
4623 structure to show how to modify the memory reference. */
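/* A made-up example of the kind of rewrite prepared here.  Given

     insn A:  r1 = r1 + 8
     insn B:  x  = *(r1 + 4)

   B depends on A only through r1.  If B's address is rewritten in terms of
   the value r1 holds before the increment, i.e. x = *(r1 + 4 + 8), the two
   insns can be interchanged; the dep_replacement recorded below describes
   exactly this address substitution and is meant to be applied only if the
   scheduler actually performs the swap.  Register numbers and constants in
   this example are illustrative only.  */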
4625 /* Holds information about a pair of memory reference and register increment
4626 insns which depend on each other, but could possibly be interchanged. */
4627 struct mem_inc_info
4629 rtx_insn *inc_insn;
4630 rtx_insn *mem_insn;
4632 rtx *mem_loc;
4633 /* A register occurring in the memory address for which we wish to break
4634 the dependence. This must be identical to the destination register of
4635 the increment. */
4636 rtx mem_reg0;
4637 /* Any kind of index that is added to that register. */
4638 rtx mem_index;
4639 /* The constant offset used in the memory address. */
4640 HOST_WIDE_INT mem_constant;
4641 /* The constant added in the increment insn. Negated if the increment is
4642 after the memory reference. */
4643 HOST_WIDE_INT inc_constant;
4644 /* The source register used in the increment. May be different from mem_reg0
4645 if the increment occurs before the memory reference. */
4646 rtx inc_input;
4649 /* Verify that the memory location described in MII can be replaced with
4650 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4651 insn remains unchanged by this function. */
4653 static rtx
4654 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4656 rtx mem = *mii->mem_loc;
4657 rtx new_mem;
4659 /* Jump through a lot of hoops to keep the attributes up to date. We
4660 do not want to call one of the change address variants that take
4661 an offset even though we know the offset in many cases. These
4662 assume you are changing where the address is pointing by the
4663 offset. */
4664 new_mem = replace_equiv_address_nv (mem, new_addr);
4665 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4667 if (sched_verbose >= 5)
4668 fprintf (sched_dump, "validation failure\n");
4669 return NULL_RTX;
4672 /* Put back the old one. */
4673 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4675 return new_mem;
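/* Usage note: validate_change is called with in_group == 0, so the tentative
   replacement is applied and re-recognized immediately; if that succeeds,
   the second validate_change call puts the original address back, which is
   why the caller sees an unchanged insn together with the NEW_MEM rtx it can
   later record in a dep_replacement.  */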
4678 /* Return true if INSN is of the form "a = b + c" where a and b are
4679 regs and c is a const_int.  Fill in
4680 information in MII about what is found.
4681 BEFORE_MEM indicates whether the increment is found before or after
4682 a corresponding memory reference. */
4684 static bool
4685 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4687 rtx pat = single_set (insn);
4688 rtx src, cst;
4689 bool regs_equal;
4691 if (RTX_FRAME_RELATED_P (insn) || !pat)
4692 return false;
4694 /* Result must be single reg. */
4695 if (!REG_P (SET_DEST (pat)))
4696 return false;
4698 if (GET_CODE (SET_SRC (pat)) != PLUS)
4699 return false;
4701 mii->inc_insn = insn;
4702 src = SET_SRC (pat);
4703 mii->inc_input = XEXP (src, 0);
4705 if (!REG_P (XEXP (src, 0)))
4706 return false;
4708 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4709 return false;
4711 cst = XEXP (src, 1);
4712 if (!CONST_INT_P (cst))
4713 return false;
4714 mii->inc_constant = INTVAL (cst);
4716 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4718 if (!before_mem)
4720 mii->inc_constant = -mii->inc_constant;
4721 if (!regs_equal)
4722 return false;
4725 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4727 /* Note that the sign has already been reversed for !before_mem. */
4728 if (STACK_GROWS_DOWNWARD)
4729 return mii->inc_constant > 0;
4730 else
4731 return mii->inc_constant < 0;
4733 return true;
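/* Illustrative example of an insn shape accepted by parse_add_or_inc (the
   register number is arbitrary):

     (set (reg 100) (plus (reg 100) (const_int 8)))

   When BEFORE_MEM is false the constant is negated before being stored in
   mii->inc_constant, and in that case the source and destination registers
   of the addition must be the same.  */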
4736 /* Once a suitable mem reference has been found and the corresponding data
4737 in MII has been filled in, this function is called to find a suitable
4738 add or inc insn involving the register we found in the memory
4739 reference. */
4741 static bool
4742 find_inc (struct mem_inc_info *mii, bool backwards)
4744 sd_iterator_def sd_it;
4745 dep_t dep;
4747 sd_it = sd_iterator_start (mii->mem_insn,
4748 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4749 while (sd_iterator_cond (&sd_it, &dep))
4751 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4752 rtx_insn *pro = DEP_PRO (dep);
4753 rtx_insn *con = DEP_CON (dep);
4754 rtx_insn *inc_cand = backwards ? pro : con;
4755 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4756 goto next;
4757 if (parse_add_or_inc (mii, inc_cand, backwards))
4759 struct dep_replacement *desc;
4760 df_ref def;
4761 rtx newaddr, newmem;
4763 if (sched_verbose >= 5)
4764 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4765 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4767 /* We need to ensure that none of the operands of the inc
4768 instruction are assigned to by the mem insn. */
4769 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4770 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4771 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4773 if (sched_verbose >= 5)
4774 fprintf (sched_dump,
4775 "inc conflicts with store failure.\n");
4776 goto next;
4779 newaddr = mii->inc_input;
4780 if (mii->mem_index != NULL_RTX)
4781 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4782 mii->mem_index);
4783 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4784 mii->mem_constant + mii->inc_constant);
4785 newmem = attempt_change (mii, newaddr);
4786 if (newmem == NULL_RTX)
4787 goto next;
4788 if (sched_verbose >= 5)
4789 fprintf (sched_dump, "successful address replacement\n");
4790 desc = XCNEW (struct dep_replacement);
4791 DEP_REPLACE (dep) = desc;
4792 desc->loc = mii->mem_loc;
4793 desc->newval = newmem;
4794 desc->orig = *desc->loc;
4795 desc->insn = mii->mem_insn;
4796 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4797 INSN_SPEC_BACK_DEPS (con));
4798 if (backwards)
4800 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4801 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4802 REG_DEP_TRUE);
4804 else
4806 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4807 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4808 REG_DEP_ANTI);
4810 return true;
4812 next:
4813 sd_iterator_next (&sd_it);
4815 return false;
4818 /* A recursive function that walks ADDRESS_OF_X to find memory references
4819 which could be modified during scheduling. We call find_inc for each
4820 one we find that has a recognizable form. MII holds information about
4821 the pair of memory/increment instructions.
4822 We ensure that every instruction with a memory reference (which will be
4823 the location of the replacement) is assigned at most one breakable
4824 dependency. */
4826 static bool
4827 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4829 rtx x = *address_of_x;
4830 enum rtx_code code = GET_CODE (x);
4831 const char *const fmt = GET_RTX_FORMAT (code);
4832 int i;
4834 if (code == MEM)
4836 rtx reg0 = XEXP (x, 0);
4838 mii->mem_loc = address_of_x;
4839 mii->mem_index = NULL_RTX;
4840 mii->mem_constant = 0;
4841 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4843 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4844 reg0 = XEXP (reg0, 0);
4846 if (GET_CODE (reg0) == PLUS)
4848 mii->mem_index = XEXP (reg0, 1);
4849 reg0 = XEXP (reg0, 0);
4851 if (REG_P (reg0))
4853 df_ref use;
4854 int occurrences = 0;
4856 /* Make sure this reg appears only once in this insn. Can't use
4857 count_occurrences since that only works for pseudos. */
4858 FOR_EACH_INSN_USE (use, mii->mem_insn)
4859 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4860 if (++occurrences > 1)
4862 if (sched_verbose >= 5)
4863 fprintf (sched_dump, "mem count failure\n");
4864 return false;
4867 mii->mem_reg0 = reg0;
4868 return find_inc (mii, true) || find_inc (mii, false);
4870 return false;
4873 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4875 /* If REG occurs inside a MEM used in a bit-field reference,
4876 that is unacceptable. */
4877 return false;
4880 /* Time for some deep diving. */
4881 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4883 if (fmt[i] == 'e')
4885 if (find_mem (mii, &XEXP (x, i)))
4886 return true;
4888 else if (fmt[i] == 'E')
4890 int j;
4891 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4892 if (find_mem (mii, &XVECEXP (x, i, j)))
4893 return true;
4896 return false;
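/* Address shapes recognized by the MEM case above, together with the fields
   they populate (R, INDEX and C are placeholders):

     (mem (reg R))                          mem_reg0 = R
     (mem (plus (reg R) (const_int C)))     mem_reg0 = R, mem_constant = C
     (mem (plus (reg R) INDEX))             mem_reg0 = R, mem_index = INDEX
     (mem (plus (plus (reg R) INDEX)
                (const_int C)))             mem_reg0 = R, mem_index = INDEX,
                                            mem_constant = C  */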
4900 /* Examine the instructions between HEAD and TAIL and try to find
4901 dependencies that can be broken by modifying one of the patterns. */
4903 void
4904 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4906 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4907 int success_in_block = 0;
4909 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4911 struct mem_inc_info mii;
4913 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4914 continue;
4916 mii.mem_insn = insn;
4917 if (find_mem (&mii, &PATTERN (insn)))
4918 success_in_block++;
4920 if (success_in_block && sched_verbose >= 5)
4921 fprintf (sched_dump, "%d candidates for address modification found.\n",
4922 success_in_block);
4925 #endif /* INSN_SCHEDULING */