/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2015 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"		/* FIXME: Used by call_may_noreturn_p.  */
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "input.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "emit-rtl.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgbuild.h"
#include "predict.h"
#include "basic-block.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "ira.h"
#include "target.h"
#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
#define CHECK (true)
#else
#define CHECK (false)
#endif
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def>
    h_d_i_d = vNULL;
/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}
/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}
static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      |DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
	{
	case REG_DEP_TRUE:
	  t = 't';
	  break;

	case REG_DEP_OUTPUT:
	  t = 'o';
	  break;

	case REG_DEP_CONTROL:
	  t = 'c';
	  break;

	case REG_DEP_ANTI:
	  t = 'a';
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
	dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
	      && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}
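
/* A side note on the representation (an illustrative sketch, not part of
   the scheduler): DEP_LINK_PREV_NEXTP stores the address of whichever
   pointer currently points at the link, so unlinking needs no list head
   and no special case for the first element.  With a minimal stand-in
   node type the same idiom reads:

     struct node { struct node *next; struct node **prev_nextp; };

     static void
     unlink_node (struct node *n)
     {
       *n->prev_nextp = n->next;		   / * bypass N * /
       if (n->next != NULL)
	 n->next->prev_nextp = n->prev_nextp;	   / * fix back pointer * /
       n->next = NULL;
       n->prev_nextp = NULL;
     }

   detach_dep_link () above is exactly this shape.  */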
/* Pool to hold all dependency nodes (dep_node_t).  */
static alloc_pool dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  pool_free (dn_pool, n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static alloc_pool dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  pool_free (dl_pool, l);
}

/* Return true if there are no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
	break;

      remove_from_deps_list (link, l);
    }
  while (1);
}
/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
	return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
	return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   insn in its constraint has one register class.  Even if there is
   currently no hard register in the insn, the particular hard
   register will be in the insn after reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining caches are allocated as well.  */
static bitmap_head *true_dependency_cache = NULL;
static bitmap_head *output_dependency_cache = NULL;
static bitmap_head *anti_dependency_cache = NULL;
static bitmap_head *control_dependency_cache = NULL;
static bitmap_head *spec_dependency_cache = NULL;
static int cache_size;

/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;
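
/* Cache discipline in a nutshell (an illustrative sketch): each cache
   above is an array of bitmaps indexed by the consumer's luid, and bit
   INSN_LUID (producer) in that bitmap records one kind of dependence.
   A duplicate-link test thus becomes a bitmap lookup instead of a list
   walk:

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
		       INSN_LUID (pro)))
       {
	 / * A true dependence PRO -> CON is already recorded.  * /
       }

   See ask_dependency_caches () below for the complete query logic.  */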
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
				 enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
					  rtx_insn_list **, int, enum reg_note,
					  bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
							  rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

#ifdef ENABLE_CHECKING
static void check_dep (dep_t, bool);
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
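
/* For reference, a sketch of the RTL shape handled above (illustration
   only, not used by the code): the pc_set of a conditional jump looks
   like

     (set (pc) (if_then_else (lt (reg:CC 17) (const_int 0))
			     (label_ref 23)
			     (pc)))

   When XEXP (src, 2) is pc_rtx, as here, the branch is taken exactly when
   the comparison holds, so the comparison is returned as-is.  With the
   arms swapped the comparison must be reversed, which is what *REV
   reports to the caller.  */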
/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
			     XEXP (cond, 0),
			     XEXP (cond, 1));
    }
  return cond;
}
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
static int
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
	 (rev1==rev2
	  ? reversed_comparison_code (cond2, NULL)
	  : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
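
/* Example (for illustration only): two COND_EXEC insns guarded by
   (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0)) are
   mutually exclusive: NE is the reversed comparison code of EQ, the
   XEXP (cond, 0) operands are equal rtx'es, and the const_int operands
   are pointer-identical because const_ints are shared.  Such a pair of
   insns never needs a dependence between them, whatever registers or
   memory they touch, provided neither insn modifies the other's
   condition.  */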
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
	size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
	return false;
    }

  return true;
}
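
/* Usage sketch (illustration only; process_dep () is a hypothetical
   callback): the iterator layer lets clients walk several dependence
   lists as one sequence.  A typical loop over all unresolved backward
   dependencies of INSN is:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       process_dep (DEP_PRO (dep), DEP_TYPE (dep));

   SD_LIST_BACK is SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK, so sd_next_list
   above silently switches the iterator from the hard list to the
   speculative one when the first is exhausted.  */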
/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches, if available, to check whether the dependency
   is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
	      && output_dependency_cache != NULL
	      && anti_dependency_cache != NULL
	      && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_type = REG_DEP_CONTROL;
      else
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
	/* DEP does not add anything to the existing dependence.  */
	return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
	present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
	/* There is no existing dep so it should be created.  */
	return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
	{
	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
	      == present_dep_types)
	    /* DEP does not add anything to the existing dependence.  */
	    return DEP_PRESENT;
	}
      else
	{
	  /* Only true dependencies can be data speculative and
	     only anti dependencies can be control speculative.  */
	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
		      == present_dep_types);

	  /* if (DEP is SPECULATIVE) then
	     ..we should update DEP_STATUS
	     else
	     ..we should reset existing dep to non-speculative.  */
	}
    }

  return DEP_CHANGED;
}
/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
/* Type of dependence DEP has changed from OLD_TYPE.  Update dependency
   caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

#ifdef ENABLE_CHECKING
  check_dep (new_dep, mem1 != NULL);
#endif

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, so it shouldn't be present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
			 deps_list_t *back_list_ptr,
			 deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
	*back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

#ifdef ENABLE_CHECKING
  check_dep (dep, false);
#endif

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
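
/* Usage sketch (illustration only): a client normally builds a dependence
   in a stack-allocated dep_def and hands it to the sd_* layer, which
   copies it into pool-allocated nodes:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, producer, consumer, REG_DEP_TRUE);
     sd_add_or_update_dep (dep, false);

   haifa_note_dep () further down uses exactly this pattern.  */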
/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}
/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
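
/* Illustration (a sketch, not part of the pass): for

     PRO:  if (r1 == 0) goto L;	   / * conditional jump * /
     CON:  r2 = r2 + 1;		   / * lies on the not-taken path * /

   keeping the dependence as REG_DEP_CONTROL lets the scheduler move CON
   above the jump by predicating it on the not-taken condition (r1 != 0).
   That is only sound when the checks above succeed; e.g. if CON itself
   uses the condition register with a possibly different value, the
   dependence is demoted to REG_DEP_ANTI and CON stays below the jump.  */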
/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
		     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
	add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
			      rtx_insn_list **listp,
			      int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
				   rtx_insn_list **listp,
				   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
	{
	  remove_free_INSN_LIST_node (listp);
	  remove_free_EXPR_LIST_node (exprp);
	  removed++;
	  continue;
	}

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}
/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
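
/* Worked example (illustration only): for the sequence

     insn 1:  r1 = [A]		/ * read A  * /
     insn 2:  [A] = r2		/ * write A * /
     insn 3:  [A] = r3		/ * write A * /
     insn 4:  r4 = [A]		/ * read A  * /

   insn 2 anti-depends on insn 1 (3), insn 3 output-depends on insn 2 (2),
   and insn 4 truly depends on insn 3 (1).  The two reads need no link
   between themselves (0), and transitivity already orders insn 1 before
   insn 3 through insn 2, so no direct link is required there.  */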
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  if (sched_deps_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
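
/* Note (an informal sketch of the surrounding protocol): flushing is what
   keeps the pending lists bounded.  When the analyzer meets a barrier-like
   insn, or when the lists grow past the MAX_PENDING_LIST_LENGTH parameter,
   every pending read/write collapses into a single dependence on the
   flushing insn, and later memory insns need only depend on
   last_pending_memory_flush instead of the whole history.  */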
/* Instruction whose dependencies we are analyzing.  */
static rtx_insn *cur_insn = NULL;

/* Implement hooks for haifa scheduler.  */

static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}

static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}

void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}

void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}

void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}

static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}

static void
haifa_note_dep (rtx_insn *elem, ds_t ds)
{
  dep_def _dep;
  dep_t dep = &_dep;

  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
  if (mark_as_hard)
    DEP_NONREG (dep) = 1;
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
}
static void
note_reg_use (int r)
{
  if (sched_deps_info->note_reg_use)
    sched_deps_info->note_reg_use (r);
}

static void
note_reg_set (int r)
{
  if (sched_deps_info->note_reg_set)
    sched_deps_info->note_reg_set (r);
}

static void
note_reg_clobber (int r)
{
  if (sched_deps_info->note_reg_clobber)
    sched_deps_info->note_reg_clobber (r);
}

static void
note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_mem_dep)
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
}

static void
note_dep (rtx_insn *e, ds_t ds)
{
  if (sched_deps_info->note_dep)
    sched_deps_info->note_dep (e, ds);
}
/* Return the reg_note corresponding to DS.  */
enum reg_note
ds_to_dt (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;
  else if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;
  else if (ds & DEP_ANTI)
    return REG_DEP_ANTI;
  else
    {
      gcc_assert (ds & DEP_CONTROL);
      return REG_DEP_CONTROL;
    }
}
/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */

/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx_insn *insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate reg_set_data structure for REGNO and INSN.  */
static void
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
}

/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
	{
	  use2 = create_insn_reg_use (i, list->insn ());
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
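
/* Sketch (illustration only): the next_regno_use links built above form a
   cycle through all uses of the same regno, so any reg_use_data reaches
   the others without a head pointer:

     struct reg_use_data *u = use;
     do
       {
	 / * ... inspect u->insn here ... * /
	 u = u->next_regno_use;
       }
     while (u != use);

   A lone use points at itself, which is why use->next_regno_use = use is
   set before the loop that splices in the older uses.  */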
/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}
/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
   the register is clobbered or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
	{
	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
	  reg_pressure_info[cl].clobber_increase = new_incr;
	}
      else if (unused_p)
	{
	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
	  reg_pressure_info[cl].unused_set_increase = new_incr;
	}
      else
	{
	  new_incr = reg_pressure_info[cl].set_increase + incr;
	  reg_pressure_info[cl].set_increase = new_incr;
	  if (! insn_use_p (insn, regno))
	    reg_pressure_info[cl].change += incr;
	  create_insn_reg_set (regno, insn);
	}
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
			    bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
	{
	  cl = sched_regno_pressure_class[regno];
	  if (cl != NO_REGS)
	    {
	      if (clobber_p)
		{
		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
		  reg_pressure_info[cl].clobber_increase = new_incr;
		}
	      else if (unused_p)
		{
		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
		  reg_pressure_info[cl].unused_set_increase = new_incr;
		}
	      else
		{
		  new_incr = reg_pressure_info[cl].set_increase + 1;
		  reg_pressure_info[cl].set_increase = new_incr;
		  if (! insn_use_p (insn, regno))
		    reg_pressure_info[cl].change += 1;
		  create_insn_reg_set (regno, insn);
		}
	      gcc_assert (new_incr < (1 << INCREASE_BITS));
	    }
	}
      regno++;
    }
}
2111 /* Update the register pressure info after birth of pseudo or hard
2112 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say,
2113 respectively, whether the register is clobbered or unused after
2114 the insn. */
2115 static void
2116 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2118 int regno;
2120 if (GET_CODE (reg) == SUBREG)
2121 reg = SUBREG_REG (reg);
2123 if (! REG_P (reg))
2124 return;
2126 regno = REGNO (reg);
2127 if (regno < FIRST_PSEUDO_REGISTER)
2128 mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2129 clobber_p, unused_p);
2130 else
2131 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2134 /* Update the register pressure info after death of pseudo register
2135 REGNO. */
2136 static void
2137 mark_pseudo_death (int regno)
2139 int incr;
2140 enum reg_class cl;
2142 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2143 cl = sched_regno_pressure_class[regno];
2144 if (cl != NO_REGS)
2146 incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2147 reg_pressure_info[cl].change -= incr;
2151 /* Like mark_pseudo_death except that NREGS says how many hard
2152 registers are involved in the death. */
2153 static void
2154 mark_hard_regno_death (int regno, int nregs)
2156 enum reg_class cl;
2157 int last = regno + nregs;
2159 while (regno < last)
2161 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2162 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2164 cl = sched_regno_pressure_class[regno];
2165 if (cl != NO_REGS)
2166 reg_pressure_info[cl].change -= 1;
2168 regno++;
2172 /* Update the register pressure info after death of pseudo or hard
2173 register REG. */
2174 static void
2175 mark_reg_death (rtx reg)
2177 int regno;
2179 if (GET_CODE (reg) == SUBREG)
2180 reg = SUBREG_REG (reg);
2182 if (! REG_P (reg))
2183 return;
2185 regno = REGNO (reg);
2186 if (regno < FIRST_PSEUDO_REGISTER)
2187 mark_hard_regno_death (regno, REG_NREGS (reg));
2188 else
2189 mark_pseudo_death (regno);
2192 /* Process SETTER of REG. DATA is an insn containing the setter. */
2193 static void
2194 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2196 if (setter != NULL_RTX && GET_CODE (setter) != SET)
2197 return;
2198 mark_insn_reg_birth
2199 ((rtx) data, reg, false,
2200 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2203 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2204 static void
2205 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2207 if (GET_CODE (setter) == CLOBBER)
2208 mark_insn_reg_birth ((rtx) data, reg, true, false);
2211 /* Set up reg pressure info related to INSN. */
2212 void
2213 init_insn_reg_pressure_info (rtx_insn *insn)
2215 int i, len;
2216 enum reg_class cl;
2217 static struct reg_pressure_data *pressure_info;
2218 rtx link;
2220 gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2222 if (! INSN_P (insn))
2223 return;
2225 for (i = 0; i < ira_pressure_classes_num; i++)
2227 cl = ira_pressure_classes[i];
2228 reg_pressure_info[cl].clobber_increase = 0;
2229 reg_pressure_info[cl].set_increase = 0;
2230 reg_pressure_info[cl].unused_set_increase = 0;
2231 reg_pressure_info[cl].change = 0;
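/* Scan the stores of the pattern twice: once noticing only
   CLOBBERs and once noticing only SETs; each callback ignores
   the other kind of setter. */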
2234 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2236 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2238 #ifdef AUTO_INC_DEC
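/* Registers mentioned in REG_INC notes are modified by
   auto-increment addressing, so count them as stores too. */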
2239 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2240 if (REG_NOTE_KIND (link) == REG_INC)
2241 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2242 #endif
2244 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2245 if (REG_NOTE_KIND (link) == REG_DEAD)
2246 mark_reg_death (XEXP (link, 0));
2248 len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2249 pressure_info
2250 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2251 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2252 INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2253 * sizeof (int), 1);
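/* Copy the totals accumulated by register class into the insn's
   own array, which is indexed by pressure class index instead. */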
2254 for (i = 0; i < ira_pressure_classes_num; i++)
2256 cl = ira_pressure_classes[i];
2257 pressure_info[i].clobber_increase
2258 = reg_pressure_info[cl].clobber_increase;
2259 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2260 pressure_info[i].unused_set_increase
2261 = reg_pressure_info[cl].unused_set_increase;
2262 pressure_info[i].change = reg_pressure_info[cl].change;
2269 /* Internal variable for the sched_analyze_[12] () functions.
2270 If it is nonzero, sched_analyze_[12] is looking at the
2271 outermost SET of the insn. */
2272 static bool can_start_lhs_rhs_p;
2274 /* Extend reg info for the deps context DEPS given that
2275 we have just generated a register numbered REGNO. */
2276 static void
2277 extend_deps_reg_info (struct deps_desc *deps, int regno)
2279 int max_regno = regno + 1;
2281 gcc_assert (!reload_completed);
2283 /* In a readonly context, it would not hurt to extend info,
2284 but it should not be needed. */
2285 if (reload_completed && deps->readonly)
2287 deps->max_reg = max_regno;
2288 return;
2291 if (max_regno > deps->max_reg)
2293 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2294 max_regno);
2295 memset (&deps->reg_last[deps->max_reg],
2296 0, (max_regno - deps->max_reg)
2297 * sizeof (struct deps_reg));
2298 deps->max_reg = max_regno;
2302 /* Extends REG_INFO_P if needed. */
2303 void
2304 maybe_extend_reg_info_p (void)
2306 /* Extend REG_INFO_P, if needed. */
2307 if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2309 size_t new_reg_info_p_size = max_regno + 128;
2311 gcc_assert (!reload_completed && sel_sched_p ());
2313 reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2314 new_reg_info_p_size,
2315 reg_info_p_size,
2316 sizeof (*reg_info_p));
2317 reg_info_p_size = new_reg_info_p_size;
2321 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2322 The type of the reference is specified by REF and can be SET,
2323 CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
2325 static void
2326 sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
2327 enum rtx_code ref, rtx_insn *insn)
2329 /* We could emit new pseudos in renaming. Extend the reg structures. */
2330 if (!reload_completed && sel_sched_p ()
2331 && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2332 extend_deps_reg_info (deps, regno);
2334 maybe_extend_reg_info_p ();
2336 /* A hard reg in a wide mode may really be multiple registers.
2337 If so, mark all of them just like the first. */
2338 if (regno < FIRST_PSEUDO_REGISTER)
2340 int i = hard_regno_nregs[regno][mode];
2341 if (ref == SET)
2343 while (--i >= 0)
2344 note_reg_set (regno + i);
2346 else if (ref == USE)
2348 while (--i >= 0)
2349 note_reg_use (regno + i);
2351 else
2353 while (--i >= 0)
2354 note_reg_clobber (regno + i);
2358 /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2359 it does not reload. Ignore these as they have served their
2360 purpose already. */
2361 else if (regno >= deps->max_reg)
2363 enum rtx_code code = GET_CODE (PATTERN (insn));
2364 gcc_assert (code == USE || code == CLOBBER);
2367 else
2369 if (ref == SET)
2370 note_reg_set (regno);
2371 else if (ref == USE)
2372 note_reg_use (regno);
2373 else
2374 note_reg_clobber (regno);
2376 /* Pseudos that are REG_EQUIV to something may be replaced
2377 by that during reloading. We need only add dependencies for
2378 the address in the REG_EQUIV note. */
2379 if (!reload_completed && get_reg_known_equiv_p (regno))
2381 rtx t = get_reg_known_value (regno);
2382 if (MEM_P (t))
2383 sched_analyze_2 (deps, XEXP (t, 0), insn);
2386 /* Don't let it cross a call after scheduling if it doesn't
2387 already cross one. */
2388 if (REG_N_CALLS_CROSSED (regno) == 0)
2390 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2391 deps->sched_before_next_call
2392 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2393 else
2394 add_dependence_list (insn, deps->last_function_call, 1,
2395 REG_DEP_ANTI, false);
2400 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2401 rtx, X, creating all dependencies generated by the write to the
2402 destination of X, and reads of everything mentioned. */
2404 static void
2405 sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2407 rtx dest = XEXP (x, 0);
2408 enum rtx_code code = GET_CODE (x);
2409 bool cslr_p = can_start_lhs_rhs_p;
2411 can_start_lhs_rhs_p = false;
2413 gcc_assert (dest);
2414 if (dest == 0)
2415 return;
2417 if (cslr_p && sched_deps_info->start_lhs)
2418 sched_deps_info->start_lhs (dest);
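/* A PARALLEL destination (e.g. a value returned in several
   registers) writes each of its pieces, so each piece below is
   analyzed as a clobber. */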
2420 if (GET_CODE (dest) == PARALLEL)
2422 int i;
2424 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2425 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2426 sched_analyze_1 (deps,
2427 gen_rtx_CLOBBER (VOIDmode,
2428 XEXP (XVECEXP (dest, 0, i), 0)),
2429 insn);
2431 if (cslr_p && sched_deps_info->finish_lhs)
2432 sched_deps_info->finish_lhs ();
2434 if (code == SET)
2436 can_start_lhs_rhs_p = cslr_p;
2438 sched_analyze_2 (deps, SET_SRC (x), insn);
2440 can_start_lhs_rhs_p = false;
2443 return;
2446 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2447 || GET_CODE (dest) == ZERO_EXTRACT)
2449 if (GET_CODE (dest) == STRICT_LOW_PART
2450 || GET_CODE (dest) == ZERO_EXTRACT
2451 || df_read_modify_subreg_p (dest))
2453 /* These both read and modify the result. We must handle
2454 them as writes to get proper dependencies for following
2455 instructions. We must handle them as reads to get proper
2456 dependencies from this to previous instructions.
2457 Thus we need to call sched_analyze_2. */
2459 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2461 if (GET_CODE (dest) == ZERO_EXTRACT)
2463 /* The second and third arguments are values read by this insn. */
2464 sched_analyze_2 (deps, XEXP (dest, 1), insn);
2465 sched_analyze_2 (deps, XEXP (dest, 2), insn);
2467 dest = XEXP (dest, 0);
2470 if (REG_P (dest))
2472 int regno = REGNO (dest);
2473 machine_mode mode = GET_MODE (dest);
2475 sched_analyze_reg (deps, regno, mode, code, insn);
2477 #ifdef STACK_REGS
2478 /* Treat all writes to a stack register as modifying the TOS. */
2479 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2481 /* Avoid analyzing the same register twice. */
2482 if (regno != FIRST_STACK_REG)
2483 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2485 add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2486 FIRST_STACK_REG);
2488 #endif
2490 else if (MEM_P (dest))
2492 /* Writing memory. */
2493 rtx t = dest;
2495 if (sched_deps_info->use_cselib)
2497 machine_mode address_mode = get_address_mode (dest);
2499 t = shallow_copy_rtx (dest);
2500 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2501 GET_MODE (t), insn);
2502 XEXP (t, 0)
2503 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2504 insn);
2506 t = canon_rtx (t);
2508 /* Pending lists can't get larger in a readonly context. */
2509 if (!deps->readonly
2510 && ((deps->pending_read_list_length + deps->pending_write_list_length)
2511 >= MAX_PENDING_LIST_LENGTH))
2513 /* Flush all pending reads and writes to prevent the pending lists
2514 from getting any larger. Insn scheduling runs too slowly when
2515 these lists get long. When compiling GCC with itself,
2516 this flush occurs 8 times for sparc, and 10 times for m88k using
2517 the default value of 32. */
2518 flush_pending_lists (deps, insn, false, true);
2520 else
2522 rtx_insn_list *pending;
2523 rtx_expr_list *pending_mem;
2525 pending = deps->pending_read_insns;
2526 pending_mem = deps->pending_read_mems;
2527 while (pending)
2529 if (anti_dependence (pending_mem->element (), t)
2530 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2531 note_mem_dep (t, pending_mem->element (), pending->insn (),
2532 DEP_ANTI);
2534 pending = pending->next ();
2535 pending_mem = pending_mem->next ();
2538 pending = deps->pending_write_insns;
2539 pending_mem = deps->pending_write_mems;
2540 while (pending)
2542 if (output_dependence (pending_mem->element (), t)
2543 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2544 note_mem_dep (t, pending_mem->element (),
2545 pending->insn (),
2546 DEP_OUTPUT);
2548 pending = pending->next ();
2549 pending_mem = pending_mem->next ();
2552 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2553 REG_DEP_ANTI, true);
2554 add_dependence_list (insn, deps->pending_jump_insns, 1,
2555 REG_DEP_CONTROL, true);
2557 if (!deps->readonly)
2558 add_insn_mem_dependence (deps, false, insn, dest);
2560 sched_analyze_2 (deps, XEXP (dest, 0), insn);
2563 if (cslr_p && sched_deps_info->finish_lhs)
2564 sched_deps_info->finish_lhs ();
2566 /* Analyze reads. */
2567 if (GET_CODE (x) == SET)
2569 can_start_lhs_rhs_p = cslr_p;
2571 sched_analyze_2 (deps, SET_SRC (x), insn);
2573 can_start_lhs_rhs_p = false;
2577 /* Analyze the uses of memory and registers in rtx X in INSN. */
2578 static void
2579 sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
2581 int i;
2582 int j;
2583 enum rtx_code code;
2584 const char *fmt;
2585 bool cslr_p = can_start_lhs_rhs_p;
2587 can_start_lhs_rhs_p = false;
2589 gcc_assert (x);
2590 if (x == 0)
2591 return;
2593 if (cslr_p && sched_deps_info->start_rhs)
2594 sched_deps_info->start_rhs (x);
2596 code = GET_CODE (x);
2598 switch (code)
2600 CASE_CONST_ANY:
2601 case SYMBOL_REF:
2602 case CONST:
2603 case LABEL_REF:
2604 /* Ignore constants. */
2605 if (cslr_p && sched_deps_info->finish_rhs)
2606 sched_deps_info->finish_rhs ();
2608 return;
2610 case CC0:
2611 if (!HAVE_cc0)
2612 gcc_unreachable ();
2614 /* User of CC0 depends on immediately preceding insn. */
2615 SCHED_GROUP_P (insn) = 1;
2616 /* Don't move CC0 setter to another block (it can set up the
2617 same flag for previous CC0 users which is safe). */
2618 CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2620 if (cslr_p && sched_deps_info->finish_rhs)
2621 sched_deps_info->finish_rhs ();
2623 return;
2625 case REG:
2627 int regno = REGNO (x);
2628 machine_mode mode = GET_MODE (x);
2630 sched_analyze_reg (deps, regno, mode, USE, insn);
2632 #ifdef STACK_REGS
2633 /* Treat all reads of a stack register as modifying the TOS. */
2634 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2636 /* Avoid analyzing the same register twice. */
2637 if (regno != FIRST_STACK_REG)
2638 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2639 sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2641 #endif
2643 if (cslr_p && sched_deps_info->finish_rhs)
2644 sched_deps_info->finish_rhs ();
2646 return;
2649 case MEM:
2651 /* Reading memory. */
2652 rtx_insn_list *u;
2653 rtx_insn_list *pending;
2654 rtx_expr_list *pending_mem;
2655 rtx t = x;
2657 if (sched_deps_info->use_cselib)
2659 machine_mode address_mode = get_address_mode (t);
2661 t = shallow_copy_rtx (t);
2662 cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2663 GET_MODE (t), insn);
2664 XEXP (t, 0)
2665 = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2666 insn);
2669 if (!DEBUG_INSN_P (insn))
2671 t = canon_rtx (t);
2672 pending = deps->pending_read_insns;
2673 pending_mem = deps->pending_read_mems;
2674 while (pending)
2676 if (read_dependence (pending_mem->element (), t)
2677 && ! sched_insns_conditions_mutex_p (insn,
2678 pending->insn ()))
2679 note_mem_dep (t, pending_mem->element (),
2680 pending->insn (),
2681 DEP_ANTI);
2683 pending = pending->next ();
2684 pending_mem = pending_mem->next ();
2687 pending = deps->pending_write_insns;
2688 pending_mem = deps->pending_write_mems;
2689 while (pending)
2691 if (true_dependence (pending_mem->element (), VOIDmode, t)
2692 && ! sched_insns_conditions_mutex_p (insn,
2693 pending->insn ()))
2694 note_mem_dep (t, pending_mem->element (),
2695 pending->insn (),
2696 sched_deps_info->generate_spec_deps
2697 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2699 pending = pending->next ();
2700 pending_mem = pending_mem->next ();
2703 for (u = deps->last_pending_memory_flush; u; u = u->next ())
2704 add_dependence (insn, u->insn (), REG_DEP_ANTI);
2706 for (u = deps->pending_jump_insns; u; u = u->next ())
2707 if (deps_may_trap_p (x))
2709 if ((sched_deps_info->generate_spec_deps)
2710 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2712 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2713 MAX_DEP_WEAK);
2715 note_dep (u->insn (), ds);
2717 else
2718 add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2722 /* Always add these dependencies to pending_reads, since
2723 this insn may be followed by a write. */
2724 if (!deps->readonly)
2726 if ((deps->pending_read_list_length
2727 + deps->pending_write_list_length)
2728 >= MAX_PENDING_LIST_LENGTH
2729 && !DEBUG_INSN_P (insn))
2730 flush_pending_lists (deps, insn, true, true);
2731 add_insn_mem_dependence (deps, true, insn, x);
2734 sched_analyze_2 (deps, XEXP (x, 0), insn);
2736 if (cslr_p && sched_deps_info->finish_rhs)
2737 sched_deps_info->finish_rhs ();
2739 return;
2742 /* Force pending stores to memory in case a trap handler needs them. */
2743 case TRAP_IF:
2744 flush_pending_lists (deps, insn, true, false);
2745 break;
2747 case PREFETCH:
2748 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2749 reg_pending_barrier = TRUE_BARRIER;
2750 /* A prefetch insn contains only addresses, so if the prefetch
2751 address has no registers, there will be no dependencies on
2752 the prefetch insn. This is wrong from the correctness point
2753 of view: such a prefetch can be moved below a jump insn,
2754 which usually generates a MOVE_BARRIER that prevents moving
2755 insns containing registers or memories across the barrier.
2756 It is also wrong from the performance point of view: a
2757 prefetch without dependencies tends to be issued later
2758 instead of earlier. It is hard to generate accurate
2759 dependencies for prefetch insns, as a prefetch has only the
2760 start address, but it is better to have something than
2761 nothing. */
2762 if (!deps->readonly)
2764 rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2765 if (sched_deps_info->use_cselib)
2766 cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2767 add_insn_mem_dependence (deps, true, insn, x);
2769 break;
2771 case UNSPEC_VOLATILE:
2772 flush_pending_lists (deps, insn, true, true);
2773 /* FALLTHRU */
2775 case ASM_OPERANDS:
2776 case ASM_INPUT:
2778 /* Traditional and volatile asm instructions must be considered to use
2779 and clobber all hard registers, all pseudo-registers and all of
2780 memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
2782 Consider for instance a volatile asm that changes the fpu rounding
2783 mode. An insn should not be moved across this even if it only uses
2784 pseudo-regs because it might give an incorrectly rounded result. */
2785 if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2786 && !DEBUG_INSN_P (insn))
2787 reg_pending_barrier = TRUE_BARRIER;
2789 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2790 We cannot just fall through here, since then we would be confused
2791 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2792 a traditional asm the way a toplevel ASM_INPUT does. */
2794 if (code == ASM_OPERANDS)
2796 for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2797 sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2799 if (cslr_p && sched_deps_info->finish_rhs)
2800 sched_deps_info->finish_rhs ();
2802 return;
2804 break;
2807 case PRE_DEC:
2808 case POST_DEC:
2809 case PRE_INC:
2810 case POST_INC:
2811 /* These both read and modify the result. We must handle them as writes
2812 to get proper dependencies for following instructions. We must handle
2813 them as reads to get proper dependencies from this to previous
2814 instructions. Thus we need to pass them to both sched_analyze_1
2815 and sched_analyze_2. We must call sched_analyze_2 first in order
2816 to get the proper antecedent for the read. */
2817 sched_analyze_2 (deps, XEXP (x, 0), insn);
2818 sched_analyze_1 (deps, x, insn);
2820 if (cslr_p && sched_deps_info->finish_rhs)
2821 sched_deps_info->finish_rhs ();
2823 return;
2825 case POST_MODIFY:
2826 case PRE_MODIFY:
2827 /* op0 = op0 + op1 */
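/* E.g. (pre_modify (reg R) (plus (reg R) (reg X))) reads both R
   and X and also writes R. */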
2828 sched_analyze_2 (deps, XEXP (x, 0), insn);
2829 sched_analyze_2 (deps, XEXP (x, 1), insn);
2830 sched_analyze_1 (deps, x, insn);
2832 if (cslr_p && sched_deps_info->finish_rhs)
2833 sched_deps_info->finish_rhs ();
2835 return;
2837 default:
2838 break;
2841 /* Other cases: walk the insn. */
2842 fmt = GET_RTX_FORMAT (code);
2843 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2845 if (fmt[i] == 'e')
2846 sched_analyze_2 (deps, XEXP (x, i), insn);
2847 else if (fmt[i] == 'E')
2848 for (j = 0; j < XVECLEN (x, i); j++)
2849 sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2852 if (cslr_p && sched_deps_info->finish_rhs)
2853 sched_deps_info->finish_rhs ();
2856 /* Try to group two fusible insns together to prevent the scheduler
2857 from scheduling them apart. */
2859 static void
2860 sched_macro_fuse_insns (rtx_insn *insn)
2862 rtx_insn *prev;
2864 if (any_condjump_p (insn))
2866 unsigned int condreg1, condreg2;
2867 rtx cc_reg_1;
2868 targetm.fixed_condition_code_regs (&condreg1, &condreg2);
2869 cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2870 prev = prev_nonnote_nondebug_insn (insn);
2871 if (!reg_referenced_p (cc_reg_1, PATTERN (insn))
2872 || !prev
2873 || !modified_in_p (cc_reg_1, prev))
2874 return;
2876 else
2878 rtx insn_set = single_set (insn);
2880 prev = prev_nonnote_nondebug_insn (insn);
2881 if (!prev
2882 || !insn_set
2883 || !single_set (prev))
2884 return;
2888 if (targetm.sched.macro_fusion_pair_p (prev, insn))
2889 SCHED_GROUP_P (insn) = 1;
2893 /* Analyze an INSN with pattern X to find all dependencies. */
2894 static void
2895 sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
2897 RTX_CODE code = GET_CODE (x);
2898 rtx link;
2899 unsigned i;
2900 reg_set_iterator rsi;
2902 if (! reload_completed)
2904 HARD_REG_SET temp;
2906 extract_insn (insn);
2907 preprocess_constraints (insn);
2908 ira_implicitly_set_insn_hard_regs (&temp);
2909 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2910 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2913 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2914 && code == SET);
2916 /* Group compare and branch insns for macro-fusion. */
2917 if (targetm.sched.macro_fusion_p
2918 && targetm.sched.macro_fusion_p ())
2919 sched_macro_fuse_insns (insn);
2921 if (may_trap_p (x))
2922 /* Avoid moving trapping instructions across function calls that might
2923 not always return. */
2924 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2925 1, REG_DEP_ANTI, true);
2927 /* We must avoid creating a situation in which two successors of the
2928 current block have different unwind info after scheduling. If at any
2929 point the two paths re-join this leads to incorrect unwind info. */
2930 /* ??? There are certain situations involving a forced frame pointer in
2931 which, with extra effort, we could fix up the unwind info at a later
2932 CFG join. However, it seems better to notice these cases earlier
2933 during prologue generation and avoid marking the frame pointer setup
2934 as frame-related at all. */
2935 if (RTX_FRAME_RELATED_P (insn))
2937 /* Make sure prologue insn is scheduled before next jump. */
2938 deps->sched_before_next_jump
2939 = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2941 /* Make sure epilogue insn is scheduled after preceding jumps. */
2942 add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2943 true);
2946 if (code == COND_EXEC)
2948 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2950 /* ??? Should be recording conditions so we reduce the number of
2951 false dependencies. */
2952 x = COND_EXEC_CODE (x);
2953 code = GET_CODE (x);
2955 if (code == SET || code == CLOBBER)
2957 sched_analyze_1 (deps, x, insn);
2959 /* Bare clobber insns are used for letting life analysis, reg-stack
2960 and others know that a value is dead. Depend on the last call
2961 instruction so that reg-stack won't get confused. */
2962 if (code == CLOBBER)
2963 add_dependence_list (insn, deps->last_function_call, 1,
2964 REG_DEP_OUTPUT, true);
2966 else if (code == PARALLEL)
2968 for (i = XVECLEN (x, 0); i--;)
2970 rtx sub = XVECEXP (x, 0, i);
2971 code = GET_CODE (sub);
2973 if (code == COND_EXEC)
2975 sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2976 sub = COND_EXEC_CODE (sub);
2977 code = GET_CODE (sub);
2979 if (code == SET || code == CLOBBER)
2980 sched_analyze_1 (deps, sub, insn);
2981 else
2982 sched_analyze_2 (deps, sub, insn);
2985 else
2986 sched_analyze_2 (deps, x, insn);
2988 /* Mark registers CLOBBERED or used by called function. */
2989 if (CALL_P (insn))
2991 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2993 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2994 sched_analyze_1 (deps, XEXP (link, 0), insn);
2995 else if (GET_CODE (XEXP (link, 0)) != SET)
2996 sched_analyze_2 (deps, XEXP (link, 0), insn);
2998 /* Don't schedule anything after a tail call; a tail call needs
2999 to use at least all call-saved registers. */
3000 if (SIBLING_CALL_P (insn))
3001 reg_pending_barrier = TRUE_BARRIER;
3002 else if (find_reg_note (insn, REG_SETJMP, NULL))
3003 reg_pending_barrier = MOVE_BARRIER;
3006 if (JUMP_P (insn))
3008 rtx next;
3009 next = next_nonnote_nondebug_insn (insn);
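/* A jump immediately followed by a barrier never falls through
   (e.g. a return or an unconditional jump at the end of a
   function), so be conservative about moving anything across it. */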
3010 if (next && BARRIER_P (next))
3011 reg_pending_barrier = MOVE_BARRIER;
3012 else
3014 rtx_insn_list *pending;
3015 rtx_expr_list *pending_mem;
3017 if (sched_deps_info->compute_jump_reg_dependencies)
3019 (*sched_deps_info->compute_jump_reg_dependencies)
3020 (insn, reg_pending_control_uses);
3022 /* Make latency of jump equal to 0 by using anti-dependence. */
3023 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3025 struct deps_reg *reg_last = &deps->reg_last[i];
3026 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3027 false);
3028 add_dependence_list (insn, reg_last->implicit_sets,
3029 0, REG_DEP_ANTI, false);
3030 add_dependence_list (insn, reg_last->clobbers, 0,
3031 REG_DEP_ANTI, false);
3035 /* All memory writes and volatile reads must happen before the
3036 jump. Non-volatile reads must happen before the jump iff
3037 their result is needed by the register-use mask computed above. */
3039 pending = deps->pending_write_insns;
3040 pending_mem = deps->pending_write_mems;
3041 while (pending)
3043 if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3044 add_dependence (insn, pending->insn (),
3045 REG_DEP_OUTPUT);
3046 pending = pending->next ();
3047 pending_mem = pending_mem->next ();
3050 pending = deps->pending_read_insns;
3051 pending_mem = deps->pending_read_mems;
3052 while (pending)
3054 if (MEM_VOLATILE_P (pending_mem->element ())
3055 && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3056 add_dependence (insn, pending->insn (),
3057 REG_DEP_OUTPUT);
3058 pending = pending->next ();
3059 pending_mem = pending_mem->next ();
3062 add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3063 REG_DEP_ANTI, true);
3064 add_dependence_list (insn, deps->pending_jump_insns, 1,
3065 REG_DEP_ANTI, true);
3069 /* If this instruction can throw an exception, then moving it changes
3070 where block boundaries fall. This is mighty confusing elsewhere.
3071 Therefore, prevent such an instruction from being moved. Same for
3072 non-jump instructions that define block boundaries.
3073 ??? Unclear whether this is still necessary in EBB mode. If not,
3074 add_branch_dependences should be adjusted for RGN mode instead. */
3075 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3076 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3077 reg_pending_barrier = MOVE_BARRIER;
3079 if (sched_pressure != SCHED_PRESSURE_NONE)
3081 setup_insn_reg_uses (deps, insn);
3082 init_insn_reg_pressure_info (insn);
3085 /* Add register dependencies for insn. */
3086 if (DEBUG_INSN_P (insn))
3088 rtx_insn *prev = deps->last_debug_insn;
3089 rtx_insn_list *u;
3091 if (!deps->readonly)
3092 deps->last_debug_insn = insn;
3094 if (prev)
3095 add_dependence (insn, prev, REG_DEP_ANTI);
3097 add_dependence_list (insn, deps->last_function_call, 1,
3098 REG_DEP_ANTI, false);
3100 if (!sel_sched_p ())
3101 for (u = deps->last_pending_memory_flush; u; u = u->next ())
3102 add_dependence (insn, u->insn (), REG_DEP_ANTI);
3104 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3106 struct deps_reg *reg_last = &deps->reg_last[i];
3107 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3108 /* There's no point in making REG_DEP_CONTROL dependencies for
3109 debug insns. */
3110 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3111 false);
3113 if (!deps->readonly)
3114 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3116 CLEAR_REG_SET (reg_pending_uses);
3118 /* Quite often, a debug insn will refer to stuff in the
3119 previous instruction, but the reason we want this
3120 dependency here is to make sure the scheduler doesn't
3121 gratuitously move a debug insn ahead. This could dirty
3122 DF flags and cause additional analysis that wouldn't have
3123 occurred in compilation without debug insns, and such
3124 additional analysis can modify the generated code. */
3125 prev = PREV_INSN (insn);
3127 if (prev && NONDEBUG_INSN_P (prev))
3128 add_dependence (insn, prev, REG_DEP_ANTI);
3130 else
3132 regset_head set_or_clobbered;
3134 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3136 struct deps_reg *reg_last = &deps->reg_last[i];
3137 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3138 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3139 false);
3140 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3141 false);
3143 if (!deps->readonly)
3145 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3146 reg_last->uses_length++;
3150 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3151 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3153 struct deps_reg *reg_last = &deps->reg_last[i];
3154 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3155 add_dependence_list (insn, reg_last->implicit_sets, 0,
3156 REG_DEP_ANTI, false);
3157 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3158 false);
3160 if (!deps->readonly)
3162 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3163 reg_last->uses_length++;
3167 if (targetm.sched.exposed_pipeline)
3169 INIT_REG_SET (&set_or_clobbered);
3170 bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3171 reg_pending_sets);
3172 EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3174 struct deps_reg *reg_last = &deps->reg_last[i];
3175 rtx list;
3176 for (list = reg_last->uses; list; list = XEXP (list, 1))
3178 rtx other = XEXP (list, 0);
3179 if (INSN_CACHED_COND (other) != const_true_rtx
3180 && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3181 INSN_CACHED_COND (other) = const_true_rtx;
3186 /* If the current insn is conditional, we can't free any
3187 of the lists. */
3188 if (sched_has_condition_p (insn))
3190 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3192 struct deps_reg *reg_last = &deps->reg_last[i];
3193 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3194 false);
3195 add_dependence_list (insn, reg_last->implicit_sets, 0,
3196 REG_DEP_ANTI, false);
3197 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3198 false);
3199 add_dependence_list (insn, reg_last->control_uses, 0,
3200 REG_DEP_CONTROL, false);
3202 if (!deps->readonly)
3204 reg_last->clobbers
3205 = alloc_INSN_LIST (insn, reg_last->clobbers);
3206 reg_last->clobbers_length++;
3209 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3211 struct deps_reg *reg_last = &deps->reg_last[i];
3212 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3213 false);
3214 add_dependence_list (insn, reg_last->implicit_sets, 0,
3215 REG_DEP_ANTI, false);
3216 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3217 false);
3218 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3219 false);
3220 add_dependence_list (insn, reg_last->control_uses, 0,
3221 REG_DEP_CONTROL, false);
3223 if (!deps->readonly)
3224 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3227 else
3229 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3231 struct deps_reg *reg_last = &deps->reg_last[i];
3232 if (reg_last->uses_length >= MAX_PENDING_LIST_LENGTH
3233 || reg_last->clobbers_length >= MAX_PENDING_LIST_LENGTH)
3235 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3236 REG_DEP_OUTPUT, false);
3237 add_dependence_list_and_free (deps, insn,
3238 &reg_last->implicit_sets, 0,
3239 REG_DEP_ANTI, false);
3240 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3241 REG_DEP_ANTI, false);
3242 add_dependence_list_and_free (deps, insn,
3243 &reg_last->control_uses, 0,
3244 REG_DEP_ANTI, false);
3245 add_dependence_list_and_free (deps, insn,
3246 &reg_last->clobbers, 0,
3247 REG_DEP_OUTPUT, false);
3249 if (!deps->readonly)
3251 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3252 reg_last->clobbers_length = 0;
3253 reg_last->uses_length = 0;
3256 else
3258 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3259 false);
3260 add_dependence_list (insn, reg_last->implicit_sets, 0,
3261 REG_DEP_ANTI, false);
3262 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3263 false);
3264 add_dependence_list (insn, reg_last->control_uses, 0,
3265 REG_DEP_CONTROL, false);
3268 if (!deps->readonly)
3270 reg_last->clobbers_length++;
3271 reg_last->clobbers
3272 = alloc_INSN_LIST (insn, reg_last->clobbers);
3275 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3277 struct deps_reg *reg_last = &deps->reg_last[i];
3279 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3280 REG_DEP_OUTPUT, false);
3281 add_dependence_list_and_free (deps, insn,
3282 &reg_last->implicit_sets,
3283 0, REG_DEP_ANTI, false);
3284 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3285 REG_DEP_OUTPUT, false);
3286 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3287 REG_DEP_ANTI, false);
3288 add_dependence_list (insn, reg_last->control_uses, 0,
3289 REG_DEP_CONTROL, false);
3291 if (!deps->readonly)
3293 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3294 reg_last->uses_length = 0;
3295 reg_last->clobbers_length = 0;
3299 if (!deps->readonly)
3301 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3303 struct deps_reg *reg_last = &deps->reg_last[i];
3304 reg_last->control_uses
3305 = alloc_INSN_LIST (insn, reg_last->control_uses);
3310 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3311 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3313 struct deps_reg *reg_last = &deps->reg_last[i];
3314 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3315 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3316 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3317 add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3318 false);
3320 if (!deps->readonly)
3321 reg_last->implicit_sets
3322 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3325 if (!deps->readonly)
3327 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3328 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3329 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3330 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3331 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
3332 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3333 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3335 /* Set up the pending barrier found. */
3336 deps->last_reg_pending_barrier = reg_pending_barrier;
3339 CLEAR_REG_SET (reg_pending_uses);
3340 CLEAR_REG_SET (reg_pending_clobbers);
3341 CLEAR_REG_SET (reg_pending_sets);
3342 CLEAR_REG_SET (reg_pending_control_uses);
3343 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3344 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3346 /* Add dependencies if a scheduling barrier was found. */
3347 if (reg_pending_barrier)
3349 /* In the case of a barrier, most of the added dependencies are
3350 not real, so we use anti-dependences here. */
3351 if (sched_has_condition_p (insn))
3353 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3355 struct deps_reg *reg_last = &deps->reg_last[i];
3356 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3357 true);
3358 add_dependence_list (insn, reg_last->sets, 0,
3359 reg_pending_barrier == TRUE_BARRIER
3360 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3361 add_dependence_list (insn, reg_last->implicit_sets, 0,
3362 REG_DEP_ANTI, true);
3363 add_dependence_list (insn, reg_last->clobbers, 0,
3364 reg_pending_barrier == TRUE_BARRIER
3365 ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3368 else
3370 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3372 struct deps_reg *reg_last = &deps->reg_last[i];
3373 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3374 REG_DEP_ANTI, true);
3375 add_dependence_list_and_free (deps, insn,
3376 &reg_last->control_uses, 0,
3377 REG_DEP_CONTROL, true);
3378 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3379 reg_pending_barrier == TRUE_BARRIER
3380 ? REG_DEP_TRUE : REG_DEP_ANTI,
3381 true);
3382 add_dependence_list_and_free (deps, insn,
3383 &reg_last->implicit_sets, 0,
3384 REG_DEP_ANTI, true);
3385 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3386 reg_pending_barrier == TRUE_BARRIER
3387 ? REG_DEP_TRUE : REG_DEP_ANTI,
3388 true);
3390 if (!deps->readonly)
3392 reg_last->uses_length = 0;
3393 reg_last->clobbers_length = 0;
3398 if (!deps->readonly)
3399 for (i = 0; i < (unsigned)deps->max_reg; i++)
3401 struct deps_reg *reg_last = &deps->reg_last[i];
3402 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3403 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3406 /* Don't flush pending lists on speculative checks for
3407 selective scheduling. */
3408 if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3409 flush_pending_lists (deps, insn, true, true);
3411 reg_pending_barrier = NOT_A_BARRIER;
3414 /* If a post-call group is still open, see if it should remain so.
3415 This insn must be a simple move of a hard reg to a pseudo or
3416 vice-versa.
3418 We must avoid moving these insns for correctness on targets
3419 with small register classes, and for special registers like
3420 PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3421 hard regs for all targets. */
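/* A typical member of such a group is the copy of the return
   value from the function-value hard register into a pseudo
   right after the call. */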
3423 if (deps->in_post_call_group_p)
3425 rtx tmp, set = single_set (insn);
3426 int src_regno, dest_regno;
3428 if (set == NULL)
3430 if (DEBUG_INSN_P (insn))
3431 /* We don't want to mark debug insns as part of the same
3432 sched group. We know they really aren't, but if we use
3433 debug insns to tell that a call group is over, we'll
3434 get different code if debug insns are not there and
3435 instructions that follow seem like they should be part
3436 of the call group.
3438 Also, if we did, chain_to_prev_insn would move the
3439 deps of the debug insn to the call insn, modifying
3440 non-debug post-dependency counts of the debug insn
3441 dependencies and otherwise messing with the scheduling
3442 order.
3444 Instead, let such debug insns be scheduled freely, but
3445 keep the call group open in case there are insns that
3446 should be part of it afterwards. Since we grant debug
3447 insns higher priority than even sched group insns, it
3448 will all turn out all right. */
3449 goto debug_dont_end_call_group;
3450 else
3451 goto end_call_group;
3454 tmp = SET_DEST (set);
3455 if (GET_CODE (tmp) == SUBREG)
3456 tmp = SUBREG_REG (tmp);
3457 if (REG_P (tmp))
3458 dest_regno = REGNO (tmp);
3459 else
3460 goto end_call_group;
3462 tmp = SET_SRC (set);
3463 if (GET_CODE (tmp) == SUBREG)
3464 tmp = SUBREG_REG (tmp);
3465 if ((GET_CODE (tmp) == PLUS
3466 || GET_CODE (tmp) == MINUS)
3467 && REG_P (XEXP (tmp, 0))
3468 && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3469 && dest_regno == STACK_POINTER_REGNUM)
3470 src_regno = STACK_POINTER_REGNUM;
3471 else if (REG_P (tmp))
3472 src_regno = REGNO (tmp);
3473 else
3474 goto end_call_group;
3476 if (src_regno < FIRST_PSEUDO_REGISTER
3477 || dest_regno < FIRST_PSEUDO_REGISTER)
3479 if (!deps->readonly
3480 && deps->in_post_call_group_p == post_call_initial)
3481 deps->in_post_call_group_p = post_call;
3483 if (!sel_sched_p () || sched_emulate_haifa_p)
3485 SCHED_GROUP_P (insn) = 1;
3486 CANT_MOVE (insn) = 1;
3489 else
3491 end_call_group:
3492 if (!deps->readonly)
3493 deps->in_post_call_group_p = not_post_call;
3497 debug_dont_end_call_group:
3498 if ((current_sched_info->flags & DO_SPECULATION)
3499 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3500 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3501 be speculated. */
3503 if (sel_sched_p ())
3504 sel_mark_hard_insn (insn);
3505 else
3507 sd_iterator_def sd_it;
3508 dep_t dep;
3510 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3511 sd_iterator_cond (&sd_it, &dep);)
3512 change_spec_dep_to_hard (sd_it);
3516 /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3517 honor the original ordering of insns bearing that note. */
3518 if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3520 if (deps->last_args_size)
3521 add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3522 deps->last_args_size = insn;
3526 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3527 longjmp, loop forever, ...). */
3528 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3529 test for ECF_NORETURN? */
3530 static bool
3531 call_may_noreturn_p (rtx_insn *insn)
3533 rtx call;
3535 /* const or pure calls that aren't looping will always return. */
3536 if (RTL_CONST_OR_PURE_CALL_P (insn)
3537 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3538 return false;
3540 call = get_call_rtx_from (insn);
3541 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3543 rtx symbol = XEXP (XEXP (call, 0), 0);
3544 if (SYMBOL_REF_DECL (symbol)
3545 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3547 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3548 == BUILT_IN_NORMAL)
3549 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3551 case BUILT_IN_BCMP:
3552 case BUILT_IN_BCOPY:
3553 case BUILT_IN_BZERO:
3554 case BUILT_IN_INDEX:
3555 case BUILT_IN_MEMCHR:
3556 case BUILT_IN_MEMCMP:
3557 case BUILT_IN_MEMCPY:
3558 case BUILT_IN_MEMMOVE:
3559 case BUILT_IN_MEMPCPY:
3560 case BUILT_IN_MEMSET:
3561 case BUILT_IN_RINDEX:
3562 case BUILT_IN_STPCPY:
3563 case BUILT_IN_STPNCPY:
3564 case BUILT_IN_STRCAT:
3565 case BUILT_IN_STRCHR:
3566 case BUILT_IN_STRCMP:
3567 case BUILT_IN_STRCPY:
3568 case BUILT_IN_STRCSPN:
3569 case BUILT_IN_STRLEN:
3570 case BUILT_IN_STRNCAT:
3571 case BUILT_IN_STRNCMP:
3572 case BUILT_IN_STRNCPY:
3573 case BUILT_IN_STRPBRK:
3574 case BUILT_IN_STRRCHR:
3575 case BUILT_IN_STRSPN:
3576 case BUILT_IN_STRSTR:
3577 /* Assume certain string/memory builtins always return. */
3578 return false;
3579 default:
3580 break;
3585 /* For all other calls assume that they might not always return. */
3586 return true;
3589 /* Return true if INSN should be made dependent on the previous instruction
3590 group, and if all INSN's dependencies should be moved to the first
3591 instruction of that group. */
3593 static bool
3594 chain_to_prev_insn_p (rtx_insn *insn)
3596 rtx prev, x;
3598 /* INSN forms a group with the previous instruction. */
3599 if (SCHED_GROUP_P (insn))
3600 return true;
3602 /* If the previous instruction clobbers a register R and this one sets
3603 part of R, the clobber was added specifically to help us track the
3604 liveness of R. There's no point scheduling the clobber and leaving
3605 INSN behind, especially if we move the clobber to another block. */
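/* E.g. a (clobber (reg:DI R)) immediately followed by an insn
   setting (subreg:SI (reg:DI R) 0). */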
3606 prev = prev_nonnote_nondebug_insn (insn);
3607 if (prev
3608 && INSN_P (prev)
3609 && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3610 && GET_CODE (PATTERN (prev)) == CLOBBER)
3612 x = XEXP (PATTERN (prev), 0);
3613 if (set_of (x, insn))
3614 return true;
3617 return false;
3620 /* Analyze INSN with DEPS as a context. */
3621 void
3622 deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
3624 if (sched_deps_info->start_insn)
3625 sched_deps_info->start_insn (insn);
3627 /* Record the condition for this insn. */
3628 if (NONDEBUG_INSN_P (insn))
3630 rtx t;
3631 sched_get_condition_with_rev (insn, NULL);
3632 t = INSN_CACHED_COND (insn);
3633 INSN_COND_DEPS (insn) = NULL;
3634 if (reload_completed
3635 && (current_sched_info->flags & DO_PREDICATION)
3636 && COMPARISON_P (t)
3637 && REG_P (XEXP (t, 0))
3638 && CONSTANT_P (XEXP (t, 1)))
3640 unsigned int regno;
3641 int nregs;
3642 rtx_insn_list *cond_deps = NULL;
3643 t = XEXP (t, 0);
3644 regno = REGNO (t);
3645 nregs = REG_NREGS (t);
3646 while (nregs-- > 0)
3648 struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3649 cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3650 cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3651 cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3653 INSN_COND_DEPS (insn) = cond_deps;
3657 if (JUMP_P (insn))
3659 /* Make each JUMP_INSN (but not a speculative check)
3660 a scheduling barrier for memory references. */
3661 if (!deps->readonly
3662 && !(sel_sched_p ()
3663 && sel_insn_is_speculation_check (insn)))
3665 /* Keep the list a reasonable size. */
3666 if (deps->pending_flush_length++ >= MAX_PENDING_LIST_LENGTH)
3667 flush_pending_lists (deps, insn, true, true);
3668 else
3669 deps->pending_jump_insns
3670 = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3673 /* For each insn which shouldn't cross a jump, add a dependence. */
3674 add_dependence_list_and_free (deps, insn,
3675 &deps->sched_before_next_jump, 1,
3676 REG_DEP_ANTI, true);
3678 sched_analyze_insn (deps, PATTERN (insn), insn);
3680 else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3682 sched_analyze_insn (deps, PATTERN (insn), insn);
3684 else if (CALL_P (insn))
3686 int i;
3688 CANT_MOVE (insn) = 1;
3690 if (find_reg_note (insn, REG_SETJMP, NULL))
3692 /* This is setjmp. Assume that all registers, not just
3693 hard registers, may be clobbered by this call. */
3694 reg_pending_barrier = MOVE_BARRIER;
3696 else
3698 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3699 /* A call may read and modify global register variables. */
3700 if (global_regs[i])
3702 SET_REGNO_REG_SET (reg_pending_sets, i);
3703 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3705 /* Other call-clobbered hard regs may be clobbered.
3706 Since we only have a choice between 'might be clobbered'
3707 and 'definitely not clobbered', we must include all
3708 partly call-clobbered registers here. */
3709 else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3710 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3711 SET_REGNO_REG_SET (reg_pending_clobbers, i);
3712 /* We don't know exactly which fixed registers might be used
3713 by the function (the stack pointer is certainly among
3714 them), so be conservative and treat them all as used. */
3715 else if (fixed_regs[i])
3716 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3717 /* The frame pointer is normally not used by the function
3718 itself, but by the debugger. */
3719 /* ??? MIPS o32 is an exception. It uses the frame pointer
3720 in the macro expansion of jal but does not represent this
3721 fact in the call_insn rtl. */
3722 else if (i == FRAME_POINTER_REGNUM
3723 || (i == HARD_FRAME_POINTER_REGNUM
3724 && (! reload_completed || frame_pointer_needed)))
3725 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3728 /* For each insn which shouldn't cross a call, add a dependence
3729 between that insn and this call insn. */
3730 add_dependence_list_and_free (deps, insn,
3731 &deps->sched_before_next_call, 1,
3732 REG_DEP_ANTI, true);
3734 sched_analyze_insn (deps, PATTERN (insn), insn);
3736 /* If CALL would be in a sched group, then this will violate the
3737 convention that sched group insns have dependencies only on the
3738 previous instruction.
3740 Of course one can say: "Hey! What about the head of the sched group?"
3741 And I will answer: "Basic principles (one dep per insn) are always
3742 the same." */
3743 gcc_assert (!SCHED_GROUP_P (insn));
3745 /* In the absence of interprocedural alias analysis, we must flush
3746 all pending reads and writes, and start new dependencies starting
3747 from here. But only flush writes for constant calls (which may
3748 be passed a pointer to something we haven't written yet). */
3749 flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3751 if (!deps->readonly)
3753 /* Remember the last function call for limiting lifetimes. */
3754 free_INSN_LIST_list (&deps->last_function_call);
3755 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3757 if (call_may_noreturn_p (insn))
3759 /* Remember the last function call that might not always return
3760 normally for limiting moves of trapping insns. */
3761 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3762 deps->last_function_call_may_noreturn
3763 = alloc_INSN_LIST (insn, NULL_RTX);
3766 /* Before reload, begin a post-call group, so as to keep the
3767 lifetimes of hard registers correct. */
3768 if (! reload_completed)
3769 deps->in_post_call_group_p = post_call;
3773 if (sched_deps_info->use_cselib)
3774 cselib_process_insn (insn);
3776 if (sched_deps_info->finish_insn)
3777 sched_deps_info->finish_insn ();
3779 /* Fixup the dependencies in the sched group. */
3780 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3781 && chain_to_prev_insn_p (insn)
3782 && !sel_sched_p ())
3783 chain_to_prev_insn (insn);
3786 /* Initialize DEPS for the new block beginning with HEAD. */
3787 void
3788 deps_start_bb (struct deps_desc *deps, rtx_insn *head)
3790 gcc_assert (!deps->readonly);
3792 /* Before reload, if the previous block ended in a call, show that
3793 we are inside a post-call group, so as to keep the lifetimes of
3794 hard registers correct. */
3795 if (! reload_completed && !LABEL_P (head))
3797 rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3799 if (insn && CALL_P (insn))
3800 deps->in_post_call_group_p = post_call_initial;
3804 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3805 dependencies for each insn. */
3806 void
3807 sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3809 rtx_insn *insn;
3811 if (sched_deps_info->use_cselib)
3812 cselib_init (CSELIB_RECORD_MEMORY);
3814 deps_start_bb (deps, head);
3816 for (insn = head;; insn = NEXT_INSN (insn))
3819 if (INSN_P (insn))
3821 /* And initialize deps_lists. */
3822 sd_init_insn (insn);
3823 /* Clean up SCHED_GROUP_P, which may have been set by the
3824 last scheduler pass. */
3825 if (SCHED_GROUP_P (insn))
3826 SCHED_GROUP_P (insn) = 0;
3829 deps_analyze_insn (deps, insn);
3831 if (insn == tail)
3833 if (sched_deps_info->use_cselib)
3834 cselib_finish ();
3835 return;
3838 gcc_unreachable ();
3841 /* Helper for sched_free_deps ().
3842 Delete INSN's (RESOLVED_P) backward dependencies. */
3843 static void
3844 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3846 sd_iterator_def sd_it;
3847 dep_t dep;
3848 sd_list_types_def types;
3850 if (resolved_p)
3851 types = SD_LIST_RES_BACK;
3852 else
3853 types = SD_LIST_BACK;
3855 for (sd_it = sd_iterator_start (insn, types);
3856 sd_iterator_cond (&sd_it, &dep);)
3858 dep_link_t link = *sd_it.linkp;
3859 dep_node_t node = DEP_LINK_NODE (link);
3860 deps_list_t back_list;
3861 deps_list_t forw_list;
3863 get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3864 remove_from_deps_list (link, back_list);
3865 delete_dep_node (node);
3869 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3870 deps_lists. */
3871 void
3872 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3874 rtx_insn *insn;
3875 rtx_insn *next_tail = NEXT_INSN (tail);
3877 /* We make two passes since some insns may be scheduled before their
3878 dependencies are resolved. */
3879 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3880 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3882 /* Clear forward deps and leave the dep_nodes to the
3883 corresponding back_deps list. */
3884 if (resolved_p)
3885 clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3886 else
3887 clear_deps_list (INSN_FORW_DEPS (insn));
3889 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3890 if (INSN_P (insn) && INSN_LUID (insn) > 0)
3892 /* Clear resolved back deps together with their dep_nodes. */
3893 delete_dep_nodes_in_back_deps (insn, resolved_p);
3895 sd_finish_insn (insn);
3899 /* Initialize variables for region data dependence analysis.
3900 When LAZY_REG_LAST is true, do not allocate reg_last array
3901 of struct deps_desc immediately. */
3903 void
3904 init_deps (struct deps_desc *deps, bool lazy_reg_last)
3906 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3908 deps->max_reg = max_reg;
3909 if (lazy_reg_last)
3910 deps->reg_last = NULL;
3911 else
3912 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3913 INIT_REG_SET (&deps->reg_last_in_use);
3915 deps->pending_read_insns = 0;
3916 deps->pending_read_mems = 0;
3917 deps->pending_write_insns = 0;
3918 deps->pending_write_mems = 0;
3919 deps->pending_jump_insns = 0;
3920 deps->pending_read_list_length = 0;
3921 deps->pending_write_list_length = 0;
3922 deps->pending_flush_length = 0;
3923 deps->last_pending_memory_flush = 0;
3924 deps->last_function_call = 0;
3925 deps->last_function_call_may_noreturn = 0;
3926 deps->sched_before_next_call = 0;
3927 deps->sched_before_next_jump = 0;
3928 deps->in_post_call_group_p = not_post_call;
3929 deps->last_debug_insn = 0;
3930 deps->last_args_size = 0;
3931 deps->last_reg_pending_barrier = NOT_A_BARRIER;
3932 deps->readonly = 0;
3935 /* Init only the reg_last field of DEPS, which was not allocated
3936 before because we initialized DEPS lazily. */
3937 void
3938 init_deps_reg_last (struct deps_desc *deps)
3940 gcc_assert (deps && deps->max_reg > 0);
3941 gcc_assert (deps->reg_last == NULL);
3943 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3947 /* Free insn lists found in DEPS. */
3949 void
3950 free_deps (struct deps_desc *deps)
3952 unsigned i;
3953 reg_set_iterator rsi;
3955 /* We set max_reg to 0 when this context was already freed. */
3956 if (deps->max_reg == 0)
3958 gcc_assert (deps->reg_last == NULL);
3959 return;
3961 deps->max_reg = 0;
3963 free_INSN_LIST_list (&deps->pending_read_insns);
3964 free_EXPR_LIST_list (&deps->pending_read_mems);
3965 free_INSN_LIST_list (&deps->pending_write_insns);
3966 free_EXPR_LIST_list (&deps->pending_write_mems);
3967 free_INSN_LIST_list (&deps->last_pending_memory_flush);
3969 /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3970 times. For a testcase with 42000 regs and 8000 small basic blocks,
3971 this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
3972 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3974 struct deps_reg *reg_last = &deps->reg_last[i];
3975 if (reg_last->uses)
3976 free_INSN_LIST_list (&reg_last->uses);
3977 if (reg_last->sets)
3978 free_INSN_LIST_list (&reg_last->sets);
3979 if (reg_last->implicit_sets)
3980 free_INSN_LIST_list (&reg_last->implicit_sets);
3981 if (reg_last->control_uses)
3982 free_INSN_LIST_list (&reg_last->control_uses);
3983 if (reg_last->clobbers)
3984 free_INSN_LIST_list (&reg_last->clobbers);
3986 CLEAR_REG_SET (&deps->reg_last_in_use);
3988 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3989 it at all. */
3990 free (deps->reg_last);
3991 deps->reg_last = NULL;
3993 deps = NULL;
3996 /* Remove INSN from the dependence context DEPS. */
3997 void
3998 remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
4000 int removed;
4001 unsigned i;
4002 reg_set_iterator rsi;
4004 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
4005 &deps->pending_read_mems);
4006 if (!DEBUG_INSN_P (insn))
4007 deps->pending_read_list_length -= removed;
4008 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
4009 &deps->pending_write_mems);
4010 deps->pending_write_list_length -= removed;
4012 removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4013 deps->pending_flush_length -= removed;
4014 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4015 deps->pending_flush_length -= removed;
4017 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4018 {
4019 struct deps_reg *reg_last = &deps->reg_last[i];
4020 if (reg_last->uses)
4021 remove_from_dependence_list (insn, &reg_last->uses);
4022 if (reg_last->sets)
4023 remove_from_dependence_list (insn, &reg_last->sets);
4024 if (reg_last->implicit_sets)
4025 remove_from_dependence_list (insn, &reg_last->implicit_sets);
4026 if (reg_last->clobbers)
4027 remove_from_dependence_list (insn, &reg_last->clobbers);
4028 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4029 && !reg_last->clobbers)
4030 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
4031 }
4033 if (CALL_P (insn))
4034 {
4035 remove_from_dependence_list (insn, &deps->last_function_call);
4036 remove_from_dependence_list (insn,
4037 &deps->last_function_call_may_noreturn);
4038 }
4039 remove_from_dependence_list (insn, &deps->sched_before_next_call);
4042 /* Init deps data vector. */
4043 static void
4044 init_deps_data_vector (void)
4046 int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4047 if (reserve > 0 && ! h_d_i_d.space (reserve))
4048 h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
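/* Growing to 3 * sched_max_luid / 2 rather than to the exact size needed
   overallocates on purpose, so that repeated extensions as new luids are
   assigned amortize to constant time per insn.  */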
4051 /* If it is profitable to use them, initialize or extend (depending on
4052 GLOBAL_P) dependency data. */
4053 void
4054 sched_deps_init (bool global_p)
4056 /* Average number of insns in the basic block.
4057 '+ 1' is used to make it nonzero. */
4058 int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4060 init_deps_data_vector ();
4062 /* We use another caching mechanism for selective scheduling, so
4063 we don't use this one. */
4064 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4065 {
4066 /* ?!? We could save some memory by computing a per-region luid mapping
4067 which could reduce both the number of vectors in the cache and the
4068 size of each vector. Instead we just avoid the cache entirely unless
4069 the average number of instructions in a basic block is very high. See
4070 the comment before the declaration of true_dependency_cache for
4071 what we consider "very high". */
4072 cache_size = 0;
4073 extend_dependency_caches (sched_max_luid, true);
4074 }
4076 if (global_p)
4077 {
4078 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
4079 /* Allocate lists for one block at a time. */
4080 insns_in_block);
4081 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
4082 /* Allocate nodes for one block at a time.
4083 We assume that average insn has
4084 5 producers. */
4085 5 * insns_in_block);
4086 }
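/* The "insns_in_block > 100 * 5" test above enables the dependency caches
   only when basic blocks average more than 500 insns, the "very high"
   threshold the comment refers to; for smaller blocks the dependence lists
   are short enough that luid-indexed bitmaps are not worth their memory.
   The pool sizes are likewise heuristics: one block's worth of lists, and
   nodes for an assumed average of five producers per insn.  */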
4090 /* Create or extend (depending on CREATE_P) dependency caches to
4091 size N. */
4092 void
4093 extend_dependency_caches (int n, bool create_p)
4095 if (create_p || true_dependency_cache)
4096 {
4097 int i, luid = cache_size + n;
4099 true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4100 luid);
4101 output_dependency_cache = XRESIZEVEC (bitmap_head,
4102 output_dependency_cache, luid);
4103 anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4104 luid);
4105 control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4106 luid);
4108 if (current_sched_info->flags & DO_SPECULATION)
4109 spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4110 luid);
4112 for (i = cache_size; i < luid; i++)
4113 {
4114 bitmap_initialize (&true_dependency_cache[i], 0);
4115 bitmap_initialize (&output_dependency_cache[i], 0);
4116 bitmap_initialize (&anti_dependency_cache[i], 0);
4117 bitmap_initialize (&control_dependency_cache[i], 0);
4119 if (current_sched_info->flags & DO_SPECULATION)
4120 bitmap_initialize (&spec_dependency_cache[i], 0);
4121 }
4122 cache_size = luid;
4123 }
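/* A sketch of how the caches are consulted elsewhere in this file
   (assuming the usual luid indexing):

     if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (consumer)],
                       INSN_LUID (producer)))
       ... a true dependence was already recorded ...

   which is why every bitmap must exist for each luid up to cache_size.  */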
4126 /* Finalize dependency information for the whole function. */
4127 void
4128 sched_deps_finish (void)
4130 gcc_assert (deps_pools_are_empty_p ());
4131 free_alloc_pool_if_empty (&dn_pool);
4132 free_alloc_pool_if_empty (&dl_pool);
4133 gcc_assert (dn_pool == NULL && dl_pool == NULL);
4135 h_d_i_d.release ();
4138 if (true_dependency_cache)
4139 {
4140 int i;
4142 for (i = 0; i < cache_size; i++)
4143 {
4144 bitmap_clear (&true_dependency_cache[i]);
4145 bitmap_clear (&output_dependency_cache[i]);
4146 bitmap_clear (&anti_dependency_cache[i]);
4147 bitmap_clear (&control_dependency_cache[i]);
4149 if (sched_deps_info->generate_spec_deps)
4150 bitmap_clear (&spec_dependency_cache[i]);
4151 }
4152 free (true_dependency_cache);
4153 true_dependency_cache = NULL;
4154 free (output_dependency_cache);
4155 output_dependency_cache = NULL;
4156 free (anti_dependency_cache);
4157 anti_dependency_cache = NULL;
4158 free (control_dependency_cache);
4159 control_dependency_cache = NULL;
4161 if (sched_deps_info->generate_spec_deps)
4162 {
4163 free (spec_dependency_cache);
4164 spec_dependency_cache = NULL;
4165 }
4166 }
/* Reset the recorded size only after the old cache entries have been
   walked; clearing it before the loop above would make that loop a
   no-op. */
cache_size = 0;
4170 /* Initialize some global variables needed by the dependency analysis
4171 code. */
4173 void
4174 init_deps_global (void)
4176 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4177 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4178 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4179 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4180 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4181 reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4182 reg_pending_barrier = NOT_A_BARRIER;
4184 if (!sel_sched_p () || sched_emulate_haifa_p)
4185 {
4186 sched_deps_info->start_insn = haifa_start_insn;
4187 sched_deps_info->finish_insn = haifa_finish_insn;
4189 sched_deps_info->note_reg_set = haifa_note_reg_set;
4190 sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4191 sched_deps_info->note_reg_use = haifa_note_reg_use;
4193 sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4194 sched_deps_info->note_dep = haifa_note_dep;
4195 }
4198 /* Free everything used by the dependency analysis code. */
4200 void
4201 finish_deps_global (void)
4203 FREE_REG_SET (reg_pending_sets);
4204 FREE_REG_SET (reg_pending_clobbers);
4205 FREE_REG_SET (reg_pending_uses);
4206 FREE_REG_SET (reg_pending_control_uses);
4209 /* Estimate the weakness of dependence between MEM1 and MEM2. */
4210 dw_t
4211 estimate_dep_weak (rtx mem1, rtx mem2)
4213 rtx r1, r2;
4215 if (mem1 == mem2)
4216 /* MEMs are the same - don't speculate. */
4217 return MIN_DEP_WEAK;
4219 r1 = XEXP (mem1, 0);
4220 r2 = XEXP (mem2, 0);
4222 if (r1 == r2
4223 || (REG_P (r1) && REG_P (r2)
4224 && REGNO (r1) == REGNO (r2)))
4225 /* Again, MEMs are the same. */
4226 return MIN_DEP_WEAK;
4227 else if ((REG_P (r1) && !REG_P (r2))
4228 || (!REG_P (r1) && REG_P (r2)))
4229 /* Different addressing modes - reason to be more speculative
4230 than usual. */
4231 return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4232 else
4233 /* We can't say anything about the dependence. */
4234 return UNCERTAIN_DEP_WEAK;
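/* The returned value is a probability of successful speculation:
   MIN_DEP_WEAK means "the dependence is almost certainly real, do not
   speculate", while values approaching NO_DEP_WEAK mean a conflict is
   unlikely.  The mixed-addressing case above deliberately lands halfway
   between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK.  */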
4237 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4238 This function can handle same INSN and ELEM (INSN == ELEM).
4239 It is a convenience wrapper. */
4240 static void
4241 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4243 ds_t ds;
4244 bool internal;
4246 if (dep_type == REG_DEP_TRUE)
4247 ds = DEP_TRUE;
4248 else if (dep_type == REG_DEP_OUTPUT)
4249 ds = DEP_OUTPUT;
4250 else if (dep_type == REG_DEP_CONTROL)
4251 ds = DEP_CONTROL;
4252 else
4253 {
4254 gcc_assert (dep_type == REG_DEP_ANTI);
4255 ds = DEP_ANTI;
4256 }
4258 /* When add_dependence is called from inside sched-deps.c, we expect
4259 cur_insn to be non-null. */
4260 internal = cur_insn != NULL;
4261 if (internal)
4262 gcc_assert (insn == cur_insn);
4263 else
4264 cur_insn = insn;
4266 note_dep (elem, ds);
4267 if (!internal)
4268 cur_insn = NULL;
4271 /* Return weakness of speculative type TYPE in the dep_status DS,
4272 without checking to prevent ICEs on malformed input. */
4273 static dw_t
4274 get_dep_weak_1 (ds_t ds, ds_t type)
4276 ds = ds & type;
4278 switch (type)
4279 {
4280 case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4281 case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4282 case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4283 case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4284 default: gcc_unreachable ();
4285 }
4287 return (dw_t) ds;
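/* Each speculative type occupies a fixed-width bit-field within ds_t, so
   extracting a weakness is just a mask and a shift, e.g. for data
   speculation:

     dw_t w = (ds & BEGIN_DATA) >> BEGIN_DATA_BITS_OFFSET;

   get_dep_weak below additionally range-checks the result.  */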
4290 /* Return weakness of speculative type TYPE in the dep_status DS. */
4291 dw_t
4292 get_dep_weak (ds_t ds, ds_t type)
4294 dw_t dw = get_dep_weak_1 (ds, type);
4296 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4297 return dw;
4300 /* Return the dep_status, which has the same parameters as DS, except for
4301 speculative type TYPE, that will have weakness DW. */
4302 ds_t
4303 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4305 gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4307 ds &= ~type;
4308 switch (type)
4309 {
4310 case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4311 case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4312 case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4313 case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4314 default: gcc_unreachable ();
4315 }
4316 return ds;
4319 /* Return the join of two dep_statuses DS1 and DS2.
4320 If MAX_P is true then choose the greater probability,
4321 otherwise multiply probabilities.
4322 This function assumes that both DS1 and DS2 contain speculative bits. */
4323 static ds_t
4324 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4326 ds_t ds, t;
4328 gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4330 ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4332 t = FIRST_SPEC_TYPE;
4333 do
4334 {
4335 if ((ds1 & t) && !(ds2 & t))
4336 ds |= ds1 & t;
4337 else if (!(ds1 & t) && (ds2 & t))
4338 ds |= ds2 & t;
4339 else if ((ds1 & t) && (ds2 & t))
4340 {
4341 dw_t dw1 = get_dep_weak (ds1, t);
4342 dw_t dw2 = get_dep_weak (ds2, t);
4343 ds_t dw;
4345 if (!max_p)
4346 {
4347 dw = ((ds_t) dw1) * ((ds_t) dw2);
4348 dw /= MAX_DEP_WEAK;
4349 if (dw < MIN_DEP_WEAK)
4350 dw = MIN_DEP_WEAK;
4351 }
4352 else
4353 {
4354 if (dw1 >= dw2)
4355 dw = dw1;
4356 else
4357 dw = dw2;
4358 }
4360 ds = set_dep_weak (ds, t, (dw_t) dw);
4361 }
4363 if (t == LAST_SPEC_TYPE)
4364 break;
4365 t <<= SPEC_TYPE_SHIFT;
4366 }
4367 while (1);
4369 return ds;
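/* Worked example: if both inputs carry a BEGIN_DATA weakness of
   MAX_DEP_WEAK / 2, then with !MAX_P the merged weakness is
   (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK
   == MAX_DEP_WEAK / 4 (independent probabilities multiply), whereas
   with MAX_P it stays at MAX_DEP_WEAK / 2.  */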
4372 /* Return the join of two dep_statuses DS1 and DS2.
4373 This function assumes that both DS1 and DS2 contain speculative bits. */
4374 ds_t
4375 ds_merge (ds_t ds1, ds_t ds2)
4377 return ds_merge_1 (ds1, ds2, false);
4380 /* Return the join of two dep_statuses DS1 and DS2. */
4381 ds_t
4382 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4384 ds_t new_status = ds | ds2;
4386 if (new_status & SPECULATIVE)
4387 {
4388 if ((ds && !(ds & SPECULATIVE))
4389 || (ds2 && !(ds2 & SPECULATIVE)))
4390 /* Then this dep can't be speculative. */
4391 new_status &= ~SPECULATIVE;
4392 else
4393 {
4394 /* Both are speculative. Merging probabilities. */
4395 if (mem1)
4396 {
4397 dw_t dw;
4399 dw = estimate_dep_weak (mem1, mem2);
4400 ds = set_dep_weak (ds, BEGIN_DATA, dw);
4401 }
4403 if (!ds)
4404 new_status = ds2;
4405 else if (!ds2)
4406 new_status = ds;
4407 else
4408 new_status = ds_merge (ds2, ds);
4409 }
4410 }
4412 return new_status;
4415 /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4416 probabilities. */
4417 ds_t
4418 ds_max_merge (ds_t ds1, ds_t ds2)
4420 if (ds1 == 0 && ds2 == 0)
4421 return 0;
4423 if (ds1 == 0 && ds2 != 0)
4424 return ds2;
4426 if (ds1 != 0 && ds2 == 0)
4427 return ds1;
4429 return ds_merge_1 (ds1, ds2, true);
4432 /* Return the probability of speculation success for the speculation
4433 status DS. */
4434 dw_t
4435 ds_weak (ds_t ds)
4437 ds_t res = 1, dt;
4438 int n = 0;
4440 dt = FIRST_SPEC_TYPE;
4441 do
4442 {
4443 if (ds & dt)
4444 {
4445 res *= (ds_t) get_dep_weak (ds, dt);
4446 n++;
4447 }
4449 if (dt == LAST_SPEC_TYPE)
4450 break;
4451 dt <<= SPEC_TYPE_SHIFT;
4452 }
4453 while (1);
4455 gcc_assert (n);
4456 while (--n)
4457 res /= MAX_DEP_WEAK;
4459 if (res < MIN_DEP_WEAK)
4460 res = MIN_DEP_WEAK;
4462 gcc_assert (res <= MAX_DEP_WEAK);
4464 return (dw_t) res;
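/* In other words, the result is the product of the individual weaknesses
   rescaled back into range, prod (w_i) / MAX_DEP_WEAK^(n-1), clamped below
   at MIN_DEP_WEAK.  With a single speculative type present this reduces to
   that type's own weakness.  */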
4467 /* Return a dep status that contains all speculation types of DS. */
4468 ds_t
4469 ds_get_speculation_types (ds_t ds)
4471 if (ds & BEGIN_DATA)
4472 ds |= BEGIN_DATA;
4473 if (ds & BE_IN_DATA)
4474 ds |= BE_IN_DATA;
4475 if (ds & BEGIN_CONTROL)
4476 ds |= BEGIN_CONTROL;
4477 if (ds & BE_IN_CONTROL)
4478 ds |= BE_IN_CONTROL;
4480 return ds & SPECULATIVE;
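/* Note that BEGIN_DATA etc. are multi-bit masks covering a whole weakness
   field, so "ds |= BEGIN_DATA" saturates the field rather than setting a
   single flag: the result marks each speculation type present in DS with
   all of its bits and carries no meaningful weakness values.  */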
4483 /* Return a dep status that contains maximal weakness for each speculation
4484 type present in DS. */
4485 ds_t
4486 ds_get_max_dep_weak (ds_t ds)
4488 if (ds & BEGIN_DATA)
4489 ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4490 if (ds & BE_IN_DATA)
4491 ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4492 if (ds & BEGIN_CONTROL)
4493 ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4494 if (ds & BE_IN_CONTROL)
4495 ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4497 return ds;
4500 /* Dump information about the dependence status S. */
4501 static void
4502 dump_ds (FILE *f, ds_t s)
4504 fprintf (f, "{");
4506 if (s & BEGIN_DATA)
4507 fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4508 if (s & BE_IN_DATA)
4509 fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4510 if (s & BEGIN_CONTROL)
4511 fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4512 if (s & BE_IN_CONTROL)
4513 fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4515 if (s & HARD_DEP)
4516 fprintf (f, "HARD_DEP; ");
4518 if (s & DEP_TRUE)
4519 fprintf (f, "DEP_TRUE; ");
4520 if (s & DEP_OUTPUT)
4521 fprintf (f, "DEP_OUTPUT; ");
4522 if (s & DEP_ANTI)
4523 fprintf (f, "DEP_ANTI; ");
4524 if (s & DEP_CONTROL)
4525 fprintf (f, "DEP_CONTROL; ");
4527 fprintf (f, "}");
4530 DEBUG_FUNCTION void
4531 debug_ds (ds_t s)
4533 dump_ds (stderr, s);
4534 fprintf (stderr, "\n");
4537 #ifdef ENABLE_CHECKING
4538 /* Verify that dependence type and status are consistent.
4539 If RELAXED_P is true, then skip dep_weakness checks. */
4540 static void
4541 check_dep (dep_t dep, bool relaxed_p)
4543 enum reg_note dt = DEP_TYPE (dep);
4544 ds_t ds = DEP_STATUS (dep);
4546 gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4548 if (!(current_sched_info->flags & USE_DEPS_LIST))
4549 {
4550 gcc_assert (ds == 0);
4551 return;
4552 }
4554 /* Check that dependence type contains the same bits as the status. */
4555 if (dt == REG_DEP_TRUE)
4556 gcc_assert (ds & DEP_TRUE);
4557 else if (dt == REG_DEP_OUTPUT)
4558 gcc_assert ((ds & DEP_OUTPUT)
4559 && !(ds & DEP_TRUE));
4560 else if (dt == REG_DEP_ANTI)
4561 gcc_assert ((ds & DEP_ANTI)
4562 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4563 else
4564 gcc_assert (dt == REG_DEP_CONTROL
4565 && (ds & DEP_CONTROL)
4566 && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4568 /* HARD_DEP cannot appear in the dep_status of a link. */
4569 gcc_assert (!(ds & HARD_DEP));
4571 /* Check that dependence status is set correctly when speculation is not
4572 supported. */
4573 if (!sched_deps_info->generate_spec_deps)
4574 gcc_assert (!(ds & SPECULATIVE));
4575 else if (ds & SPECULATIVE)
4577 if (!relaxed_p)
4578 {
4579 ds_t type = FIRST_SPEC_TYPE;
4581 /* Check that dependence weakness is in proper range. */
4582 do
4583 {
4584 if (ds & type)
4585 get_dep_weak (ds, type);
4587 if (type == LAST_SPEC_TYPE)
4588 break;
4589 type <<= SPEC_TYPE_SHIFT;
4590 }
4591 while (1);
4592 }
4594 if (ds & BEGIN_SPEC)
4595 {
4596 /* Only true dependence can be data speculative. */
4597 if (ds & BEGIN_DATA)
4598 gcc_assert (ds & DEP_TRUE);
4600 /* Control dependencies in the insn scheduler are represented by
4601 anti-dependencies, therefore only anti dependence can be
4602 control speculative. */
4603 if (ds & BEGIN_CONTROL)
4604 gcc_assert (ds & DEP_ANTI);
4605 }
4606 else
4607 {
4608 /* Subsequent speculations should resolve true dependencies. */
4609 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4610 }
4612 /* Check that true and anti dependencies can't have other speculative
4613 statuses. */
4614 if (ds & DEP_TRUE)
4615 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4616 /* An output dependence can't be speculative at all. */
4617 gcc_assert (!(ds & DEP_OUTPUT));
4618 if (ds & DEP_ANTI)
4619 gcc_assert (ds & BEGIN_CONTROL);
4622 #endif /* ENABLE_CHECKING */
4624 /* The following code discovers opportunities to switch a memory reference
4625 and an increment by modifying the address. We ensure that this is done
4626 only for dependencies that are only used to show a single register
4627 dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4628 instruction involved is subject to only one dep that can cause a pattern
4629 change.
4631 When we discover a suitable dependency, we fill in the dep_replacement
4632 structure to show how to modify the memory reference. */
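/* A sketch of the kind of rewrite this enables (registers and constants
   are illustrative only):

     inc_insn:  r1 = r1 + 8
     mem_insn:  r2 = *(r1 + 4)

   The memory reference can be rewritten as r2 = *(r1 + 12) in terms of the
   pre-increment value of r1, after which it no longer has a true
   dependence on the increment and the two insns may be reordered.  */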
4634 /* Holds information about a pair of memory reference and register increment
4635 insns which depend on each other, but could possibly be interchanged. */
4636 struct mem_inc_info
4637 {
4638 rtx_insn *inc_insn;
4639 rtx_insn *mem_insn;
4641 rtx *mem_loc;
4642 /* A register occurring in the memory address for which we wish to break
4643 the dependence. This must be identical to the destination register of
4644 the increment. */
4645 rtx mem_reg0;
4646 /* Any kind of index that is added to that register. */
4647 rtx mem_index;
4648 /* The constant offset used in the memory address. */
4649 HOST_WIDE_INT mem_constant;
4650 /* The constant added in the increment insn. Negated if the increment is
4651 after the memory address. */
4652 HOST_WIDE_INT inc_constant;
4653 /* The source register used in the increment. May be different from mem_reg0
4654 if the increment occurs before the memory address. */
4655 rtx inc_input;
4656 };
4658 /* Verify that the memory location described in MII can be replaced with
4659 one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4660 insn remains unchanged by this function. */
4662 static rtx
4663 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4665 rtx mem = *mii->mem_loc;
4666 rtx new_mem;
4668 /* Jump through a lot of hoops to keep the attributes up to date. We
4669 do not want to call one of the change address variants that take
4670 an offset even though we know the offset in many cases. These
4671 assume you are changing where the address is pointing by the
4672 offset. */
4673 new_mem = replace_equiv_address_nv (mem, new_addr);
4674 if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4675 {
4676 if (sched_verbose >= 5)
4677 fprintf (sched_dump, "validation failure\n");
4678 return NULL_RTX;
4679 }
4681 /* Put back the old one. */
4682 validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4684 return new_mem;
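/* validate_change with IN_GROUP == 0 applies the replacement and checks
   that the modified insn still matches its constraints; the second call
   simply restores the original address.  So a successful attempt_change
   leaves the insn untouched and merely reports that NEW_ADDR would be
   legal.  */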
4687 /* Return true if INSN is of a form "a = b op c" where a and b are
4688 regs. op is + if c is a reg and +|- if c is a const. Fill in
4689 information in MII about what is found.
4690 BEFORE_MEM indicates whether the increment is found before or after
4691 a corresponding memory reference. */
4693 static bool
4694 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4696 rtx pat = single_set (insn);
4697 rtx src, cst;
4698 bool regs_equal;
4700 if (RTX_FRAME_RELATED_P (insn) || !pat)
4701 return false;
4703 /* Result must be single reg. */
4704 if (!REG_P (SET_DEST (pat)))
4705 return false;
4707 if (GET_CODE (SET_SRC (pat)) != PLUS)
4708 return false;
4710 mii->inc_insn = insn;
4711 src = SET_SRC (pat);
4712 mii->inc_input = XEXP (src, 0);
4714 if (!REG_P (XEXP (src, 0)))
4715 return false;
4717 if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4718 return false;
4720 cst = XEXP (src, 1);
4721 if (!CONST_INT_P (cst))
4722 return false;
4723 mii->inc_constant = INTVAL (cst);
4725 regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4727 if (!before_mem)
4728 {
4729 mii->inc_constant = -mii->inc_constant;
4730 if (!regs_equal)
4731 return false;
4732 }
4734 if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4735 {
4736 /* Note that the sign has already been reversed for !before_mem. */
4737 if (STACK_GROWS_DOWNWARD)
4738 return mii->inc_constant > 0;
4739 else
4740 return mii->inc_constant < 0;
4741 }
4742 return true;
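/* Illustrative examples: with BEFORE_MEM, "r1 = r1 + 8" yields
   inc_constant == 8, while the same insn seen after the memory reference
   yields inc_constant == -8, reflecting which value of r1 the rewritten
   address must be based on.  The stack-pointer test accepts only
   increments that deallocate stack space, so that a rewritten reference
   can never end up on the unallocated side of the stack pointer.  */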
4745 /* Once a suitable mem reference has been found and the corresponding data
4746 in MII has been filled in, this function is called to find a suitable
4747 add or inc insn involving the register we found in the memory
4748 reference. */
4750 static bool
4751 find_inc (struct mem_inc_info *mii, bool backwards)
4753 sd_iterator_def sd_it;
4754 dep_t dep;
4756 sd_it = sd_iterator_start (mii->mem_insn,
4757 backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4758 while (sd_iterator_cond (&sd_it, &dep))
4759 {
4760 dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4761 rtx_insn *pro = DEP_PRO (dep);
4762 rtx_insn *con = DEP_CON (dep);
4763 rtx_insn *inc_cand = backwards ? pro : con;
4764 if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4765 goto next;
4766 if (parse_add_or_inc (mii, inc_cand, backwards))
4767 {
4768 struct dep_replacement *desc;
4769 df_ref def;
4770 rtx newaddr, newmem;
4772 if (sched_verbose >= 5)
4773 fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4774 INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4776 /* Need to assure that none of the operands of the inc
4777 instruction are assigned to by the mem insn. */
4778 FOR_EACH_INSN_DEF (def, mii->mem_insn)
4779 if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4780 || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4781 {
4782 if (sched_verbose >= 5)
4783 fprintf (sched_dump,
4784 "inc conflicts with store failure.\n");
4785 goto next;
4786 }
4788 newaddr = mii->inc_input;
4789 if (mii->mem_index != NULL_RTX)
4790 newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4791 mii->mem_index);
4792 newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4793 mii->mem_constant + mii->inc_constant);
4794 newmem = attempt_change (mii, newaddr);
4795 if (newmem == NULL_RTX)
4796 goto next;
4797 if (sched_verbose >= 5)
4798 fprintf (sched_dump, "successful address replacement\n");
4799 desc = XCNEW (struct dep_replacement);
4800 DEP_REPLACE (dep) = desc;
4801 desc->loc = mii->mem_loc;
4802 desc->newval = newmem;
4803 desc->orig = *desc->loc;
4804 desc->insn = mii->mem_insn;
4805 move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4806 INSN_SPEC_BACK_DEPS (con));
4807 if (backwards)
4808 {
4809 FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4810 add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4811 REG_DEP_TRUE);
4812 }
4813 else
4814 {
4815 FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4816 add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4817 REG_DEP_ANTI);
4818 }
4819 return true;
4820 }
4821 next:
4822 sd_iterator_next (&sd_it);
4823 }
4824 return false;
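/* When a replacement is recorded, the dependence carrying it is moved from
   the consumer's hard back-deps list to its speculative list, and
   compensating dependencies are added against the inc insn's own producers
   (for the backwards case) or consumers (forwards), so correctness no
   longer relies on the mem/inc pair keeping their original order.  */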
4827 /* A recursive function that walks ADDRESS_OF_X to find memory references
4828 which could be modified during scheduling. We call find_inc for each
4829 one we find that has a recognizable form. MII holds information about
4830 the pair of memory/increment instructions.
4831 We ensure that every instruction with a memory reference (which will be
4832 the location of the replacement) is assigned at most one breakable
4833 dependency. */
4835 static bool
4836 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4838 rtx x = *address_of_x;
4839 enum rtx_code code = GET_CODE (x);
4840 const char *const fmt = GET_RTX_FORMAT (code);
4841 int i;
4843 if (code == MEM)
4844 {
4845 rtx reg0 = XEXP (x, 0);
4847 mii->mem_loc = address_of_x;
4848 mii->mem_index = NULL_RTX;
4849 mii->mem_constant = 0;
4850 if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4851 {
4852 mii->mem_constant = INTVAL (XEXP (reg0, 1));
4853 reg0 = XEXP (reg0, 0);
4854 }
4855 if (GET_CODE (reg0) == PLUS)
4856 {
4857 mii->mem_index = XEXP (reg0, 1);
4858 reg0 = XEXP (reg0, 0);
4859 }
4860 if (REG_P (reg0))
4861 {
4862 df_ref use;
4863 int occurrences = 0;
4865 /* Make sure this reg appears only once in this insn. Can't use
4866 count_occurrences since that only works for pseudos. */
4867 FOR_EACH_INSN_USE (use, mii->mem_insn)
4868 if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4869 if (++occurrences > 1)
4870 {
4871 if (sched_verbose >= 5)
4872 fprintf (sched_dump, "mem count failure\n");
4873 return false;
4874 }
4876 mii->mem_reg0 = reg0;
4877 return find_inc (mii, true) || find_inc (mii, false);
4878 }
4879 return false;
4880 }
4882 if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4883 {
4884 /* If REG occurs inside a MEM used in a bit-field reference,
4885 that is unacceptable. */
4886 return false;
4887 }
4889 /* Time for some deep diving. */
4890 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4891 {
4892 if (fmt[i] == 'e')
4893 {
4894 if (find_mem (mii, &XEXP (x, i)))
4895 return true;
4896 }
4897 else if (fmt[i] == 'E')
4898 {
4899 int j;
4900 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4901 if (find_mem (mii, &XVECEXP (x, i, j)))
4902 return true;
4903 }
4904 }
4905 return false;
4909 /* Examine the instructions between HEAD and TAIL and try to find
4910 dependencies that can be broken by modifying one of the patterns. */
4912 void
4913 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4915 rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4916 int success_in_block = 0;
4918 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4919 {
4920 struct mem_inc_info mii;
4922 if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4923 continue;
4925 mii.mem_insn = insn;
4926 if (find_mem (&mii, &PATTERN (insn)))
4927 success_in_block++;
4928 }
4929 if (success_in_block && sched_verbose >= 5)
4930 fprintf (sched_dump, "%d candidates for address modification found.\n",
4931 success_in_block);
4934 #endif /* INSN_SCHEDULING */